From 874f7bdab3aab3e54984f6b9398abec1bdbfcb07 Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Tue, 9 Jul 2024 10:52:12 +0800 Subject: [PATCH 001/257] Optimize the processing logic for Depend-type fields (#2248) --- .../monitor-edit/monitor-edit.component.html | 1 + .../monitor-edit/monitor-edit.component.ts | 70 +++++-------------- .../monitor-form/monitor-form.component.html | 2 +- .../monitor-form/monitor-form.component.ts | 21 ++++-- .../monitor-new/monitor-new.component.ts | 20 +++--- 5 files changed, 47 insertions(+), 67 deletions(-) diff --git a/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.html b/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.html index 922964404f9..40cb5f49c99 100644 --- a/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.html +++ b/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.html @@ -35,6 +35,7 @@ [paramDefines]="paramDefines" [advancedParams]="advancedParams" [advancedParamDefines]="advancedParamDefines" + [paramValueMap]="paramValueMap" [collector]="collector" [collectors]="collectors" (formCancel)="onCancel()" diff --git a/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.ts b/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.ts index 72ff1bebe18..88a4afeb148 100644 --- a/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.ts +++ b/web-app/src/app/routes/monitor/monitor-edit/monitor-edit.component.ts @@ -57,7 +57,7 @@ export class MonitorEditComponent implements OnInit { params!: Param[]; advancedParamDefines!: ParamDefine[]; advancedParams!: Param[]; - paramValueMap = new Map(); + paramValueMap!: Map; monitor = new Monitor(); collectors!: Collector[]; collector: string = ''; @@ -79,13 +79,15 @@ export class MonitorEditComponent implements OnInit { .pipe( switchMap((message: Message) => { if (message.code === 0) { + let paramValueMap = new Map(); this.monitor = message.data.monitor; this.collector = 
message.data.collector == null ? '' : message.data.collector; this.titleSvc.setTitleByI18n(`monitor.app.${this.monitor.app}`); if (message.data.params != null) { message.data.params.forEach((item: Param) => { - this.paramValueMap.set(item.field, item); + paramValueMap.set(item.field, item); }); + this.paramValueMap = paramValueMap; } this.detected = false; if (this.monitor.tags == undefined) { @@ -102,10 +104,10 @@ export class MonitorEditComponent implements OnInit { .pipe( switchMap(message => { if (message.code === 0) { - this.params = []; - this.advancedParams = []; - this.paramDefines = []; - this.advancedParamDefines = []; + let params: Param[] = []; + let advancedParams: Param[] = []; + let paramDefines: ParamDefine[] = []; + let advancedParamDefines: ParamDefine[] = []; message.data.forEach(define => { let param = this.paramValueMap.get(define.field); if (param === undefined) { @@ -144,11 +146,11 @@ export class MonitorEditComponent implements OnInit { } define.name = this.i18nSvc.fanyi(`monitor.app.${this.monitor.app}.param.${define.field}`); if (define.hide) { - this.advancedParams.push(param); - this.advancedParamDefines.push(define); + advancedParams.push(param); + advancedParamDefines.push(define); } else { - this.params.push(param); - this.paramDefines.push(define); + params.push(param); + paramDefines.push(define); } if ( define.field == 'host' && @@ -158,8 +160,10 @@ export class MonitorEditComponent implements OnInit { this.hostName = define.name; } }); - this.onPageInit(); - this.detectDepend(); + this.params = [...params]; + this.advancedParams = [...advancedParams]; + this.paramDefines = [...paramDefines]; + this.advancedParamDefines = [...advancedParamDefines]; } else { console.warn(message.msg); } @@ -182,48 +186,6 @@ export class MonitorEditComponent implements OnInit { ); } - onPageInit() { - this.paramDefines.forEach((paramDefine, index) => { - this.params[index].display = true; - }); - 
this.advancedParamDefines.forEach((advancedParamDefine, index) => { - this.advancedParams[index].display = true; - }); - } - - detectDepend() { - this.paramDefines.forEach((paramDefine, index) => { - if (paramDefine.type == 'radio') { - this.onDependChanged(this.paramValueMap.get(paramDefine.field)?.paramValue, paramDefine.field); - } - }); - } - - onDependChanged(dependValue: string, dependField: string) { - this.paramDefines.forEach((paramDefine, index) => { - if (paramDefine.depend) { - let fieldValues = new Map(Object.entries(paramDefine.depend)).get(dependField); - if (fieldValues) { - this.params[index].display = false; - if (fieldValues.map(String).includes(dependValue)) { - this.params[index].display = true; - } - } - } - }); - this.advancedParamDefines.forEach((advancedParamDefine, index) => { - if (advancedParamDefine.depend) { - let fieldValues = new Map(Object.entries(advancedParamDefine.depend)).get(dependField); - if (fieldValues) { - this.advancedParams[index].display = false; - if (fieldValues.map(String).includes(dependValue)) { - this.advancedParams[index].display = true; - } - } - } - }); - } - onSubmit(info: any) { let addMonitor = { detected: this.detected, diff --git a/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html b/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html index c44f49a3e4c..2cbb02005e3 100644 --- a/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html +++ b/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html @@ -45,7 +45,7 @@ 0" - [nzGhost]="true" - style="background: ghostwhite; margin-bottom: 24px" - > + - - - - - - - {{ 'tag' | i18n }} - - - - - - - {{ data.name }} - - {{ data.name + ':' + data.tagValue }} - - - - - - - diff --git a/web-app/src/app/shared/components/form-item/form-item.component.ts b/web-app/src/app/shared/components/form-item/form-item.component.ts index 18fbe33e830..5559dce1cd5 100644 --- 
a/web-app/src/app/shared/components/form-item/form-item.component.ts +++ b/web-app/src/app/shared/components/form-item/form-item.component.ts @@ -19,105 +19,19 @@ import { Component, EventEmitter, Input, Output } from '@angular/core'; -import { TagItem } from '../../../pojo/NoticeRule'; -import { Tag } from '../../../pojo/Tag'; -import { TagService } from '../../../service/tag.service'; - @Component({ selector: 'app-form-item', templateUrl: './form-item.component.html', styleUrls: ['./form-item.component.less'] }) export class FormItemComponent { - constructor(private tagSvc: TagService) {} + constructor() {} @Input() item!: any; @Input() value!: any; @Input() extra: any = {}; @Output() readonly valueChange = new EventEmitter(); - isManageModalVisible = false; - isManageModalOkLoading = false; - checkedTags = new Set(); - tagTableLoading = false; - tagCheckedAll: boolean = false; - tagSearch!: string; - tags!: Tag[]; - - loadTagsTable() { - this.tagTableLoading = true; - let tagsReq$ = this.tagSvc.loadTags(this.tagSearch, 1, 0, 1000).subscribe( - message => { - this.tagTableLoading = false; - this.tagCheckedAll = false; - this.checkedTags.clear(); - if (message.code === 0) { - let page = message.data; - this.tags = page.content; - } else { - console.warn(message.msg); - } - tagsReq$.unsubscribe(); - }, - error => { - this.tagTableLoading = false; - tagsReq$.unsubscribe(); - } - ); - } - onChange(value: any) { this.valueChange.emit(value); } - - onRemoveTag(tag: TagItem) { - if (this.value != undefined) { - this.onChange(this.value.filter((item: TagItem) => item !== tag)); - } - } - - sliceTagName(tag: any): string { - if (tag.value != undefined && tag.value.trim() != '') { - return `${tag.name}:${tag.value}`; - } else { - return tag.name; - } - } - - onShowTagsModal() { - this.isManageModalVisible = true; - this.loadTagsTable(); - } - - onManageModalCancel() { - this.isManageModalVisible = false; - } - - onManageModalOk() { - this.isManageModalOkLoading = true; - 
let value = this.value == undefined ? [] : this.value; - this.checkedTags.forEach(item => { - if (this.value.find((tag: { id: number }) => tag.id == item.id) == undefined) { - value.push(item); - } - }); - this.onChange(value); - this.isManageModalOkLoading = false; - this.isManageModalVisible = false; - } - - onAllChecked(checked: boolean) { - if (checked) { - this.tags.forEach(tag => this.checkedTags.add(tag)); - } else { - this.checkedTags.clear(); - } - } - - onItemChecked(tag: Tag, checked: boolean) { - if (checked) { - this.checkedTags.add(tag); - } else { - this.checkedTags.delete(tag); - } - } } diff --git a/web-app/src/app/shared/components/tags-select/tags-select.component.html b/web-app/src/app/shared/components/tags-select/tags-select.component.html new file mode 100644 index 00000000000..ff8b3f7c58e --- /dev/null +++ b/web-app/src/app/shared/components/tags-select/tags-select.component.html @@ -0,0 +1,78 @@ + + + + + {{ sliceTagName(tag) }} + + + + + {{ 'tag.new' | i18n }} + + + + + +
+ + + + + + + + + + + + + {{ 'tag' | i18n }} + + + + + + + {{ data.name }} + + {{ data.name + ':' + data.tagValue }} + + + + + +
+
diff --git a/web-app/src/app/shared/components/tags-select/tags-select.component.less b/web-app/src/app/shared/components/tags-select/tags-select.component.less new file mode 100644 index 00000000000..98753eadd09 --- /dev/null +++ b/web-app/src/app/shared/components/tags-select/tags-select.component.less @@ -0,0 +1,4 @@ +:host { + ::ng-deep { + } +} diff --git a/web-app/src/app/shared/components/tags-select/tags-select.component.spec.ts b/web-app/src/app/shared/components/tags-select/tags-select.component.spec.ts new file mode 100644 index 00000000000..41d89a3bfdb --- /dev/null +++ b/web-app/src/app/shared/components/tags-select/tags-select.component.spec.ts @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import { ComponentFixture, TestBed } from '@angular/core/testing'; + +import { TagsSelectComponent } from './tags-select.component'; + +describe('TagsSelectComponent', () => { + let component: TagsSelectComponent; + let fixture: ComponentFixture; + + beforeEach(async () => { + await TestBed.configureTestingModule({ + declarations: [TagsSelectComponent] + }).compileComponents(); + }); + + beforeEach(() => { + fixture = TestBed.createComponent(TagsSelectComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/web-app/src/app/shared/components/tags-select/tags-select.component.ts b/web-app/src/app/shared/components/tags-select/tags-select.component.ts new file mode 100644 index 00000000000..3ed1c72a8d6 --- /dev/null +++ b/web-app/src/app/shared/components/tags-select/tags-select.component.ts @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import { Component, EventEmitter, Input, Output, forwardRef } from '@angular/core'; +import { ControlValueAccessor, NG_VALUE_ACCESSOR } from '@angular/forms'; + +import { TagItem } from '../../../pojo/NoticeRule'; +import { Tag } from '../../../pojo/Tag'; +import { TagService } from '../../../service/tag.service'; + +@Component({ + selector: 'app-tags-select', + templateUrl: './tags-select.component.html', + styleUrls: ['./tags-select.component.less'], + providers: [ + { + provide: NG_VALUE_ACCESSOR, + useExisting: forwardRef(() => TagsSelectComponent), + multi: true + } + ] +}) +export class TagsSelectComponent implements ControlValueAccessor { + constructor(private tagSvc: TagService) {} + + @Input() value!: any; + @Input() mode!: 'default' | 'closeable' | 'checkable'; + @Output() readonly valueChange = new EventEmitter(); + + isManageModalVisible = false; + isManageModalOkLoading = false; + checkedTags = new Set(); + tagTableLoading = false; + tagCheckedAll: boolean = false; + tagSearch!: string; + tags!: Tag[]; + + _onChange = (_: any) => {}; + _onTouched = () => {}; + + onChange(inputValue: any) { + this.valueChange.emit(inputValue); + this._onChange(inputValue); + } + + writeValue(value: any): void { + this.value = value; + } + + registerOnChange(fn: any): void { + this._onChange = fn; + } + + registerOnTouched(fn: any): void { + this._onTouched = fn; + } + + loadTagsTable() { + this.tagTableLoading = true; + let tagsReq$ = this.tagSvc.loadTags(this.tagSearch, 1, 0, 1000).subscribe( + message => { + this.tagTableLoading = false; + this.tagCheckedAll = false; + this.checkedTags.clear(); + if (message.code === 0) { + let page = message.data; + this.tags = page.content; + } else { + console.warn(message.msg); + } + tagsReq$.unsubscribe(); + }, + error => { + this.tagTableLoading = false; + tagsReq$.unsubscribe(); + } + ); + } + + onRemoveTag(tag: TagItem) { + if (this.value != undefined) { + this.onChange(this.value.filter((item: TagItem) => item !== 
tag)); + } + } + + sliceTagName(tag: any): string { + if (tag.value != undefined && tag.value.trim() != '') { + return `${tag.name}:${tag.value}`; + } else { + return tag.name; + } + } + + onShowTagsModal() { + this.isManageModalVisible = true; + this.loadTagsTable(); + } + + onManageModalCancel() { + this.isManageModalVisible = false; + } + + onManageModalOk() { + this.isManageModalOkLoading = true; + let value = this.value == undefined ? [] : this.value; + this.checkedTags.forEach(item => { + if (this.value.find((tag: { id: number }) => tag.id == item.id) == undefined) { + value.push(item); + } + }); + this.onChange(value); + this.isManageModalOkLoading = false; + this.isManageModalVisible = false; + } + + onAllChecked(checked: boolean) { + if (checked) { + this.tags.forEach(tag => this.checkedTags.add(tag)); + } else { + this.checkedTags.clear(); + } + } + + onItemChecked(tag: Tag, checked: boolean) { + if (checked) { + this.checkedTags.add(tag); + } else { + this.checkedTags.delete(tag); + } + } +} diff --git a/web-app/src/app/shared/shared.module.ts b/web-app/src/app/shared/shared.module.ts index 4091bc6b49a..3188c585b3b 100644 --- a/web-app/src/app/shared/shared.module.ts +++ b/web-app/src/app/shared/shared.module.ts @@ -17,6 +17,7 @@ import { KeyValueInputComponent } from './components/key-value-input/key-value-i import { MetricsFieldInputComponent } from './components/metrics-field-input/metrics-field-input.component'; import { MonitorSelectMenuComponent } from './components/monitor-select-menu/monitor-select-menu.component'; import { MultiFuncInputComponent } from './components/multi-func-input/multi-func-input.component'; +import { TagsSelectComponent } from './components/tags-select/tags-select.component'; import { ToolbarComponent } from './components/toolbar/toolbar.component'; import { ElapsedTimePipe } from './pipe/elapsed-time.pipe'; import { I18nElsePipe } from './pipe/i18n-else.pipe'; @@ -28,6 +29,7 @@ const ThirdModules: Array> = []; const 
COMPONENTS: Array> = [ KeyValueInputComponent, MultiFuncInputComponent, + TagsSelectComponent, HelpMessageShowComponent, MetricsFieldInputComponent, ToolbarComponent, From 3e50b96322bb925cf186da3b9c0dbcfd76c40af1 Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Wed, 10 Jul 2024 15:42:10 +0800 Subject: [PATCH 005/257] Fixed failure of monitoring form validation (#2258) --- .../alert-setting.component.html | 24 +- .../monitor-form/monitor-form.component.html | 192 +++++++++------ .../monitor-form/monitor-form.component.less | 6 +- .../form-field/form-field.component.html | 171 +++++++++++++ .../form-field.component.less} | 0 .../form-field.component.spec.ts} | 12 +- .../form-field/form-field.component.ts | 81 +++++++ .../form-item/form-item.component.html | 227 ------------------ .../form-item/form-item.component.ts | 37 --- web-app/src/app/shared/shared.module.ts | 4 +- 10 files changed, 392 insertions(+), 362 deletions(-) mode change 100755 => 100644 web-app/src/app/routes/alert/alert-setting/alert-setting.component.html create mode 100644 web-app/src/app/shared/components/form-field/form-field.component.html rename web-app/src/app/shared/components/{form-item/form-item.component.less => form-field/form-field.component.less} (100%) rename web-app/src/app/shared/components/{form-item/form-item.component.spec.ts => form-field/form-field.component.spec.ts} (80%) create mode 100644 web-app/src/app/shared/components/form-field/form-field.component.ts delete mode 100644 web-app/src/app/shared/components/form-item/form-item.component.html delete mode 100644 web-app/src/app/shared/components/form-item/form-item.component.ts diff --git a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.html b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.html old mode 100755 new mode 100644 index b25c9978b1d..5bba7bbb1ab --- a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.html +++ 
b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.html @@ -495,16 +495,20 @@
- + + {{ 'tag.bind' | i18n }} + + + + {{ 'alert.setting.default' | i18n }} diff --git a/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html b/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html index d163ca4455f..c96cf1197a3 100644 --- a/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html +++ b/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.html @@ -20,42 +20,57 @@
- + + {{ hostName ? hostName : ('monitor.host' | i18n) }} + + + + - + + {{ 'monitor.name' | i18n }} + + + + - + + {{ paramDefine.name }} + + + + @@ -67,56 +82,81 @@ - + + {{ paramDefine.name }} + + + + + - + + {{ 'monitor.collector' | i18n }} + + + + + - + + {{ 'monitor.intervals' | i18n }} + + + + + - + + {{ 'tag.bind' | i18n }} + + + + - + + {{ 'monitor.description' | i18n }} + + + + +
diff --git a/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.less b/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.less index a4d26c668b4..65cf2020706 100644 --- a/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.less +++ b/web-app/src/app/routes/monitor/monitor-form/monitor-form.component.less @@ -1,10 +1,8 @@ :host { ::ng-deep { .ant-collapse-content-box { - :last-child { - .ant-form-item { - margin-bottom: 0!important; - } + .ant-form-item:last-child { + margin-bottom: 0!important; } } } diff --git a/web-app/src/app/shared/components/form-field/form-field.component.html b/web-app/src/app/shared/components/form-field/form-field.component.html new file mode 100644 index 00000000000..f03ef101b7e --- /dev/null +++ b/web-app/src/app/shared/components/form-field/form-field.component.html @@ -0,0 +1,171 @@ + + + + + + + + + + + + + + + + + + + + + + + + {{ 'monitor.collector.system.default' | i18n }} + {{ 'collector.mode.public' | i18n }} + + + {{ + item.status == 0 ? ('monitor.collector.status.online' | i18n) : ('monitor.collector.status.offline' | i18n) + }} + + {{ item.name }} + {{ item.ip }} + + {{ item.mode == 'private' ? 
('collector.mode.private' | i18n) : ('collector.mode.public' | i18n) }} + + + + + {{ selected.nzLabel }} + + + + + + + {{ 'common.time.unit.second' | i18n }} + + + + + + + diff --git a/web-app/src/app/shared/components/form-item/form-item.component.less b/web-app/src/app/shared/components/form-field/form-field.component.less similarity index 100% rename from web-app/src/app/shared/components/form-item/form-item.component.less rename to web-app/src/app/shared/components/form-field/form-field.component.less diff --git a/web-app/src/app/shared/components/form-item/form-item.component.spec.ts b/web-app/src/app/shared/components/form-field/form-field.component.spec.ts similarity index 80% rename from web-app/src/app/shared/components/form-item/form-item.component.spec.ts rename to web-app/src/app/shared/components/form-field/form-field.component.spec.ts index 581e39d34ec..971578a9d5b 100644 --- a/web-app/src/app/shared/components/form-item/form-item.component.spec.ts +++ b/web-app/src/app/shared/components/form-field/form-field.component.spec.ts @@ -19,20 +19,20 @@ import { ComponentFixture, TestBed } from '@angular/core/testing'; -import { FormItemComponent } from './form-item.component'; +import { FormFieldComponent } from './form-field.component'; -describe('FormItemComponent', () => { - let component: FormItemComponent; - let fixture: ComponentFixture; +describe('FormFieldComponent', () => { + let component: FormFieldComponent; + let fixture: ComponentFixture; beforeEach(async () => { await TestBed.configureTestingModule({ - declarations: [FormItemComponent] + declarations: [FormFieldComponent] }).compileComponents(); }); beforeEach(() => { - fixture = TestBed.createComponent(FormItemComponent); + fixture = TestBed.createComponent(FormFieldComponent); component = fixture.componentInstance; fixture.detectChanges(); }); diff --git a/web-app/src/app/shared/components/form-field/form-field.component.ts 
b/web-app/src/app/shared/components/form-field/form-field.component.ts new file mode 100644 index 00000000000..35da45aacb0 --- /dev/null +++ b/web-app/src/app/shared/components/form-field/form-field.component.ts @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import { Component, EventEmitter, Input, Output } from '@angular/core'; +import { AbstractControl, ControlValueAccessor, NG_VALIDATORS, NG_VALUE_ACCESSOR, ValidationErrors, Validator } from '@angular/forms'; + +@Component({ + selector: 'app-form-field', + templateUrl: './form-field.component.html', + styleUrls: ['./form-field.component.less'], + providers: [ + { + provide: NG_VALUE_ACCESSOR, + multi: true, + useExisting: FormFieldComponent + }, + { + provide: NG_VALIDATORS, + multi: true, + useExisting: FormFieldComponent + } + ] +}) +export class FormFieldComponent implements ControlValueAccessor, Validator { + constructor() {} + @Input() item!: any; + @Input() extra: any = {}; + + value: any; + validateStatus!: string; + + _onChange: Function = () => {}; + _onTouched: Function = () => {}; + + writeValue(value: any): void { + this.value = value; + } + + registerOnChange(fn: Function): void { + this._onChange = fn; + } + + registerOnTouched(fn: Function): void { + this._onTouched = fn; + } + + validate(control: AbstractControl): ValidationErrors | null { + // if (!(control.dirty) && !(control.touched)) return null; + let { value } = control; + if (this.item.required && (value === null || value === undefined || value === '')) { + this.validateStatus = 'error'; + return { + required: { + valid: false + } + }; + } + this.validateStatus = ''; + return null; + } + + onChange(value: any) { + this._onChange(value); + } +} diff --git a/web-app/src/app/shared/components/form-item/form-item.component.html b/web-app/src/app/shared/components/form-item/form-item.component.html deleted file mode 100644 index 9d765662879..00000000000 --- a/web-app/src/app/shared/components/form-item/form-item.component.html +++ /dev/null @@ -1,227 +0,0 @@ - - - - {{ item.name }} - - - - - - - {{ item.name }} - - - - - - - {{ item.name }} - - - - - - - {{ item.name }} - - - - - - - {{ item.name }} - - - - - - - {{ item.name }} - - - - - - - - - {{ item.name }} - - - - - - - {{ item.name 
}} - - - - - - - {{ item.name }} - - - - - {{ 'monitor.collector.system.default' | i18n }} - {{ 'collector.mode.public' | i18n }} - - - {{ - item.status == 0 ? ('monitor.collector.status.online' | i18n) : ('monitor.collector.status.offline' | i18n) - }} - - {{ item.name }} - {{ item.ip }} - - {{ item.mode == 'private' ? ('collector.mode.private' | i18n) : ('collector.mode.public' | i18n) }} - - - - - {{ selected.nzLabel }} - - - - - - {{ item.name }} - - - - - {{ 'common.time.unit.second' | i18n }} - - - - - {{ item.name }} - - - - - - - - {{ item.name }} - - - - - - - diff --git a/web-app/src/app/shared/components/form-item/form-item.component.ts b/web-app/src/app/shared/components/form-item/form-item.component.ts deleted file mode 100644 index 5559dce1cd5..00000000000 --- a/web-app/src/app/shared/components/form-item/form-item.component.ts +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import { Component, EventEmitter, Input, Output } from '@angular/core'; - -@Component({ - selector: 'app-form-item', - templateUrl: './form-item.component.html', - styleUrls: ['./form-item.component.less'] -}) -export class FormItemComponent { - constructor() {} - @Input() item!: any; - @Input() value!: any; - @Input() extra: any = {}; - @Output() readonly valueChange = new EventEmitter(); - - onChange(value: any) { - this.valueChange.emit(value); - } -} diff --git a/web-app/src/app/shared/shared.module.ts b/web-app/src/app/shared/shared.module.ts index 3188c585b3b..bea3cd89d07 100644 --- a/web-app/src/app/shared/shared.module.ts +++ b/web-app/src/app/shared/shared.module.ts @@ -11,7 +11,7 @@ import { NzRadioComponent, NzRadioGroupComponent } from 'ng-zorro-antd/radio'; import { NzSwitchComponent } from 'ng-zorro-antd/switch'; import { NzTagModule } from 'ng-zorro-antd/tag'; -import { FormItemComponent } from './components/form-item/form-item.component'; +import { FormFieldComponent } from './components/form-field/form-field.component'; import { HelpMessageShowComponent } from './components/help-message-show/help-message-show.component'; import { KeyValueInputComponent } from './components/key-value-input/key-value-input.component'; import { MetricsFieldInputComponent } from './components/metrics-field-input/metrics-field-input.component'; @@ -33,7 +33,7 @@ const COMPONENTS: Array> = [ HelpMessageShowComponent, MetricsFieldInputComponent, ToolbarComponent, - FormItemComponent, + FormFieldComponent, MonitorSelectMenuComponent ]; const DIRECTIVES: Array> = [TimezonePipe, I18nElsePipe, ElapsedTimePipe]; From 8f9143001fe36c0530e2c99c139b89959d7ef24c Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Wed, 10 Jul 2024 16:21:53 +0800 Subject: [PATCH 006/257] Fixed failure of alert form validation (#2259) --- .../alert-setting/alert-setting.component.ts | 58 ++++--------------- 1 file changed, 12 insertions(+), 46 deletions(-) diff --git 
a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts index 84303f31d6e..90c511fdacc 100644 --- a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts +++ b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts @@ -17,7 +17,8 @@ * under the License. */ -import { Component, Inject, OnInit } from '@angular/core'; +import { Component, Inject, OnInit, ViewChild } from '@angular/core'; +import { NgForm } from '@angular/forms'; import { I18NService } from '@core'; import { ALAIN_I18N_TOKEN } from '@delon/theme'; import { NzCascaderFilter } from 'ng-zorro-antd/cascader'; @@ -33,12 +34,9 @@ import { AlertDefine } from '../../../pojo/AlertDefine'; import { AlertDefineBind } from '../../../pojo/AlertDefineBind'; import { Message } from '../../../pojo/Message'; import { Monitor } from '../../../pojo/Monitor'; -import { TagItem } from '../../../pojo/NoticeRule'; -import { Tag } from '../../../pojo/Tag'; import { AlertDefineService } from '../../../service/alert-define.service'; import { AppDefineService } from '../../../service/app-define.service'; import { MonitorService } from '../../../service/monitor.service'; -import { TagService } from '../../../service/tag.service'; const AVAILABILITY = 'availability'; @@ -54,9 +52,9 @@ export class AlertSettingComponent implements OnInit { private appDefineSvc: AppDefineService, private monitorSvc: MonitorService, private alertDefineSvc: AlertDefineService, - private tagSvc: TagService, @Inject(ALAIN_I18N_TOKEN) private i18nSvc: I18NService ) {} + @ViewChild('defineForm', { static: false }) defineForm: NgForm | undefined; search!: string; pageIndex: number = 1; pageSize: number = 8; @@ -150,21 +148,6 @@ export class AlertSettingComponent implements OnInit { this.editAlertDefine(alertDefineId); } - onEditAlertDefine() { - // 编辑时只能选中一个 - if (this.checkedDefineIds == null || this.checkedDefineIds.size === 0) { - 
this.notifySvc.warning(this.i18nSvc.fanyi('common.notify.no-select-edit'), ''); - return; - } - if (this.checkedDefineIds.size > 1) { - this.notifySvc.warning(this.i18nSvc.fanyi('common.notify.one-select-edit'), ''); - return; - } - let alertDefineId = 0; - this.checkedDefineIds.forEach(item => (alertDefineId = item)); - this.editAlertDefine(alertDefineId); - } - updateAlertDefine(alertDefine: AlertDefine) { this.tableLoading = true; const updateDefine$ = this.alertDefineSvc @@ -543,6 +526,15 @@ export class AlertSettingComponent implements OnInit { } onManageModalOk() { + if (this.defineForm?.invalid) { + Object.values(this.defineForm.controls).forEach(control => { + if (control.invalid) { + control.markAsDirty(); + control.updateValueAndValidity({ onlySelf: true }); + } + }); + return; + } this.isManageModalOkLoading = true; this.define.app = this.cascadeValues[0]; this.define.metric = this.cascadeValues[1]; @@ -607,7 +599,6 @@ export class AlertSettingComponent implements OnInit { ); } } - // end 新增修改告警定义model // start 告警定义与监控关联model @@ -714,29 +705,4 @@ export class AlertSettingComponent implements OnInit { }); } // end 告警定义与监控关联model - //查询告警阈值 - onFilterSearchAlertDefinesByName() { - this.tableLoading = true; - let filter$ = this.alertDefineSvc.getAlertDefines(this.search, this.pageIndex - 1, this.pageSize).subscribe( - message => { - filter$.unsubscribe(); - this.tableLoading = false; - this.checkedAll = false; - this.checkedDefineIds.clear(); - if (message.code === 0) { - let page = message.data; - this.defines = page.content; - this.pageIndex = page.number + 1; - this.total = page.totalElements; - } else { - console.warn(message.msg); - } - }, - error => { - this.tableLoading = false; - filter$.unsubscribe(); - console.error(error.msg); - } - ); - } } From c70cc5d9b7f25db62d56220b8f4360adf3260a90 Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Wed, 10 Jul 2024 23:17:02 +0800 Subject: [PATCH 007/257] Fixed failure of form validation (#2261) --- 
.../alert-converge.component.ts | 25 ++++++++++--------- .../alert-silence/alert-silence.component.ts | 16 +++++++++--- .../setting/collector/collector.component.ts | 12 +++++++-- .../message-server.component.html | 8 +++--- .../message-server.component.ts | 15 ++++++++--- 5 files changed, 52 insertions(+), 24 deletions(-) diff --git a/web-app/src/app/routes/alert/alert-converge/alert-converge.component.ts b/web-app/src/app/routes/alert/alert-converge/alert-converge.component.ts index 4f82af4aa6a..b890ae6aea2 100644 --- a/web-app/src/app/routes/alert/alert-converge/alert-converge.component.ts +++ b/web-app/src/app/routes/alert/alert-converge/alert-converge.component.ts @@ -17,9 +17,10 @@ * under the License. */ -import { Component, Inject, OnInit } from '@angular/core'; +import { Component, Inject, OnInit, ViewChild } from '@angular/core'; +import { NgForm } from '@angular/forms'; import { I18NService } from '@core'; -import { ALAIN_I18N_TOKEN, SettingsService } from '@delon/theme'; +import { ALAIN_I18N_TOKEN } from '@delon/theme'; import { NzModalService } from 'ng-zorro-antd/modal'; import { NzNotificationService } from 'ng-zorro-antd/notification'; import { NzTableQueryParams } from 'ng-zorro-antd/table'; @@ -40,11 +41,11 @@ export class AlertConvergeComponent implements OnInit { private modal: NzModalService, private notifySvc: NzNotificationService, private alertConvergeService: AlertConvergeService, - private settingsSvc: SettingsService, private tagService: TagService, @Inject(ALAIN_I18N_TOKEN) private i18nSvc: I18NService ) {} + @ViewChild('ruleForm', { static: false }) ruleForm: NgForm | undefined; pageIndex: number = 1; pageSize: number = 8; total: number = 0; @@ -211,15 +212,6 @@ export class AlertConvergeComponent implements OnInit { tagsOption: any[] = []; matchTags: string[] = []; convergeDates!: Date[]; - dayCheckOptions = [ - { label: this.i18nSvc.fanyi('common.week.7'), value: 7, checked: true }, - { label: this.i18nSvc.fanyi('common.week.1'), 
value: 1, checked: true }, - { label: this.i18nSvc.fanyi('common.week.2'), value: 2, checked: true }, - { label: this.i18nSvc.fanyi('common.week.3'), value: 3, checked: true }, - { label: this.i18nSvc.fanyi('common.week.4'), value: 4, checked: true }, - { label: this.i18nSvc.fanyi('common.week.5'), value: 5, checked: true }, - { label: this.i18nSvc.fanyi('common.week.6'), value: 6, checked: true } - ]; onNewAlertConverge() { this.converge = new AlertConverge(); @@ -283,6 +275,15 @@ export class AlertConvergeComponent implements OnInit { ); } onManageModalOk() { + if (this.ruleForm?.invalid) { + Object.values(this.ruleForm.controls).forEach(control => { + if (control.invalid) { + control.markAsDirty(); + control.updateValueAndValidity({ onlySelf: true }); + } + }); + return; + } this.converge.tags = []; this.matchTags.forEach(tag => { let tmp: string[] = tag.split(':'); diff --git a/web-app/src/app/routes/alert/alert-silence/alert-silence.component.ts b/web-app/src/app/routes/alert/alert-silence/alert-silence.component.ts index a2cb4a178cf..bd96c93e1c9 100644 --- a/web-app/src/app/routes/alert/alert-silence/alert-silence.component.ts +++ b/web-app/src/app/routes/alert/alert-silence/alert-silence.component.ts @@ -17,9 +17,10 @@ * under the License. 
*/ -import { Component, Inject, OnInit } from '@angular/core'; +import { Component, Inject, OnInit, ViewChild } from '@angular/core'; +import { NgForm } from '@angular/forms'; import { I18NService } from '@core'; -import { ALAIN_I18N_TOKEN, SettingsService } from '@delon/theme'; +import { ALAIN_I18N_TOKEN } from '@delon/theme'; import { NzModalService } from 'ng-zorro-antd/modal'; import { NzNotificationService } from 'ng-zorro-antd/notification'; import { NzTableQueryParams } from 'ng-zorro-antd/table'; @@ -40,11 +41,11 @@ export class AlertSilenceComponent implements OnInit { private modal: NzModalService, private notifySvc: NzNotificationService, private alertSilenceService: AlertSilenceService, - private settingsSvc: SettingsService, private tagService: TagService, @Inject(ALAIN_I18N_TOKEN) private i18nSvc: I18NService ) {} + @ViewChild('ruleForm', { static: false }) ruleForm: NgForm | undefined; pageIndex: number = 1; pageSize: number = 8; total: number = 0; @@ -291,6 +292,15 @@ export class AlertSilenceComponent implements OnInit { ); } onManageModalOk() { + if (this.ruleForm?.invalid) { + Object.values(this.ruleForm.controls).forEach(control => { + if (control.invalid) { + control.markAsDirty(); + control.updateValueAndValidity({ onlySelf: true }); + } + }); + return; + } this.silence.tags = []; this.matchTags.forEach(tag => { let tmp: string[] = tag.split(':'); diff --git a/web-app/src/app/routes/setting/collector/collector.component.ts b/web-app/src/app/routes/setting/collector/collector.component.ts index c4257d8245d..72c236f6206 100644 --- a/web-app/src/app/routes/setting/collector/collector.component.ts +++ b/web-app/src/app/routes/setting/collector/collector.component.ts @@ -17,7 +17,8 @@ * under the License. 
*/ -import { Component, Inject, OnInit } from '@angular/core'; +import { Component, Inject, OnInit, ViewChild } from '@angular/core'; +import { NgForm } from '@angular/forms'; import { I18NService } from '@core'; import { ALAIN_I18N_TOKEN } from '@delon/theme'; import { NzMessageService } from 'ng-zorro-antd/message'; @@ -42,6 +43,7 @@ export class CollectorComponent implements OnInit { @Inject(ALAIN_I18N_TOKEN) private i18nSvc: I18NService ) {} + @ViewChild('deployForm', { static: false }) deployForm: NgForm | undefined; pageIndex: number = 1; pageSize: number = 8; total: number = 0; @@ -286,7 +288,13 @@ export class CollectorComponent implements OnInit { } onDeployCollectorModalOk() { - if (this.collector == '' || this.collector == undefined) { + if (this.deployForm?.invalid) { + Object.values(this.deployForm.controls).forEach(control => { + if (control.invalid) { + control.markAsDirty(); + control.updateValueAndValidity({ onlySelf: true }); + } + }); return; } this.isDeployCollectorModalOkLoading = true; diff --git a/web-app/src/app/routes/setting/settings/message-server/message-server.component.html b/web-app/src/app/routes/setting/settings/message-server/message-server.component.html index 4fd77b216b1..2d77c3592fd 100644 --- a/web-app/src/app/routes/setting/settings/message-server/message-server.component.html +++ b/web-app/src/app/routes/setting/settings/message-server/message-server.component.html @@ -80,25 +80,25 @@ {{ 'alert.notice.sender.mail.username' | i18n }} - + {{ 'alert.notice.sender.mail.password' | i18n }} - + {{ 'alert.notice.sender.mail.ssl' | i18n }} - + {{ 'alert.notice.sender.enable' | i18n }} - + diff --git a/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts b/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts index 49cbfaaedf2..d3392fa007d 100644 --- a/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts +++ 
b/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts @@ -17,7 +17,8 @@ * under the License. */ -import { ChangeDetectorRef, Component, Inject, OnInit } from '@angular/core'; +import { Component, Inject, OnInit, ViewChild } from '@angular/core'; +import { NgForm } from '@angular/forms'; import { I18NService } from '@core'; import { ALAIN_I18N_TOKEN } from '@delon/theme'; import { NzMessageService } from 'ng-zorro-antd/message'; @@ -36,12 +37,11 @@ export class MessageServerComponent implements OnInit { constructor( public msg: NzMessageService, private notifySvc: NzNotificationService, - private cdr: ChangeDetectorRef, private noticeSenderSvc: GeneralConfigService, @Inject(ALAIN_I18N_TOKEN) private i18nSvc: I18NService ) {} - senders!: EmailNoticeSender[]; + @ViewChild('senderForm', { static: false }) senderForm: NgForm | undefined; senderServerLoading: boolean = true; loading: boolean = false; isEmailServerModalVisible: boolean = false; @@ -84,6 +84,15 @@ export class MessageServerComponent implements OnInit { } onSaveEmailServer() { + if (this.senderForm?.invalid) { + Object.values(this.senderForm.controls).forEach(control => { + if (control.invalid) { + control.markAsDirty(); + control.updateValueAndValidity({ onlySelf: true }); + } + }); + return; + } const modalOk$ = this.noticeSenderSvc .saveGeneralConfig(this.emailSender, 'email') .pipe( From 467c595802b761b655e2ca0250d1ff2680076e0b Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Thu, 11 Jul 2024 15:58:05 +0800 Subject: [PATCH 008/257] Fixed issue of plugin-upload form (#2267) --- .../setting/plugins/plugin.component.html | 15 ++++++-- .../setting/plugins/plugin.component.ts | 37 +++++++++++++------ 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/web-app/src/app/routes/setting/plugins/plugin.component.html b/web-app/src/app/routes/setting/plugins/plugin.component.html index 3eec55b2462..fd749ed975a 100644 --- 
a/web-app/src/app/routes/setting/plugins/plugin.component.html +++ b/web-app/src/app/routes/setting/plugins/plugin.component.html @@ -124,15 +124,24 @@
{{ 'plugin.name' | i18n }} - + {{ 'plugin.jar.file' | i18n }} - - + + - - - - - - - - - - - - - - {{ field.name ? field.name : field.value }} - - {{ - field.type === 0 - ? ('alert.setting.number' | i18n) - : field.type === 3 - ? ('alert.setting.time' | i18n) - : ('alert.setting.string' | i18n) - }} - - - {{ field.unit }} - - - - - - AND + + + + + + + + + + + + + + + + + + - - - - - - - -
+ {{ field.name ? field.name : field.value }} + + {{ + field.type === 0 + ? ('alert.setting.number' | i18n) + : field.type === 3 + ? ('alert.setting.time' | i18n) + : ('alert.setting.string' | i18n) + }} + + + {{ field.unit }} + + + + + + + + + + + + + diff --git a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.less b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.less index 50dfab550d3..c2e03f5d9f1 100644 --- a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.less +++ b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.less @@ -8,6 +8,15 @@ } } + .tree { + list-style: none; + margin: 4px 0 2px; + } + + .tree:empty { + margin: 0px; + } + .ruleset-invalid { border: none!important; diff --git a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts index 8dac902cc9a..7ad32d237a9 100644 --- a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts +++ b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts @@ -18,7 +18,7 @@ */ import { Component, Inject, OnInit, ViewChild } from '@angular/core'; -import { FormBuilder, FormControl, NgForm } from '@angular/forms'; +import { AbstractControl, FormBuilder, FormControl, NgForm, ValidationErrors } from '@angular/forms'; import { I18NService } from '@core'; import { ALAIN_I18N_TOKEN } from '@delon/theme'; import { Rule, RuleSet, QueryBuilderConfig, QueryBuilderClassNames } from '@kerwin612/ngx-query-builder'; @@ -56,9 +56,9 @@ export class AlertSettingComponent implements OnInit { @Inject(ALAIN_I18N_TOKEN) private i18nSvc: I18NService, private formBuilder: FormBuilder ) { - this.qbFormCtrl = this.formBuilder.control(this.qbData); + this.qbFormCtrl = this.formBuilder.control(this.qbData, this.qbValidator); } - @ViewChild('defineForm', { static: false }) defineForm: NgForm | undefined; + @ViewChild('defineForm', { static: false }) 
defineForm!: NgForm; search!: string; pageIndex: number = 1; pageSize: number = 8; @@ -76,6 +76,7 @@ export class AlertSettingComponent implements OnInit { ]; qbClassNames: QueryBuilderClassNames = { row: 'row', + tree: 'tree', rule: 'br-4 rule', ruleSet: 'br-4 ruleset', invalidRuleSet: 'br-4 ruleset-invalid' @@ -90,6 +91,12 @@ export class AlertSettingComponent implements OnInit { condition: 'and', rules: [] }; + qbValidator = (control: AbstractControl): ValidationErrors | null => { + if (!control.value || !control.value.rules || control.value.rules.length === 0) { + return { required: true }; + } + return null; + }; qbFormCtrl: FormControl; ngOnInit(): void { @@ -651,8 +658,7 @@ export class AlertSettingComponent implements OnInit { } resetQbData(qbData: RuleSet) { - this.qbData = qbData; - this.qbFormCtrl = this.formBuilder.control(this.qbData); + this.qbFormCtrl.reset((this.qbData = qbData)); } resetManageModalData() { @@ -663,6 +669,7 @@ export class AlertSettingComponent implements OnInit { } onManageModalOk() { + this.defineForm.form.addControl('ruleset', this.qbFormCtrl); if (this.defineForm?.invalid) { Object.values(this.defineForm.controls).forEach(control => { if (control.invalid) { From 0b81717a9e576e904196e9337be4debce2fd88c2 Mon Sep 17 00:00:00 2001 From: Calvin Date: Sun, 28 Jul 2024 09:43:42 +0800 Subject: [PATCH 085/257] [docs] update img of new_committer_process.md (#2389) Co-authored-by: tomsun28 --- .../img/docs/community/icla-content-1.png | Bin 84250 -> 111513 bytes .../img/docs/community/icla-content-3.png | Bin 94337 -> 90867 bytes 2 files changed, 0 insertions(+), 0 deletions(-) diff --git a/home/static/img/docs/community/icla-content-1.png b/home/static/img/docs/community/icla-content-1.png index 2423fef5e128aa428b474da0d087637cd73c4459..b6213439490b8e6ba3a1de76a9cc771d4605712f 100644 GIT binary patch literal 111513 zcmdR$V{~NSpY^Ldw(XAXimi@q+vwQnSe2 zMyYV$SS8_P&X$&?1EzSaER^a-ay!2I27y#3?;x|bv0pjyjg7OP!xK?9me;$)Gs|BF 
zuevYJd7`4>S}{AS8pPwgV)GdZ2grA!nFu>X7Jek^wElDmp2#7}OSIBxxjivizQN8U z9MgzL%xMjJt~w+a9Oz!Et3jw_-~}(9f*D05r1&0B8II$ziygBu)dr_u`m;*~1OX82 zp`N93#*2UiLsVRXk$$XObjk%I$xe48#8HCtH!u}mb0*hKvNO=j!ZakX>2 z&e|)tAE++k04M>dpg9WN#U&RmeXcK-zOMi-pG!(jRMNne{A!y`y&+Ok@MJAAJ@GIGmM#D_-eT1DbXo2pcVsbf9LuM>ld?xfp*iz~3>qBprzcIt5@GP>ccrfRTA4vLwqv-;cs2ILb3x|Z{L@d(LQDqk6oy2& zX*<4OjyjBQ_3fu29WJ7gV`?yqkdsq0n~IR2Ia?;x;Fax4l5RFIyDbJIjb{RNHD(h5RnJ zl@RdX)KMmw_v3{TC=){&z9bhX{(MOUnePndKo8blv*Avu|1pBGD=f#AJJ_{MQNeQ} zCZ0Tzz&{5F5+g%{Z;=Kf&V5Y(nGH(}e6sxaH95OkzPSI|=yQMtS5c<;-|hcyr-gV_ zi1PpQE<^>H6s9cUqF;UveSL(&d5_$VWG_mKA0i`?IiB`fF_+PG;Rn4i^^``X<(*#2 z>tggu><8}rLM3@35;m;&N$h*-!PElZiWGc(tv#{)-GUiuP_tz7J)d=F!aF{vBpVnQ z0ec-mZiR6QM%u)&5!~}-kxniY0*T*$=U3sTq2CEK7b?Bl`=YbuI(6TG1nzGQI)p;a zhGLg@bd|?n4>^v-nx+Y`Q}4)Q<}3S6Pt>r89_!_k;jcUz7i`I#fqga4W&<@bq-38j zhjL{EWX(=+)~33;+Hs*ID6tj@`AG{aUU{l&?HJ7l^hwcBGkC-Oey>Z2XlYU_w~Xpz zTd@@FjOS$7LfLq~;iPw(xahm`T~2MjYZ{_nhH6yec)Jl*`UXDN8vFQnS!NGXHS@A1S$5geCNhGax1o_{V#vrpUl#YM80m)uPcU{^Qs^#m+UoZe z#fr5*32G(AxxX$pD~8IA@}kvcci7GOruMsz_XAYOthWD1>wC<7*Mf(I_?%sqSCkCy zpDnk^PsX`0o84Bo9%|)=qh?&Mk8>RWSr{x_$mCL92ETX%`4n67r(aJw(dacI;{;T5 znNqNsUQR;kcf`^60@r@K6*){UHcW$-^GViBM(;ZQn@*Q8W|(Ep+Eoa!fj)P1pqKOiqV%nVL6GJlPJ8;>@Va()NI9viH99)aC$+4zq0lBgna^}xENly^PKNAkEgT0t+}xX89kp~Z8QtOXgY0u zEB3vY?5M=q&(W-j5jr1Y?dT@T;BwmbAhG*1>ve%FPb(Yzo!WK^7d=fvX-yAyP25b} zZrQIv!ETYq0{q_S>*5BI6EkZJijyB`TBM_k>{sg|oVh*kSotK=w?mE_Yx2EH9Df#I zko*r@?iAu!5qRSj#0;+NLOV38#6mqD0-RUjJma;ckq)9pRL7IzFw?{PnV462{YjNz ziU|2Lp`lW1kVr$OM|`OE&4XRn^rh~xv~)Raw2C5p&#aGyLFjQqlA3F(_d^R$E+zqg zD)3vz_Vb$R^&QJ6d;8P3?JH?!aNKdy{peE#PjkP;3f8GZa+_x%rv-)!=AKVkX#Qho zLX;~-{Q#+6iv(kfh|SSk@kkRtD$-lZ%o52X0x*+SPv6Z%bMAbQQ0i`f7HowRVyU`fxx;6OJ;JW5a32# zW~Cb=CD>;$03hc+WRzxisJqCt4Iu^JcjGd0-gf!sc~uYz?K=|@GA^mV!i)Pto)7_o z1Ar+cG5aSlfZ5r7nKRqiszk&fZJmUgMFSJXWQ^QFr_yh-@dr1j+9{gMjNQY$#P0dX zOqvk2b3*I{A$Cb}Kky2k3lRC1v8mF*Z8?~0w? 
zYs-?wc4Ni;e0;M8Y9T@TV)($0mh2pp66+HtzutF=iMsbm`?a>r+88jK${-8HejUvPMehKeyGP%7Cl zQc3dw%~kw6TDZE78n}(3E1MuE)@LRL#?k_>+qsm$Nzwr-wNU9cxy*10#)nO^1XOYb zYD45&!%Cq=5fp{t-!%{VA$O}q3LR!AZK?`_La<@88;TSlLODIFN`F9t#F&`iDK+RY zBm0bMn$*xHU=B-jmSLsJVN%}ZFTT35QuMRA;BkB0qfrN$oIRT6wyj(`6lmUOtzC}s zKnExYSRE9ORKD5_nA|TVKd%3fRY$+<$=6+!DV>t2u@cdZN8ihxm^{OJYPG9$;Nl)aj!1+%1LvP=D+U%m;O=>I$S!fns;=ADihBs@a*4$Po z-ir*j|3MW{7Lp_=*t}nxWocZQac{1KYK$A$pDM{Y)=ut^Jj}vzV^+mI9zjEvPo1r& z6J?3z%n*alr*RQ^y_WB8A16%wh!>Y`NX*_z4X-q>c}xoMsHxv48(Eg0t1Ku5)F0{rgW z4Ttaw+PHYG+F~IDTB_*XpQB40DlyptCxUik*wYUfx?Ig~UNzcAUf{wIT8r`xNXxhP zhckYeyp6U?@69-9HqYVaSL-qB6tocs{cLyz;v17ukT6;vmcJkZ51K0MA~_j>M*By7 zHHGpZX1As0XrT)5(v7`ka1`A%3%O)?4X@KB&{gU$3VH^*;zxlsO>_lc&oYw06+dY; zpUa`N?F~&l_~kfhVQ4rc$OEREc0R4eKVBwxKzrGcp$Dh#BbsqIcr0P)zW4E3(gy)O z3?XQU(9fK7mA`FJ)v(|p{a%*3o5M|TR=hnL=l$30aThBNQOh+JYMWmfWLYTj{n}P$ zwWOFsP!Z~c8&v7jY(oet_!N*OCS1OZQtF}#L6guuTStAGp}J5}Y(p`%B+0-XzL`;} zniA|ONb0*%4)7FBGQE|z8)OlAwqiRf1gQsC{Dw{YTnfTy>%3UbS3$p(*3LO)RE(yy zvYtau@?Ewy$psB9c37(x8Z$zdK3j^MPd*|RxQv`CYPgH(oj9MW%v+A!7xcBk+UABE!a#3O}ocxN+3bDkug=)(rY))n6h1vS+2La&t zLv|8#IouGq$h0-D*fu`rs~nTXo6%h`yK%5x1fFm!1l3H!L{%*xLsqn`(IX zLF;oS^ZU%Y+4tqq;Pq2X0K9p1-OcDpvwMs-`inG?aoQeT1j+R@i?jn?%o*nGT=$qb z(Gae#ZQw~17Dl6P5(76vdl;VEp-uQJC`FGL3iDQ=lL>#@!qNx;h#LFiucPNB_NCw) zgh*jr+U2-vY7&uCPO01*XfR?1LO4x0O;v^ zUqOs!SS^s#I703_vbSSdu}BJ5`&K4=Kt2;XPM%5yt++fVNghY54hAajWjkI771uSQ z(oiwsE=R_<(iAGEV*5m416W)$d*OCC&*Q`UQb2^44{BxIsU371iORqQ1X!4A&C9c{ zVQsXrFVsV*Ke4he!pryxTJ(CyKq?z!PlaqC0sw-22?;Z9rY+^|_xG(8b17=R24S1= zPo_WTe1{K3J9v^91l%*UX@03;ny>MB8Mt=hl?Sm3?Qy1SQgt?Jr8e@m{0_4j7f;4i zN9)|0uD(C>3NwE5;02PMp0B zgmr)iQ=mczA}gsW(I}K8WQQSGVe|UvCeqP!X@0h{x1y|xQ;yhu8Xgn^nWg1Wr~sj2 zJD#3<=@SPA4KoVZpWS9FRvkAdFR`}G>P(Az^k!kC336s%Lyk7NrTJeGGy|%|G8I@t%TgS5ppltG`LK z&OIC((|7zCKqBGCr9Upz{*l-s0ksz#ukOBoxCY|)kH_Mb9vetEk8v#LvPsyNSd;v6 zoOz0omcGgw^}gRnuFPuo%wZc$lQqVqap-B+OHWbUzfaTJy7sZP8;&G= zdwgEc={nxO-)4UB@mev?{<(>iKnxxNrdms-PSxFQ*2!pLXv;Os0 
zOz=Q#Kk1Wm-Zx5iTV3`=I$}Cn>k^X^J4P+LTz|`jy2yYRjowQMr|)Z}eX;xE`0YA? zx&t_23_P~U+q?6g^k(C|tIF=cwy|V?x>hU}+Ziwz?63OpsZ1bqRe7dISj0)eR4m%K+c`OG;zfmC3 zcz2uS#La}od{>hAc6D{q+gMl$fvmDaE)Wsz^+KnK%DxVX6ZaKgfczDLp5 z*WcgY$2Uep@d*Gw5DwQ~BeOxsX1Van3j+WQ!lE*~FuOH^QnK&W0Dw*ef>VT{=N0n` zu5a7pV{j7l?>lR@quDmlkr`sTKRn1G0fV&btujqbW~&cl3oKHS0RL9r$D1YWPb=+h zd}x!Y!Kh{}MCx+iXns6=9LMyTakbrx_3EOT`nBce`P)i#*Un9?!;QlqF z$OZ)<$nce~#ilo&^gOKqT;6VgSh|{ZT}{yAur&VUsw7i@Ja}n|Ts3rA;Q@Fo8-0u2 zU0U<{S)K54-|nVk-)#c#sp!=p>@onZZ27(*bX}Df?yqfj;)4gFi&iY$tsqCP{HkQo( zzP=8+1d%RKo)u1<6Rj10p`1nua(k_`MK5IZlUTZKd|xW6meHh0SB@|x=T1H&+Sb-q&B%`6 z&7Ftr@ZKb&T}fpY8(VQ5Nb}E2xZJs&o@6b42OvF7+?y`Gab7F2wVtwY9RnJ>;;`v# z$;*u<%znI<5?8;#9|f`Iy?k9P+4*5#&&(!jXpVz<+K`p$yZM6GO>5iKraz+2S@fgAGAu% zq~O!l%db!MT60K8T@JF58Qb%3*VXF?cnQ>ndp8a9y(y`l6#cviX;b}%7u9e}O3>K{ zM-ytQA)OioSOUltWml7(Ox=ce9Y%_?@I0S;*q-fVk|far*EGbp5p_1Wmj;$pKw=gL z%kEJcPi|7tG!gJxq>|Uo^Bq{C*eyeum0f=haF3l};EfL(t}|NsQkV7E&I7X@K^G_P zXcUluP6xS|xo{Q@LVoc~#X6zVh13$zqiH26mZ#?LQu^^fO;Jm=j*a|?Os!*`YqfS~ z*-;2reL#ELR+z}GWiYn|WlF}UPJ3l>1ROS#n;2U8e)E$H)0$BHqUO_xM4B6mb!u`- zarKyROV41_VS}M66y8=Vu!e~cbev!s`-2n_l7xMVWEShR?B}s;H)LTAN0!OhaMi~Y z%a;yllKeJNEJ|&rs(aJftEIc@42fbiHKDNSQDBLt1y8+Y1I8TE&9th-) z4~Kz1G%M94nok#vta<`8m60Pisi{$!j6@hVmH1Y#x>h2ct5Le=p=GSVK%>(2Df?l3 z)%9H8F3d2^t~7(jiLyqDmP%IE3V9=ASMUPzR6OQZ(3_3v!LV}O22)Z|N=L^kKy)O` z-uB!e_kEpl!bDN(CNLhgyLV}AOuTR^c=`M60k|0y5}MF(TyIpXW7NR;!Fq7z+`;d7 zBTVkte9doFlJ@!Bm_HZ@Y@9)y#S)3_?)XXZq@C_Bh`KyljtShHiR9%cXU${eFS`LG zG;iW!(&~zO-LH!`s9DCFC(BDA_wB%v<;ZSZHk5tx)tC{Q1*YT4vAL2eW$G+EJSmb$ z?X7uZujgE@0ST)690G0yPBC`=FfJ;~;U!uhPWbf%>!4(wMg|uGINbIm)j=rcwx+eh-8nt(?VPQ{UMsDL8gc)Lq zm=#2?W1sN1T~qLz7blQ0g5&`#d?(fmnjEyrbRjxl5^CrmU)m4%8Ed)nz^c{3b};C4 zOXbio1bwCWB&{3Ji%L$?{hYVg1{?tR~`JrxbJql_s9F3L1O_|Ot4f} z;in$6-iY7BrPSnkAr<*fj2)jcfJZHluL95?C(_`<-1w^fQk5_0?eXGUYwMR|vqye0fMMH^alL}{X!YWb3eI-Lpkx6NY^3&MKlc9JF+7a?z9~_dWZbm7GZ-Z6$w118& zE*&>-I9u63fi|^J6riUWrF~<=tB(mN`VzC(^CUpF+hG526S3&4uaTD8>d=|ZwGi)d 
z<5#NS&AaMf_bb23cK$TRzG4DwX~w{f9<$4=W(i!{d@%E|eA0PvSUNbO;O8pnyxS1P z2D7l|J5;6-hTz;GTAoKCIuVg&r%@GKn%SDT-t$(L?k1UhPtV&TiX&Nb{&A|-@Mldu zr5~7#qBXTynTe~Kvo)vw(G!X~A0_rMpUSj_T74dzkKMIRbLpUS=zX;&mD73JNDRoy zZBdfuFA0@%V@x`93Hg_vv2#=#pOMM?%70s7FkXAEvA^kGU~`2DR?nMHf;m_&WoVR?45M z>!9GA9-B1lqseuxO4H+IC@9rJdE{U@!y_$CoUH1LeBqjI=CXd94wizxj|-5FNw`YJ@aYY64wTr| zA^zIisigxqh`f;CK+E?{*MdW?&Pp$TFvOkj+AYQm9b_a1+%YHY^??WDZ%{4ctx}AJ z%ScB}t^wtmbAowHccLd5GmLS1k=d?z`~0VV8GF;3m6vd)7#bqKP#-NO^clBiM*eEj zO+U}cCYIhj*^i;Ty=c#3M#znz+0xSYquCkSt~qTMt`D41R@M#Av?d52VL)=baJzsW z2e1hSR;WXxzDguJNl6SLfI}{S#SGL1wqOR@>r+CxtUjDhVy4%fisx!HOSMX6eA&64 zm{F`|G3eg3BhLCRWA&AmcYidcY(F?TAa3ptiFc`|eFIuS-q})|H*GsrgJ_7QwcCY@ zHq%O1eV5eN_NHr-BHf+?p`yQRqQRCj#je?u3=NlEF zmnqpo)L_Rj!=Qlupp-zH-&@a64QbLCL!XGFp1@^^49Z2Pe@?hDETtH+ne{YK$U+kj zMYcKI_0O!rBA_Mtigc*SX&yCMY6aer(WD>6w>CLh9Iw|*QA6bY2oBVK-}Ei`+eUFg zS#(WsV@LS?F6o=kQxpy*S_a(w*cwjAgU-`Pq=(|4qb3& zfo^B4TlCiJ+A5Da*T#6NU1HZ0WmnLQi?6I zwnks|)>iTA?}cqV14L(%?f)6yqx)$ZdiJ);BAZ#vaOAKArPV`+U%VY{W`@86K2-7* z*NagYK9>!ml=cePL`yAWCVa}5m)j5`ARQOW*(kV^^EJ+vPrN%v6WwWyVSPb=2rnX- zvDFc-nxTowS^Gs|qZws2la;2u2u0|I+e?lM2>8m+G1(JDJPH-m4}CB$vL}FKg*RF! 
za5XaQ1)A4II%@eDO%GKDrmZ((?fR2{$u(*M7I^t!eRgy(W?lK?usP#T5CoySI<3TG zU<-di7q&6`hD%4~eWn~_7q$kXpu;(W7_u-GEiaIgx6`kI3jr&t{olO+D696jHSFzp z?C#pUuZT@4mE9hcKhQq}sG)CeS&3=~=>az-TykOtJN7;LKa>{##q^R$nJ$=|&^H72wT8%f& zMXKMXH%~oxX67am|1$KIqPrS+7FCYXxE=7L%svK1vRx`h9fUmzI zaNsPFzWdSoPDenZ^>X^^Q&E48$r4d}X>x1%x`2)@ny3BlY@phS!3uS8pG#aAQ4l^0 zc_M4Ai0(7N)mhz~|KD8&PgY_j)mlz3qvd+f)oevLJ+(c(U9xE3-SG7V-0i=mx1q13 zsokiE(yy2g>hzs^nN2kx$ad(%+(46rMS}@Dm4Ld*DQlj^zidwM!Ow|Hu) zc*QjChn1d~k$z_=Acs{Ygtn(UJGL0@Vt=gP!_TngsC-+qsr{z2;AO2`0vqGoW9n1K z9$F?K@+Qzlx;B=KJu_>2u(Kwghy1H)KPmdoZlEW{Y%S1rK>BbQ{zJBOGvi@Oab>j& z$MuVMbvB|TjI*BV8(An+=Mkx|H;x&H3|brY>${215BkAQMUkzVZlUl-+&d88iM{N- z;M>05mIe?lB?H)Z(?@Dsri0t_-dUcyq~Q zpflnqJHM3zXx*KvwHxm!{SkCc=%OH5=BsO!7zT5mRP{%O;^ z9qx{^6?vfR?lm4=odW?Y(*+7jQM#(&cw?3L!st#H(-c3wo$Y<-x<_H`?R&VqUh)D& z98QSi&jl2P7($0Y_Hd!SOa!Zut!1Pa)7^gA<3#Y=Is;?C@7DU(3Q1cRbi^Y~6_+wx znf2HL8l5^~b@d-G*evb}NVEt!+rK2vL&<|HZc;JHPgJiK+{8IctsTBA&0$=cX2}R3 zg z1T|USOl>tw{yKX;sGK}=J8yy){@NZs5yqR@YW0V^`^vQ^WAU<=QOzIFe}nmR0jp zSey0C(f!QGigSl+u2tr*GR@OK&9<{Vb)N9)y;<}OF5@AcLtgeeJpd*VUQM{9O}cJJ zrP7Y4hAa8>kGgdZvcf#Ah6bL`K$fb6zOU&p*KL+*%`vvTI=ESI_Rx+IZ>@@=^tMWd zTpu>wJ#QuY{sm$*SL91c52?$AxL~4>M5BqVLP)!qZeZI})Rj32?Jigi0XYN9Y&d+n z`Qcr5;GNhZZ3X9TKOe4K_Hv!@KCf7Bx|~*cu2T3WDKioUFBC4LHs{LEsbUU4DCxrA z{_9HlY(L|(8BLR$^bfqb)zVkS>_tj)HDoOpCn{GzkL&in!C8J4K2Czz3`bdpC&>8p zempmKRyry>LQ_x-m%lPg5^^zXK{#c3xUGeSE>U^SZpqr{YO5^0nUxld+Du)ZI&?f` zBcLl{x&w};hW5&qP~rSVf!Yg$5W6HAg|I^)*Czt%vM)3zb$qgoOs;_2GbkM&JXJS3<_)F{W>b>`Aycr-X`(3CR+hE(U^gbPS z7vBQL2jr4}w;gRY=B1!WSRUx0bL9#~Q0`*4l^{4AeRfLf{g2dc)@6djhjB ze-%K+u7euV4Nf&smB}NRURn_73P&1_afuE$Hp+LSqt>_$61LN9m~E;v3y}XccDsn) zTYt+ju^6wA5WYS8ukFsQ^zK%nFqdMzv-&LOH;tUxz%NVwzOx^%c$yOQ_B`=~y#JF( z=h4Qs%Qf&`59Dm7Z1Bvz`TK}i?!3@4mdy2ClBsRORwbv=1<_`08Fb=?tPhEQdg#=4 z3Lbjczm>AMt_a;y4zLbq;5f?6jx$|9E*0v?EJ%R*O9e$ihN0H-myx;q+$ei-4D1Ue~O>X(@g&I zq`KEytzQ|#xXzS{D(OYk0(|qaU8f!7rG}fFZ|QYiS$s`oe9U=&$7!Izn@yX}FH@WA z-Ayep*N)=l0?dZ+Ya>$E3O!!0r&=7Rr1BN)9jV_>wiTsxY+KS|%(FY_huit-(0^f+ 
z$S1{FM%(cEj@kHNycwHhTptm<#Nc7+I67?62^UKj?O;lA3s(3x5zw%uRnckzCi8~` zlZr+GHm~P(SJ3LpNBfu2b$G%;&C$hA%303I=*3{(&)hhx;s;1ANk8pysDGXzbZrbu zGzw#cxz+njHZtz67_j1Um@{L*YPXtSL#`H|q9=Ilp?~tmm-xh*i%Do_-W<{gy>}3_ z*h~QOd$2XUZ6LQi-2Ka-LRDqV98Lx3-zB&liK);&5l=|dGIV(muC2_fBXKx!{bFZy z%yI2mV#0dABf!;Hp`enQ2;N#zCs^#dHPbwOH*doKIxJ<03*H{lxVUC{KG-L3mml9T zLi?lTf|ErPap(7?Pd_|Xs67v|EJF7QFn3?D{DluM17k3Njv2U9JL)Fakb&#s<Eo-h3KmyrQ}B|y84J8pCSLy2cGrG`1r0}Sf(f#!X*8-8*i zuG5TqP!2{(uVHBtgDL2-gHwMULEi-KtzxA;F{mOl3#Su2U?V=(}e$?Yix78C-8m|cT^`U8yskaDHZQjW<{Znq3WXQ2~V*x z4IY}as%}5Y8iag4HT{eN@|ET)=$ixo-B^3lwqjwZ84lROrz^Lq{CPRg6RZC8$SKWM#&&V=o^3rcBGR4VoSyTTupa>@fgw9 z=rO6r5%lxd@AbZhj5F}J$epRs?J)#d`_r(f$;FJh9{Zi2>* zCPBoV-NHn!r&w%PGXvL4u5YWjZ^=5#%U3$ROwCrj1C7RGj^Dn{$V8GRQkV}5Vd>y7 zXUWX}_W!y|1^@>Id^$`>NOCm=mA7*^>Mb0x==uI>+>B4nARw(}WA<fSsRoglC8B1n)lG0{-T$C{>nrExLU!Taj_~t=@E>W;^-r^+<0>1}<4jYVMr1Js! zik2p6hG{q?XXe8D>2&C2bux4pZO~`10zXh8LxU0y4Ab|q5`HS>DO(J6HMxz1)B30N zjcY5V=9f-n5mVYoE2G*U$^hlSvCbQ)An{RqjdSKGiyr$0x$+4nr9Z&iur4csUFV#Q zbm=4_-U)gPN!rMovkX2aoG&I?zSF( z(3ON(*m3f}t2Nl&(r3`Y)bs@Icqy}tWj~R`#gO;KYLPx+*X(qg7Yna84NxtlhVOP4 z@jQ}QD3g9}s7!)krE!@kR?0lL_VUrVxtKclqJ^rwKN-l)CbtEF5jQm%Y{bSke3r>I zzgRqu94&no43XPd_tdbkdgy6ivnH(sK{c=& zf3Chz9jtvuA~PT^uPmiSI_SPjeml-;i*p^tH^0j2rnD)TrY!sbe6|Gi`0cc4t$yRd z|2y1-O8hstS<0z61|G1fh} zRW%hb#O$_}`)(BLwc&dqgc`xwK`QM491s@3k9ziy>uoF~qZ! 
z?tZ}yk4Qq-F4tOa-Ai?iL9~Si^};c&WppJ~GCE331iVyRbOlftZmegd`|(`4X5#}q zEInu6HYgr$aWQd`00vDH8qED)Y>ykyIYdn(0RTs#qa{<)0(_5sHgb1I9<2%e?-=${ zN_Ojn9B*%R@F*~V*vL5f!d~d)yVKhg0KlI|?&f8@+QMtHN%tP&!^{wiMV9x*e4hte z@;op9$Q+opRL~Y;qf6*qyaBD=gOl9s3kdbx+WnHi1nfqJOFrNxap|acDjt@;77f&Q z4v246O;uG*F>xcbR`~|lGCGq}EPNCeN+++p9|t$-4uWHL3O#3G<*j9}x#MN%X>TLs zc{klTh2;WVP3IK7Dx$r-;bW#keUMp%6NJsz)Aus2pDz1mM?Y_WuCKlE$0Ha_wG@qT zReq!%JQVj<8XH@a@{wV|CBOYW?Y9^>$K#!%%ea)0mvG2)J2NsZSG~zUJ8S^JZ?7hB zjh&OFmuhqa8(9i4gSs&`KIRfPbiK9{EcfFoF-!6qo(!SXe(+Ux&OVl11{7)%YW&Vm!#f0QUE}3+RQzVVQ_NEhm@3D@z=tnPniy z-_jj}wj5F=bw8n5S%SV$F%Hv=VtfYpH)c^C6TsvrFe;7E#{cF5ROb}YLhL#MBo#;J z)a{S?4I3veaE%pfU37Xwij@9hVo5$ccBA1LH#yxvN<5Z>MnZ#^OJ5R^r7CNr6>U2w zmlZoxCxct*A1SB<8EjXAYVip_^OH*NRq$lm*( z5V4Z{w6a#<8CgUL8UkzW^S52}!mY5()(+kJ_wLgc4pO8_&~%4%2T{XX5e%$4+L;)O*GTpZ)K5`}>uLN)29xQacum)et=HWxQYa4HG&}Tm9)EtRH3XLoF-G zB1_1ae38xg*HSVAIabIb^m0G{ohsAj7tooQw8aTpPDRdgU;qFi+r1DQng#YqskO(x z86VX!fq=n!OXgV}8=dsBrueuHFiiQQBP0DZX+pnq{8NmoZ!vLrNs3bFjj{wr{~h*^ zD-bItkmicy#xEs|Q&ww?;FusB_zT1N$7vFUrdHyQQZm|#v&^Sx(8aSH?|$VMnvZYK#ivUiLFoT&U>k@)(V76?kDGM9 zje4|3K`P^UMUsn#vBtuEPQp$UiJB~qR3x<`QVeGDLJ^QCV3Ht99 zFoxq4?+OV0G`ia^I!0XmS_|XPt9J*m&?G@SBNx}{&T0m8+xB7Ga`hSk?C4-Ue)mgi(+}AQu+mSILHOT3yaE5#uMKf-aRyZ3gw2v9enyfltJS~n#$i_aLRarMhJTB z#BLiQ=t?jk5f;k{eTJvf#)1|<-p4nv6B=dI{hN)v@XEn@ChBpp&815U8F+yIW*T8Bn(1#|?x4-Q(qIxGn$^Mf?ShCSEsbfW1L{bF(_4i9c6=Gs6F7a zI~8C&hL_XqRP=wE5oBg^ zXd<(lm8ImODiYylZeT~^yM7syy1UOo+GQflAqAjO5WF=d;ouoN-pWQfFTzLtS-K_U zv$uEVhao}NO|$9pW>jR-88KgxCP&ZN1}5}T{XfkO4R6Z@o|{>$kr6*sBZn~5IH5_% z^lh%^`$3DJ76=^*W&~e}YX)z%T;{=?_|?W)*l^4w`|^E*nckNB!Rf8W#qSMD^})G{P`um%7BJYpaZYra9A|vm8;bhz2g>wuIfsR!h!kLhj1d4Pabcbqg3!w~~HHh$* z>t9VwrEZSnQA5IOQb5t-tosDJ`I`1dao|enz#k0#!03!Nc{nYgdQ|I;*vI=u3Sj1s z8`H7H5~q8-2&P+4`9RN|#6CXJ)DBx-Y^o}VN3wO|A6RI>02BjV zYl}MDA~&a*NRHpG>SmL9xkA8*p^zy@b;k$}1z0LEh->8Nb(~pr>9F$FJjr$&^;mhxclFGk*96j~4h0N<^( z?|7?H-OWB(6`;so4$*@~lJ1`uzjzA|ZkX6@BxcZ8_fJtX2?LI?(_N+efGCb@uT<{bKE}WP_cLU4U 
zv|d>Zw}l)gfBv7MfUKVh0!jZp3rN3Tb5OYZVQ2XH^T;PPH!XgOp!#P04o6u%eZnr` zKGGN26eW}D@A|J~u);q*k>(V>AhpwJksoW<)z(7NQ|mh_QFy(#1wn)7unQuD;y&Iy zTMo57GT?u>7rt@Rxf3g%wd8cSZc%h7d3)!5itL+qNOb{uejS>*_$}ZXgddKnd{#nJ zLiOAjt;%fLnYj>xZB=s)c)ET*brVM#3OoVlVVsO&iM6SrJie_FYa^e zbe1L{Amkq5N%T)rkeSZ)MKaHQaqY5)IvswLhf=SZ`~TyiV0y6)mT~s5>mW}^sKz|W z{<7MoO_Vl=$8r*sEKaD9^NXFI{co+S{#EN#eR^KU-tB`3eNtLQp4Xu%laD54e>ACM z!=aWz>Sm^~eZQAnx(ty4zWwyU`T*$^M?otCji`yyWHfm3mzO!^if`5%^nNOLZNt2> zZ(qRY00lKA6`9YIU)c&O5)wX*4VW;c8iK zBWIXlt9JrIW_?k_gOU|+O8*!W23K_D2#Z!n9;&tdg<>C_VEwlfAa>yvT<-9?Bp3NP zt^y^4Etubn6VP3`<%k@`~;-9ypgm1Rb;JyM`C3xw)vyJNeyCAoLx8D`o z(aMA07twg`(%RE+n&GWLJoh-4*R_i2w?_>0Czl*Ct5!Zb86bbF7wp$>i2i(fv#RWFwj6re~ z&^yRIi}iu#atBKXE`I-YLQh3pl*Y8L(vD#D+n?^&cn)EmkEk>&@j)Roq4@2y;t+@@ zwEm0_FI`FS<5u5zOLS(QN(lC12F}SegoV}f)~ZXqa0v9Z^Ctj<-i`^*kot&xZ*tnq zBN1|&%mpS8vvbGywyxNQu9ho3f#_#MBInT&-G18E&P`2`Umk4h;uIO)?`63}j^wI%p=d5v! zqp}b8#qILp-~rYbg5z<$f7}Wm zfeD)Fueaat;lY4o8?L%-G3slphBDP1gEn#tdU0+tc&}++Hd9zRGO)HFWrMiLof8>o ziiVTAWwO=8WKD&3;|oADVby4nATZ zkG+vO%k-w9p_a^1Z*KK_s`m-<^$43a&}cEtPU`l{t*&p$72@Cn3>ShCjh;{}_g7G; zANJ&b{VxELK4RSd!E4V8dv-Q$p{I5g%nCJD-a zYXtxN+4S74T}S$ri1Ad^LbHy2?||#(;i$C2n;xG7zm-E;jRR5B-DNk~pCl0K_V+X~ zH-iV6k3AvFyN!Z2=7R4ZMp_O#tp>89N(ekK|J}M>_TnHh>UeUv+{9a;iQ|LM0Haoe z<;=Mg7@rJux3{20RF+xT6%+}70eJu9Ud!F}?b)u0putCY;buLSI#2<(!>iiQoD|aQ zBES)vaFdBI8hH$f42iJb5DrG_*0E-&(;xP{%4lCd zJiE}gNg28~8oVubuRd+74_ffD&-M)h714|s<)27g&`bNMXD5d=O3Fe_k${L>HA^9< zd_J#Fu55GDbqyS5Omuu5`cH0&QQRmNGQ!sIeMXb4p<621q#-4BxF2wujTJ=#>@~iGc(H^bIi=lOffSvT|Gaz?|rMjs&D_2R=ZQ9o}KBQ)93UK!BbXHYG`bi zPV?o@&J41z4*xxL?x3`Rx$u-!l;at}%Eq-DHsMulPu2W7HdA42T#*(yd#VcwM}i?X zZcmOAY|zblQzp*n^QU_<5F(V&YPQVk_JrPm?tbgdwBaVa>ce^*$a&$DBy5`Y)uT>} zz+4WoRH3m%*Wd?Lb}o z{KQ68s=PcpbuV>0erA!3yk6EV-}V*{(8yYS#Se55j}eU+hug0V)6S+UJE?grPcH>j zn~I{~9Dki1h)~pb$$u?fvG9tv#lh+A!ECnDd#3L@C*)QeVT4^21!X@L+*P@S6#b{=Xw%)V=&CArO~gi$e$ZdaFM#1FIoHd8dR|>n~nskM6=D8%|0eK%X5Wjh3vQH@=238k!66G?Dyn z$Eq?Blq;#H@@w&$9>M^s&naO6+0@vJF!$Nm6Vs##~rkf~my?ZnrF-rflpdCU7FD{7uC*vs=hIh8I 
zmjA7VYdVt&_xJru9p@fd~O0C|*_E<^2f z@mNA5xosrCg7wt?>{`wFlKn`zv+PIciP?5^#`jYGL$KJN3mN?TI}q4%;9j=txL^)am}*Ba2g?( z4AI|mPRJ9I8+RoW>rCvxFZ zyg~=lJPO5`o^YmbNb*?ech~wFY*LoQ?EGn{Yr4-YL*&^4GTam5AVtC19a{wi`nAs zb>+FLlBEI-H1BG_riW%ZOGOCE*+_*PSRGkJk&j+fBn=wHK9kE+5vMm5hL9<8as-cC zP&(B&81^!_GB>yp)FL?K++-FRT4QyFctfUEF%#W7A6ML~DtvohZ+3d>zFAX=tFPrcIbZ%%_i4j|A`?Epp;(DJ!ty6O z1RVpt=fzK)-xXqjpV7&b(Zls@Kiyr7(4>##;7%w4^{V7tsP3(JO$$Ip3f+GaM^px| z*@0>r&v5ciHsSetR}|I0MT*f`Qe}WkvV5Y92rB)@=5qYO z>6!V!0T#FC7CLPmhgzsPflI$J1qJHlli5TedXPBcysCIs1K!TPK7n_|tii|jmkws; zM{{+e^>js$zG98!+2-zxT~yqNlpmNFUkSSgvSvKK78g_nsVU29E9Hjcsiaeevnh*2 zYEbou=OQXM;{_DBTbNd$i>qXQ@G*z{KKfq1n6Gdx7$T!FWuMV%++DQMNuN%1?r4%o zjqLZo1d4oE$^VZ)Q6&Cf0!4rEC?Oa9R-B<2aeQC4Z%geSaPGE!+oRA*N0NG0zqN(5 zH14~3fd4nS#nNAArs|%f{95xsNGTIAX1R7NUrHCS+0?|3g@^lYug$4wJHEIZ^pV+? zDe3gouZptuyIRxSZ0?D_B>B>9OdKH+irU6?&jr6Xk5C8v)W+T4;ieuG_iaj zYFF=JNc1U+p;72YDorekWW)oBYbVJo5H$|o8p57s)f?|~CBR||7_lY^B0$U98ivvk zaDW!f+_68xD3D<8&W9lJ=PQK?`sYC3Ak`df$B6qb+Obna@-kU*ECHX_>v9c=tre)DfZ@UUYoHY=qM#WLbkjrMCYJ7O$!WDBnX{$&W`RonzceL^e``u76wdwf}2FROByos_o>~=puGA?L%tD2jz$!zRJ`7a(!;Z!91p1{^O=6 zkoC#mlh6c1hJ<`mBC?5uOTpMxq7zLNT#4Yra`)b_^Cvs|VJEZ*To5*8I;&PA1bFY? 
zNr36@wjN`!p!aK-Sda3o^^r*ziUeb@`g|G2T>F`U&y7rg`{;dHUeIZu**>!SL?-K1X>m^?h;`@V=F+2gc({*C*eC}-~*QO9AF+AN&flWr2FB9K z@}OGu?t0ivrkUn`_i%Bq)89u!uQA4=&j9Lp-E<JI)P%6eotO7MAE*o{9E(4lA1LfT)vdK5w=fVUL7ujFusGf@ zhOG8p#^MBn*}JT~re#pmW9;NxM@Lh#N2>IRynwIae2FmHve&p1XR->(du3-eI1@`R zSEw#Y)SjX|(pnJ=^xjQWk$L(P3%(15knc~!pP;b%_c??IB0jgRlb2oj(}U>(UP+j6 z-h^@)w0Moo+@=VVfpbdBqlt3c4owvqRZ+$<8zaUB%jSE_mb~oX31#b4h3TMF+;Xc0 z)xEk({Y(?Mj1|oa_15r$pt*dJJ(YO64>{&9!kQGMBU2Q?wtaG#1V3;@G)o49`M7y& z8r3j2160yITs`QGwVO9Gs?7LM*7d9TVT*E_;I%MnoX-U8E}k-)s(fuA9c}p5%YB!aag|z;x+EQSpdv%PC)yq||9dyK4CkhnFabF$Q-2c!B3>P*Iy`fr zlb3bA-DH6JOK4r$is$@m6$cI~!;D!(vc9f+>`N8!z+@~ApMHT>2v7%^aRxK*jOQ) zG<`Vtk}j@eruzJ5BlKZGn-eDo`{PB}c$R!kUp=TjD~WkOT+Kr8#9FKI4B{xS#M~{J zb@CK{Ha_Z^;prROstW?i_3j}a?siganB&P2!i#z=X`i<98rt*4&`jAMJ}C|FXMXoC zNKWIxLQG0MO&YI(7AZ8AH}`&81KG~ci7HRS5clX%0Jsb`J&d7Q@{0}J7@Q1FgG~B* z{{aQGu;_tfJi~BDQCS(Ka{+d{W`PiqsSPceB+ho4gLj$8Sm`(D_cWyW>!AWDnbPay zKi4YkJF+@PJq5hF8_?rS579*tTZ+SRm_JvPv$haQvo`o{-jb}>n)H{=k7ym(P!ZjnPPAjg4@@}Y&D#$h3<{pT(WFDVhTji_ z;~J7uxfvr}4T%f=X!9*!7_43_8l+9%Z^eqozu)!u)dJ%q^Dxe%TD!R4gAYB{3$(sL zf9d_s%JH>rGm;01Q*D{(=kL`fzH?&VA2=puI5L_iLavKe6Wy@mc1oV!>K9Y`1fSVZ zkP391j)69;;!diF`B+Xye$TP?UW16A}tU7md4HiZ- zsI=7ge4WP#izKr=64RJ*+%AhV+zh8D|AFCy(UCDQafOVA{^R|0*%VvCtN|=)*ciF@ z%%p0`xZVK;5tUFxe!z`on=y4L#@5J})|!^a&(!XfH9-cBtl<%b_I-e`KOL6l(=|$63GRPdiG1xnwfs+mFk~N0p6UPX#gHKQukL<% ze#X4JQTL^$OT-tHzznaZLkCQSbeJ66LtD5mgyv%T?j^0a@yYDzKV05cVhRdMwS~x~Rhz`R zoI4N!xAc4gi5MMrM7u}B<5`R}zG@h_?2zMGF&?*L)UeQj0Kd~sSVlu3S(LcD1f-MU z=Z3ZGB#^%68#yNiA7do0{3pm}(GX?sl-ES!6oj3Dt*Xx4+s0Zf z+R>pe1Aplo{EWJ_2aorqklK7RNEH?rcke&~CO{eP`#oDPAa>Y*?Zv^&4F?e0>{z=x z3o;uYGgLAfm+`nO>uA`j*|`dh0|;#fgReVgZTCg_MSS}xekQKgz?01_t4mN^?A8ZY z>xFy92}q(ZS0%@L#scJeO>FKgX>~Nfa=H>;TNl&?o}ylKg3SvuS5%vT&gNHD&5Db| zDZP#%j>Vll%;H@;b*G}&uSEQAgLHqdGm>krAS(0ucv<#hMPBamIjYC6!(`v7?YnYv zB{f_R(T;^ys>x&rqp94dsZdv9xMq1;Vbu2rRy-!S2cq28z#H?UkeFO4=pb~A&t*!) 
zMZah;V$&h7u28>9?(C2-lduZ9ZZVbV>l*)*4!eEjUOV7yPnTP$^v7ld@!`fNkZ3EY z?#3n}mQn*dIJ7W?`>hXe%t7hd)%<_E==rx&Y{7j|TY;ai%7{yk6)Kh!`FJ5ZzbiK= z%FE^-j<(qik(JOxEB)^j zgVzK+p2*-1=y`<5vcPAgzABAy5;hD#tn5UU*L|YTqhHo&D7ByUl8|9L6u`tp z0cfh2x4jMMA`pPY!orolx^kC{n3_nyi!eZ^ghX#oj`4gD)^8C4(uny&@p?|RT*^&m zQntE;V}jRY3>olp%>-CGZivm!ZV>an4rH=|0ZZKIHRO`~xZtH6Do%*W$kZC}hW5Og zs@!&|USG6qFv%(Q9~Zm*Ggu9i6p}_xH4l@=cb^>jJmjh%W7VfmA(euD;a>&ki^r|x zn46o2MilvQw>g)pz{kZ+jS-Ze9z9%Gdf*{_{zaP1MnZ`G7XqM-0byn3aDZG}H`}XC z%AQ2`l7PPy!s{x0##=TnQmse%a-W;ygQZ*CAtv!Tq5Y6d@RS`_y(*vb6NSmF#Pbvi z9_K{d>poKRPyQ2Mcb2{ZQr=elu;VQ}FzFIJ0NQO#7wSgmnQM1;%bBA;lX!QdfD3a5 z5jck>7E;oA?}`8bmt3z11VBzMu9Cjp6T47lSHsXm8RYZH_TXI40Dt$9na7!hkY8fQ ziV%?W?ZE2?2KcETi-dv|wg26S_vJ<_XTHqQ^PzmWRQDB9xFA0;vl>kI+sE4dE3Q=N zCj=gJK-XO4=_Ea&W*#Y53Iw3mCF(d!j2|A}WdF8p_>Wzu-aT7j2>2l>GvMz%1FmYc zxNQm`?NpK@-a~0J88R; zm*=%B&?M!Nh&q0lK*H6TP2ZT`>|>|1jU@Hx{|U^LIgsh=mDuumg8;w{n|_*>i~$gP zM%`W+=b1NE&+(_e^n zKwa8{!T3BBquT((ZWs6VnUGm!FYpsUD0Hfm(HlE@(h&6sKHT#oyw!QKWEaMPg8~8D zESj{#ICz!k+M>TwU(s&!*R4~BOR}U`bd~i6q}@_#0bS%xix`=zDj>JCREJJSZ9?4P z${|^5Fy5QVo5!S?J{A`K74ByqY%_Hm4BM9d1nsVmS zp6V>|$s4iBX7FOQ_u)gn_JWv;%fv)R71D2jpU{0&P8y;#)KHGhn9YQ0t^yvwMhvK^ z?4DMBApq&9gU_XAge3-or=SRU3Qi@Y;&eEMjD4v2I_frk0;pB=$!DiOAR8%t=uxNX zCR%{AiEvh$dGs0enxi(G@RXE51}-jp*%Zs>xQz~R4x72;?OxdH)oReB-5>Mnv6!u% zHSM9fOkR7XS$(%xF)1n3V#VIOlLBM{;jJL}o$j7r0!aCJcFNTTH06nIA5r-Q(?iN> zocS_2*wpUX)@wo>Hi6*v-5d6Bnylxs=5c56H0OeZT|JdQA)6;mGN!vg;@~oe8Lp9GMrQ~%{e4ilh`});yl0V+jJ00wiV%*9rLnI{pRH*A`e^cjsx!ca;0RXCBY<~=#Bje$~ z$pl@NN4G-_Rr>7HgZB}<|C)$3HF5ZY&;XNOp6e;aT5oKCiQBK@p`)7;4@`+qkYK06 zn>dyA#qu|Ws>dLv-6{#IwZSB57j(ESd@SgijA+>f(>gmRC6hX{N18k&n8D#kK z{-2^B;^(*8YNVoSbGamMmk{v@O0D-Gbt|_AWPlQ`xyLmW3&;L@Dx#SgKL`597-}@IusA%_3M6A&D5atI?;$N!__>IP8$w z{9tHxC%YZOc(|&)r^~0u&u1l%T{LUIJ>Rl`_J#*h=c;@fI#K~HO1EX5( z=$yZTVxhb5Y7;fQJ{v!88FX0?@M z4#s||adVpLL@$05A)%L-Llwn))<`XPp0!c9yo?KOC<#J7v$%(g7X!!~4IsTP&^OmP zXgM&NkIB2b{-%bZL3syJondYpkttW|EY;Ef(HQ?pLXVW6ue9p2JgdZwZF|GvaqpBI 
zkusuAm-11cgh^BBlR5hkCj{r6Q7#G6>|=2jDfa!>8{1l(0LxV&4TGC;y{}D<1*QudFcHFDWxYc{~^#X z2{W7h>)SJ}cAHe{X@3%RK$$wMIPiSW{!2onE{ppiEI_Fh88CIp1ON=Gn7s4dc486# zg+)ifi#%@58;}?Hh`DNm{%#u$oDaTaGTQb6`#s>1zxkMmiwlqJC<@%qx&swsvc%@i ziJAD-X5Cay?2)7**Na$pM@u)HV?jU+VS(|m5^7-DY#i|x#i$dkkdiAea1(-kWIIj1 zSRr`wQ;5MMwq|9qYfXNhVvb4>WO=?#TMU#4sE&2{55#;$DN8W|BnvY%pb!;PB0XC` zyh?ANU~qToo(4|_zDz%4)=DJIg&2!AfDczDsV}Q#S7y>QaAq{+8s;~4gPbhYT{{YO z>7}k9pbMdn11n)?aBEpLcGO?x`H@+>p7^01E>1AT!BwL<8fTbbcAxZ2(IP~J6~?J z*`b%BbTs33HB*tvX$Lo3LYncz?J?A=9ug2M0?*MewD{{V1%^9id?_s#7ag$JYT#PjV0$mVX#Q@P zPfQu;6Vk)i!pQUKAT{ot%?0yZ*b#r5CWBp>Plu{!`a{m*?{|DdXz&Lsxqnkyo>UL4ZlH`zpZ+-s;xNx>PetFLB>-ME{<W<9ve_l+d_(+= zF!!TjtI=BY?yH61Y42rW;Z(xl@cB9$51#A!uM%*rt^7b0qjx@!R~!K7M;g2!d-1Te zHg2LNG2C*~Ed-#+%8-HRlg9~VQVC8j=F_j|r+nbNy458vb3d)x7Kh!uPYPb7$t8EO zJY}fL^DXuF%m;KWQ{0Z*cYYQiRAGs#s%os%P;Jrf_r@kw0|0=<1a-WT&9Bv^ch*si zqfiDKtG)Xi0sSuf{Uzp}?)L0!dw8Kwhe`iuyh|t)g5Y38(c|>6lF7bV^Y{YE!`o7~ zgYz*6IU;@$hP|$Qsc1-|XZUtc2Uhb4ZNXyBDl#&%xprP|&-5?u3o74<0dNWLsC8%l zx@~F?5wro_RDE8ip6P)XN1Hy|`rN}i0KZu2DERk{-u@)A()YnMzbYaXxBgCpakIkW z>!+b^CN_{7HJh3|%8aGz+5z#>!fHkQszb#-&mj~eN4 zxx$c=HiJW2jQGKTpbcN4U%wFN$ocay2nED)IuoYKHIKhq?V#G9Fn!K;w=w# z9d6;APWzI^8sS+K0Rn|eDrs`_pKJ98UjPn{tXqu}r&E$BL6XPD-n&BeR;b=zNnckG z=Bs+M1R|V*KcS$&0_ z&?Y0xjf)*MUE1S4FI?(I;UW$!?$NH!E=_p%(doH=L~3KpL}~NZ?;jjn=1}0%2p(E&NFYa=39|V2n%(2T9QWxFZbVddc%&Y+TW=vBM!CCdlcLlIX-6ZNzQwAdH{)+0WO%swpItD43Z>#K2}xW{YIW z6s$P=qZ-W+-HuR5Ne7`*5L3dOo+-mi0;ZhMd6AGMT3jrSB*g-moLTN}poRU=DTMNb z820j{(3Gf3MN03H!E@StHA#wypOm+8+jjj5q?~r|R8;^qJ&Mgn4Ua@ntdGU#<5gAZ zvrIPYt?fpLFB?m?F| ztTl^vrs~37TaQ-iRPv)}w*;OpC-Db?1MQ1V&5X2xK}diWR^B%SP}T&xNMN^nTt z-~KwR-{WI{)U5(Z0s`ZDuZrolkIJg4_OaY$NA+Js1wAep4B9Z%tAi>khBKGfIzI8x zZ=DVjTKZS$igH$9TP}c6M3K?O`-j*h;8Ib^Re@=pcP8H5bLEM7zXa)ytNRmp3t%@{ zx$HxhuC+k4H9s8m>1pQ^?FVwKDpS>xyO%E~dx)JEZXl<*##tMtsC5SNv6+kQa(cl7 znKFEntwCJ~d;Rb|rU9Xs*~tRgB4KBEq$V-KAdpbBSk6Lo0uB%uTUYM7_9|APZ-r;L z`IC#da*ff^jMJ^w`YKNno&zbEPH(O0_#g?LWaVLMpmNX_s$I4aE4oV1`73vefxC_&>wea)gTRG 
zP{3X8h^o*ox+2Vm1$4-lw+W_!r-JJ-Sh3W={_uciGzXWbsi?MzRH!mH@&$0YMG|`^ zWnwgDaeEEzDxg@;6&Y7R=5!jk>MAyGb*WCfqo9BJ=Sl~U^!1*$5ND38ryF8S4h#{3 zykSE_F<|a{XU#zHepyj^91Lap8LMrWZ?~+4;VagfQgJfP>o0-B20ghDAaU6(_jw(5 z;G)1wig&FO{BmbXs|%~3x>^-t3Zt+!9R_{bKFxqzzs&EtQA`yHyZn5=gSa|Jjcx=i zVE9N6r=7;3hS(=??X*o`wUR$9z~ckrvD}wlc-~5AKmBJPFIAc`HZ^c6R<}_EM=+UA zzi&J{B`!S`5uhX%!Y`jB!(xo5RtEl0Q1Broa+qzD@m~fVn_muc3dsSebpdH$Uu)LU zU`pBc?wQ@xKO`-Wp9Oxs48V_43`+3*pzq0C7*oR4f%)6@&{kK8KN^I&}EPPkkq@&NmyWe1YcVyTvp9&yh zb1yfi3f=;5EZJM%7D`jdqFa1TL^~xs747(1%MF99t=fkdi`h^9+!~QGtT=UdVt-p? zzSZu27M-0T7Rau*FPWY$<5vf15HP=>9p!!6tw1(# zYIzF+Hk@WHQI(jZx) z5Fdn~WNBXb87|=qivyRu%t%I6@Q=o|viaD=z{p6U>q_4UFwoQeXy&N(blMmHq{zM9 zf2H0ruB&)?T6^M*4FE_-k$mN(Hb}lxp$Go`YqL*iThPW*7GHvr*J50ahp74hof4D5 zR>sUa1U#I!8s#IPi1l*SnaxSL;Cc0Wl+&Dd>_rYhZae;=(!*d}=bafmn7YTuqcF)e5;WAw_MA}1(8H0)+e(j zH;72z0@%8DgMist5M^$(e$gs11b%le}~t~qu}t-==j(d!Okd6YK?mLuix zMtcri9*O@f1?_&!^3pJoVAy5*0c|au9gm;l*1rQuc*0a^-2+t5%72Wsd}`_OT`(&A zuTBGl8$B-cMjHeUHeV_m5HG-c!0?m)fTziEe^Y(+05*4;7>5(vev4Y+#(d3!K!#TpM`iDvRER@-k0v-5J0u{?XE46*1<3iy#fLya*;uodxRo7uLpqyP%g)#ehC zY??v@hl1U8cjK6#ZGSOgv$6d`N+Zlfqtjzx1b+=0;)2}IIn!5Nl&}@A>1(q^f*n4= z;^#m89p`R}E$t!VMGCQ_>v<2IJK)Q3x}CZFMM6DT;jp%1y^8Fc=zyHVFcSiKQY+HQ z?L%UMU6J|=;>qD75?RueCgbo7g;Fz1b|-3+4N5WP?W;DNPo55HzL-$8p}CivI?Yk5 zz1jqYRpn4O8bIlRQ15}zL+$7S1+e_EUr?t0vx-c_EnC3-7jSXnb}F~K`@2n4IChAh zza6`{;Ff!>Ikt8u@-n!rhVEevnaKcKJrMeM7W?5CNkao@M|!d_b+Y&ev)n=lFHJx_ zG-79=>f>2A;xOu~Q0d6_E-!R*^WJsNgstLEzihYq$-NS03S$9>&GmzfC<;-22=eyT zm$-6TTz(atrIb}qq0PFlbyCtKU|@-C<91qf*xQimN0`Sgu`k2;%7_FU5?mL(*eMa} zDnN$8K*;&H%$0h&ypT4fd*n0w@RukBK6Sk-@as>m>GVZ@1>6`0!B~Xr6@G(xLk)_I zR9bcb9Iw&uI&@n>wmU%I9S523!sSCoN3V5v20^?9D)%cNMS$nS<7`YLpk&7GtY(vg#^20RE($@&gjuMkHvqwkw@sb<# z&w%7kc}6NrW($dxJn+i7-YVu0`YM8xmYP8d5Rzb_MiqgGH|z+p=Nu3&H|GBFP5~z%_ERTRIZ3^&Ivw_a@6%Ih}{Y zA`b0^%W%Ad!BH`O&w|D?Q~(P{F}id|lqKG4B)+CAo&se~zHq{q=1I)C-^v5jBhY>u zZNh>W7NZ4BKlyeeH$-#ze*NJxdi|rv{qtgDS+L`PvIIrW1tF{>M#Stm)3>Mh@I-T2 
z@c|Wu+b41?D=MA6S2P%Mz-<-T?M}teIGsiXt>|`|%wsMilNW)S8kC8U%@Q>di3&Jh ziLzdnE0!C0K_N9Zc6-No0K{JxL?^zp5A{RG;2{BSpV{G1 zdOyJ)SS;S#D2WABV&&x4kZ3X#EL0-w`p2RwHk@{<&0kaN#6TJw0{~&6u)o5W^Q}qR z+2gYnRy}@MsaG(tHao@9rNSyJ|M?K&a}Q6KK06FWAw&;GMBg71+X%T#v|NFyGdo;G zh5HPZwD5xoXOnev#e8(g_~?!Xl!(V{;c|gY`ZsmGT45yU$(SQ#Ufb#Au&@R9g*?H< z`8PrFl{pO~x-l8@vRf6O0C$Nom&gpMLK5wP7u`tAu?{b#)SB)w__wCBrTwXrFzG!f2Lau;kaLCQnou6-~pb_`Kb5MfZ#MQrp@ z&o82GIOj{$AsJ*vm#^I1j^SX-UThZpX#C@-xmNQ71z z6vSFS1)iXAL$0_+HEG7X2)~B|E9-=dM79kv@xxET+dJAqrCq7|-%4M>o5)8cBBC=C z$@Ua_#@KQJw_~TXhj0>B3CZ`cuwhJFeC}kZ!T8ukH~w&ZNgm_p5*I=tBCRtg1*aT(aIYh%kjeOHAqj3D{19#@d| znZ-9hKz!U6UqtWL{9@OR8o^~%;LsK-AU!<~rT&%IX8c0%F)I^s&3CMuR-GeQapPm~o_8_Z0qP7IQYB+ z2Z)O^>E?6TW5J*wd27aRxX3+1!d=Elz`v&SbV=x3K&Zr2zcDt}_-L%PG0P;zwIWTjsu@2Je=urn_j}bKz=praxTl zS>B1g5c#?X3a0dg?Hs0@uXPD;9i}=%81F6zTP=rY3P>+hQzW!=T^RFBQstptsM+Rr z{haHAIz0u$hvU0mGw-S&sI`w@7ym`S<^Im^BnT4}$xF;J?u_ZsaB^9zT_9wdG z!z>=*0K}RM2}i2JJ$ZNyf5Hr=Y7$0R#gHguwvJeh`lxDMIr;xa2@*|2j*VS*buU?| zMCa&|k^Qcl^x+(NVgwK)`G44N`m6+u#BE9n>v(K9J@2BaP}z?r>zrf4Hvy66%1bB$ zro?n_Q2<*mXVjqpHonLTZD=^*IMSg4OkmE>8FFye1es6>9X?feYo9*01vET&@Y%Lx z`guAy4fl8|PPd-?C7dqFzPSbc^z;Txo_Rv54-OYsnEKmXwcx#V0f*iv;41PB-`)kX z%kXdIF+4f&>or*>F?4C1qQKAEmrsdeU%v2JbCs#ney~WB@Amx!+`o(-3@5yQ*HNcFnDScLh~TWwS!J&a zh=7NzQd`74_+$(vLtW&hH}u7D6C6~o>8$mL>G&Q8y`T{8(#g9b6BG`%@$na@&lS%; zOJ2lN6{$TPJxy;X6*gvlc;Ls$Q5Rj;1rTs+H!C0@8+2?tnqACnpHG;nz<;acF&S)r zG5RcUJ}zF(Gec^lA%G~4k;plq8JmP_e`$)ZxEIoj9k-C>aHr3LDS%I;Bmq<@oh;_P zNJ>;mf1mys3l?mdbD>)BIy<3Lo+X+rOS-z!Ua(dN#eN_&@rgLqqY_k-sp_O(3XJ$A z6~Dm77LvEyqHP4DyN?{aJk^g1kW*_<>WCu~@}oiZXMZ}1$+nm*YO|Mizt3qqvec|D zvE9^HLL=Rzy~jr--L#r4UUDQ}f5=NmIPyxLWZ$;2@Hy6?!SqDFoD8^ew4?k*oKxqA z;;x%3@)_oXGVn8LG36uzTAa2h^J2eW?rrPCjjFc}p1kbnt77PT;`u`;6$W0B+m#9& z!emAjpO1|Dt(Xtg5Z(6DaQz9VvzhFJH)zRINf$XAo0K#Ng-BYi0t9PC9Ed{HsiO~0 zpM`9Lk*(JZ_H(mktk~H4vBOb!_6}5c2PY)i`5qLOpSJwoMxCEk0=degdGr6n`J8F9 z5|{di^hr^~;}9CJ=HuYk4h1HsP$FSNU$YIjHX)VBFxj?y*QV@_YX(IEdfOx2G4 zAh#6!h6C|%ol&*{`CLntXj!t%{jSf36p|!p^|7H>>u@?!eUG+Kra!z=eqo_AcYGB_ 
zB7uV?MDA%uQEj*IRFP%E12!gnC{eFc(fF)XY0+2QL@LUsbx4Yp6SXK zz=+GK>5;^igAHO6NAxkiAw|jVfOlU#01w&qGFfiUkbsAa3-*A?1dYCPj|D}ZEzF-u zpr`!7{doMRA2)JOe-+i%3K^mP)enl!_gRsl{cM{>-SPICh75Iqh^@@y1}D4ZOI1@O zWTl?f%1^xVEw-!Ii)i|?=?-uiiH?p_A9ULyffd~qqyFs-jK_S3$+f5}5tf`K?AL$R zKej7NldcIQUk1^3bd48$*4q=rd7T6b)qqz+@oFDCazcwJ)NOwatIb$dRNbSvo#io_%g#oxvZXf&dfI#vE zhKBN`uxE8?pElq5O@9Xww0JBl7JI!_7OL~}^M(=wDmcITN5y~vHy%eoE@Ee+=T23f zYry7cjj8k^Wfn}-;h9F}Cjjb3wr<^<1Y1poU{`qp0_^o5tj0S5amjbRnw!UW+hh+u z7O9A9e$VT0Uz6|}?|Cx<-GlL5apZM4yNQYwciP}34uZMy)3sW5$qUJRblY_>#AIcW z@KzIERY;6LCQ^Tq7o{7s4(E{~MUSuV3l;=A#iWbZ82mcM64j;O?QNjE^VqRbI6!B z=s&P+I6DE)x$on9*cZ+0!L1)j>5Ugth4rO0Ap$!QACJ3pr4cc`2}O;|qo@&8M=-JX z@B7)#s|p+N(O7h{8lc%WE7}mS@N#}h0kiKAHjbeDD$K`6XQ7Hcs``QpKUU*Bg}d%6 zpLXNpQ~^{5j3^evx$d1~K*L>|LSGp)y{foe6RsH)aNa+7P60YEm;b}LMd1Ctiv8O! zRe0iwxrWh40p~FYfYjI-xMo3{25LCDiy#ug#Syt#u@`^6;xnW?RMzp4RF1}%nUx%a0+R8+^#%TP9 zO3Kg0q#UAP4Q?m0PL{WWa2Lv@xzfynF!ko=^YN3Eny#gm{+90oH(?glfxb_g2M)+C zjT)5r@u@niWmE=hH5+qQGFLY8-nAV8339-Tg;lHRvUyp`hbmn?;NKZ5&EG1Sxqthn zlo39zyZQ*TR^}ZON7CteI+l8;m&&{nH0V#b(!ZB8y!;U75D$GF?G^i>UL*c79HCj2 zDOe_MNoLt>bnsVfBiqd@wk{20Cx0CFuF^J4u=+2tYD(1!A*C{ngI5vkL^)XlXa%I9 zoR%c}woiEOoH@old);a6mgs6d^x(>z@MfnU7~4e8Q>;7FZc0y37+Pmvwmw!Mz9Z9L z2nP^aaeDgtvl5dPvVmWDuV7qsMT0RI0@d7BcTFUmOO_UauEELvcOq#*@hD69^z=;& zY7v%`947O+Su7ezANII3i!BD}ihFKTqbvJ^MxU{bm!mYS01DgLX5(n_30nX@CqYr} zVmVJ*n{}c^V)M*D@i?3MnXlSQKv_AV!|#i5lgD#h{g;5NyQ#%&4U#s&h0cn!(o*+3 z!K#6S*7G^mPiLm-^T}Sm*sPZ8Pr_}_qder1OVc$}=Ckgay9d;n?{2pGUrtD%3OO-+Esitwba#Y zj)T2y>SL>1OMamkkdP^iS#kNQhp#KwMYDwRnnXgi{armP*6Vo0d2 z09?PhAfJ9H`Dc`&`<6Kf>&|4pAGUI$DAHf3_v4$V?X?W1lp$Kr+dn6vlr>UsxS^C; z)6olF=d8^tJX|zAA0?l%Hs&YRIp?HYWl}}8cD5z)!_EZD8id4>|qEey9@Pbk)lZQPn$m+(~s_3)dJ#`SNysU*9Ng7aNwV z%Ks*{64w1FvwPr~yk_TqK@u`1NwmJoYI#=usj-tP(DTGTY$E|yQ--hgxH^R9=!XK` z>aL;3)~=zf5g?Sm?nKKZ?tXUPeZ^D&BFr&{!j~B1-`94)w| zWa^KO4T2+e=-Q-azV3Vtt;)i}xfKo8Vy@Sto`QXgqBflb$oxav`rC@QKf65)P(C{& zXHi)UZA|yOU^`WMo~ASP_mgf&cTCYq(l#lfAP}^&;%2ndFXm4E=c&t^*WgEi`jaL@ 
znNx*ZOeeq7wrC-Sh}WLTlj!&SNY|-_{lWY%SH9^<1gb7__~o@m&wm$fRR79WYVipC z`>R`-R;YBAt~m`o4gFS5CGw>?O#A(;!B)Y#fo)QVBL{z|Z-jMLVUUuNG6Y2wQ707h zzjaQoc3N!dT7wO1cSzs8K1dcn+*(Y~R$1d~VVYN`M>fg!pd?p`TG#8{`l271pm@hM zU)%1ODk1DNlMY#}-Q`~FcxkM|#Y#;TIS2QaYwHth*u2G2+PdOYyIH)JO25OW$^LXM%6xR31J8Wx(T@n3&EBeV1Go>|oYOMi8IXg4kPwle=WrCJCgyK3wC zDe&3fs;5#YPnUdTkxW=rxGv<7%4GI<@|J4S{&tYqHnnu?Esk)Ff)EOP7fM2ZqTJuT zn^FOoQw(yn7u%!IW&0htHXW#(a5c+i$kGySzZ0aBx0paO>aA7xOEt%R9s`dknb5W+ z+8hHerX`hTsyU!fhlSkqmj`P<SAu&mPrV6RC7p;CX3Cs7;K@0J? z9VBH=KDvTc?ch;JGiy&-R9J`_Gh`S93+^*ECeQLEJQU-G=tL3$$~%wOO|Sh*YtFBv zH18zBuLKH0MXGPg59}b0q*YG?@NUlHW`~O)CkmDls!Ft{{dw@XQbgs7KaG5()_4-c zHBG&PPKW(9H|s6N$AS$_>Rr}1wxDs}0HwXO6cXWjg~)7=cPpt~Cy6(BpXp6rxZh`& z8ikoLJ1IY!uW8bLIa+5##qx>&!6l$q9orsM$ES!~+!W6#Jo~6wp?P1GZwM-0if4W$Dma~bQ ziOGWSJ=|F8t`3W+#$uJQL5N8;E?SVw|4v9IWI$KYmY?+Wk@EZ)>=MRLH$S^2zFgUATza20u&fd= zcfEWq6b^N`3QPns5>zp)pcGl?W4Bc8*QAF@ChJwtZO?bi^%ah46kDg>e-_Fd+6|3N z9tZzfWSc{C)CJyyfi2a5y3?0Y+V%4+HoLmI`Z7O;JBwBj^7Z)zxCGoDKI+^_rC|}T zZr$;dq=c`xT?h7yQSB@H%OH}EI9zQD6*ZY4lW>_S95CQOhW7&#=ejrht({#Ak6%=Q zv8I=uy+)J4tDrhAr;Vx6{EAv^8pmkwuKcd?W!szQRwunXVd*&i%~mvYI&+Gl ziut9@Q#=8!m4@AV0YSwqfevT$`<%x7Xzv1c2Fks8m_R9>Kiz{t?H?%s$NcvYbr=LP zW2x5nmMiqIG_@_zvHT4ttko&wnjg(Iu@*uCBHvD&oLGj?=D29Tslj$8tlHV4=}K~I zFqn!Ncg~*yfu4?h>bzYYD^+_M`&+s4Amg??F^7|*D(l%J?@~VCwe;B;L zDYEBdkH>ESMOnGqi@|>tWlt-3#DPtx+TXX@_5D#AP3y!H#a(C1?Jq9HVN>Vyx2h#` zS|_P(BC3fS%-bmFCl_|+wz{Y|ojLo_X7e7LGzL=WRC3=d>I{}oP)HS{s$x+UVN`}oi$)W&o zJgj+rx5r`L~Or+%#9S9O-ip`c%XlSFVjY-Y@nV%WS|)qaQf>ydY}7ZS%3K7^taN$ z`v20Q^mJhV-F3S<8uq_lw6R` zxAqsPz-znXv3)W5(Xtw*&616m-_Bdrt~PEMr;(BaM@;ssb^2~EJpZ2tH>`4Y1Jt}+;!63K~D787^l zZW*LrDR6h+`p2l0<2O(g#RA-3ME>h@7H024s@sGjA9!st_k45Hb!*Qnt!YIN?OB!d z5tYkNtV3Q>=^#(;$8%frDYY$V5Ke{H@`oK{mboojgYs zNsPD*Ol2Lzx9ZRQ{!CsEL8 zRB8;o$TrhqUWFR}49Qw7#r&gyAqH$bO2EbwXyDe_t*e;#?&&9g%4Np_Lu@Cq2u|Uc zu2DZ!&90BD8{a9cC|2P1f^0?woxk})M;7n-6&;1Nm+QGftMs`;-6wsUhl+z^u@)P4kmPoo*%|&N z3JJ-O5pzTLPmro2!T{DzaD8#?|C=0^(`N3(Txw5=w5-Ve+bicD0*{P7Bw#E5 
zCP?A-K`-EVGb)wLq@p@p7~T!PbD5KCa||%SR2Q_7QAJGp9{;9qvV-+HWH#HM7gU(B zBM`p7nFUebea}uBHJft}3CRJ*UnY}V%WUm#hya7hU(Lw}AbkV8NF5a6n1DYq4zKCo zm>f~*Z33GkYlmZen^(AA7iGjNL3F0i>l-wF@vX$aSqNJ7ZH-DyZB;e&%?cX83gLX3 z^+>wej$^)(0MK>s9XEzAefpN-sl>vFct#(R+5BKFFM?u;v?9}aE#mf-0k02(@x`X} z*@uC-{-(mj&PU9&hk3z26K0}`VgsR4(nMw8esB{-<0D2zncE9`PdJ-LC%=XAIWFYC z*+9Nui5#tg@$lHWBm1bOC3YIs{k3l)zv7XqjVyn1`CB`p4ZB$*K&zLr+WJlhOAieI zqs)dVO-=$grW7}r20bn>Q=0bsxLf$;Y*;WQ^>WxY&6beTc&P z!}ta4a$3FnITjR~5b>Dj`hxW^={AdP02r@MGT^^=8Di$QemneZ=!7B}Qu%Xyd^cj` z87MxkT^7Xtjvg5i(dR_xf1U07?9PnvWiv-_p|#o{!k#8N?IU2u)nE$ra#FaW-3$Sf z>!f?$pSRJ~leK;~W;T~^;=9+24G9h^F(6y>CVqDKBH*5;-GL;NFr@D~x0!vmG%%hP zzbHB$pwlY#`A#LNVoAT&N#-IyMv}H@n8+d`CK}-Teuo*jOD-~5N1MqN3=LH_*=z@I z?;M<|_^I*lYuVKGzVUvr8x- z%-10C35KgOGo;+LP>w3`ntvDT46wt33wWYwxrH>f2BfkbwXnsFYR-3AyxYc%rw;*4 z`38h5;{F>+{bK{}FDLF7u6wx8_MRErJxSTmO86QC8 zV9z(!))YbBCpmfgb@-GixdNLdC>wvela`RLZa+5KE`Mj1aQ0^Tc`z?M;?22SI zDJs%wb|&JtaH{(D!Wl4@-OKPjz|sl~V8-cq+yIZJ1~v;iBF;!;dd%)FNVHksdpr8e z+$Pa{Nv>Ggn+SEES^YP&KkVNm@>q>>q^!k!Mnv#yy(Z4r!o#qr;1A>LOqwBr zV;oY?oB`5fSRbz>n+f3j#!~|685|aqlc2A!)pBzlFGJBbmLS!u0hntJT6!)r80N9G zhNVnH_xBfGMB<1R!j6E%QL~?*va<21Vi^h<)tPQ#DJkK|G+~ur15m1LTikAsNRd$o zX)Uvg)618lXr(KTZc++DV;8gY31>_E-C%a#7& zptp3F%(JG8b&DqW?`qp?Twk0JJw~0Y@ zRtGtflJa1@(0@n@fXzJjf;%YlN7UyGa{??^q=;Tbb_dKE8TEr`)qy#FU0fLEe&Z!6 zU6L<AS^WO+q<=uGeUNErT_sk^|{P9+FX*x=#!n=)>iN16989I zLhMBBw3xpnNGs?xXyN>yEB_}9G$nS3Z=eq`$@=I<>SHe{{b+4lw;#u6HqqY2TB<|w zMGKqBMCs|+`HDX-r{5kql(Bd$kbkhwbhAuH!ljhTy0&it>aNr+X7()*S2eWiJ>Mv2 zrQ0YJ=o_T*YE8ig5t9W$6mR`OBxJ$8zd!{2Sw!-32ud1=#!gEWw!Hx7=8kbN0rSb` zSN=fFII)L8rlY=_p-9V$I+-s%ob0`d_4ZE+YWow&sC(;?9x(axLK@(OScFUn0;y3W zgOq9$L@`qFc@l0(Sve&ZI8PEnBO>eMnec;6#<$)7`FM0CLvDYC)A0}<@C zbdtt$`%xJnFc`Ei?{}zNHnEAMEMGvvnp~?^6qSIX-f)K8r&HuWpPd84PA6&Fz`l0x zrm&)KVBmZ1#3O_#*stF;9sYh;jblL^*3)QO^$G8>f_LDrGO4H4ciRwNXP+^SW&q6> zv)YG@7}(5>drtt`(xz_rly$5pA@#d_)aR5|$o88pX8lBOA7u>^DKZA!yJ`zapptB& zT4$xI4KR>>RkY)ugyLbg1hl4G!)Z||nQukLCDLCFHT4AL?cszx;XE&TD_4DL7qLN8 
zli9=LcrLS;&nuX=E-Qa>67(Lrzl7Fy#qc%drjUFTi#6UAhV_7a==FrVyf89&;bkn+ z0SLg!GC1sE?+A6N;Var#5|-G3L6Ps+LM1|i9`t0o!*Ofv&#AKghRZ51+Z`FSOafJ5 zgKvtuh2Q2`Anrxv(}eA1lF0Q1XJL?)(i?~R)B-6j;^iUY=g*E$7Y!$^K{+D6cQ+Jk zg>>d~Qk*PZPV&}{vJ=^2k+{1b5KkfyaCXi+$if+v76?htEJ>?WN%lvB$amLd`(}oU zMuxf-Dj4%+@=ljKjjWz&|Au^yXCp*?RNOuZ1)LIIUce^-o9@( z6BM&Vzx|oN31#!{@MH=QHq!4OY^akel-%wWnarr#TpD@8o8LMy0=e1|{?yg$fF)I_ zbOR&cm7rgb;(Qixz%W$06H}Q!j4YmNxh~phEizxWEe+@Zis@0byhDdh)AFq;$$^1T zTxI}0NisKy900zd9yrf!zNy!uYnc@qb#OSIyI7t3a^FHTOTkur-QjMx+6bU3wKD-h zG6N9--NM$hoKOQ(pu0UgX0E~UG!s1%E}JchND*>ykTC4~ESQmI=5a{%6$g3ox|G7u zbno4Py!E-;keOtx^U$&C-Wd+RBks~_xswXL#u3_Q)tN^UY{v$Jbw}SK{UwG-aVVye)?E1T{9U+;H;b{Tlz9jLS)5_{%1re0<@v-Nfugs*$Zwj=~Usadt zm0d&O3|&4l=(S4t5%n5HukSxUcBvwQr=~oJ1Pz!!m{SDDr|_@sx^Drq}KzPn&33&0Jr7lmT|yG@TZNeL9~pI?PWSdZ}~LzF)O&6g(~< zeEgUQ(V&-y;@M(gR=Wk}fAu-wCk1b_L0b$t1#g{@EOZiu%OP51jMJEISM%fYZvpv_ zS@2pf0-lPWHt2C^B-~OeZInuI(xD*XDqa86#k!lt`RxlgFc2XJ^y9H?e&?+i^tN)o zT{-8>z1e@hP;l#E`x=?-)F^Ri0E5EHIl&+g&D+XTJuQj+41Hg#8(YJp3|;tk6npi6 zn8k3usxpaO#<1Nr-fg{D-ydU%;t$bJw$ds^a{9-78audHyt{`4n6E$hY+nc5<2epLnql^q@W>d$o;N6 zCa^s}JVHJ_U(|%}^d>=BcB9V@N@FM-HemQLnY^=K2Mq?~VrJWWMURb;RVs0Rbf9rr zgX^}6YSDH$MSfOuHT~f^S(4^txV%9|L>7xo-?3m5r5Dw08l;Fueg~=Et6ci*iKV3xIpZIcKs7E-+y>3CM#ZwLt=w|QQW;-(Y zA2s-YZba<4QFzdQ@^s|+tgltnI(Ug8fpt9*1LsjF=E_fA`-o28>j~Mh4?uT1RXAbP z>M%i5$xP3#b=&KFYu=zNcJYVtFnod*wnHGJ#qbcxZI@TC7QM-mf(ly5JL*iY5&dGz zfSDuygz?UuytW<^gp1MNWH}(89_nU94JJ2?>-?rx@ zVtoIR`xk(}W$A2fg@xFu+#anJ`jaIjY3Pyu12llTZ!wKCL`UIEn&`qJ0_xRp(fr27 z3&Znqw4aSesXH)0BtUXaS!JSQ_rz4ps7sSr|cI68^x0IvFJLB4QL2rLi z-%^y{@&f&eJxNNUYit%R=18 z;aDYJoFC~=mi)tp9OY{RjGvOlNxTmP26|$tfn!7gr#7+EcgT*pt;P#OvY4>CS@~k zRo7}Us0UZ0ELUYXsT(~3->goMtAI`qX*Z`q6&P37o(;W^&^yGxvXZqw z1wFR{R~oR+)({J2Xoh{Dp}-35wPA*8YPTxzgsfP3T;nW&Ip*YM%P%E_Gx!rcU%k#g z#dJT~6$d=7+gnzhjcO6GgFtQCa{Ph|61~ag|nGbpPxKPsj(sGi|L? 
z+-CEj@e;h{AH^Y}R<|8|vLV6M7Wdm_*IU3fJ1<;n3OJ~s__ka#Pp_kv0KTTa?;U35 zT-yNfN2ounsLRXKOujN`%-G%-a@oeFND+3)jBhLRH;Xhm zVHnQ>aHM2gpOKUHBY0?}$1`8zzD`dWtPQb@$Y&_empxVBtvCCgt&+K3i`pJ|G!}9P z)a98C(RUNWmkJVqfrw+LeoF!i@5tLaG|aWT)AVd{Z0Hw4F0jP9{L9VW*T)}U!mu@jUCX6D-m6J+fy>+ z)+b^iU`!3kFJ>?%(<+k^t$^d}hx5V?_nP2Vy6Ec$NNL`shMw9fNq zj^L;CA}+_5LZH|M>VY!N?A7`o4@&gF*3{*O@uM?A&wfxgDfb4$ej`#Q#i))hGBj4_ zVQA=ST5QKLTNMq5iY*>09P$>kL{8#zb9XQbOibMR0NpSQ2v^a$O}d*BskNdL`bAy# z170yL!kk8c3-bZ-?#`j^HwnyxuNTs;&9Nd|k=qqL67C zHoU_Ta0ipL-3-;(FLda(#uSFz$sz15LG=*qMdAQW+t3QMJ^VS%w%L&;EG$Tdm|d3N z?E{}y6>w7LJyng3$!d>!vnSCbg046q<>W-%!1q?U)*gtS@ygYyfOYjOJq-ysFLSL8 z{^5bg>K_I>exczj=UfAx%Z5aU+ibDo)3#U(LEAh%HPzfLXZy|gANhDY7J398wVj01 z*@6D!Rjy~orPpT2aaxw8@)l{7c ztFG$wm2In=(L?~C$~msZ8w|`ID!IP8AiHaYwJRU?wA!Kx)39rx- zk1}?p*d2^cl#ixT9G7<(k+7t1@r1YD0F;vHIW-j9p;AT<_(F?CYNXPYJc^4I`3EPE zk_RpPZbURiXaK%Y+IevwB(d5B>EonO&JRysoQkpBITdt4I;{Eq{9GS+x5414nN7#sPq8R7bQvFfc$J4znseV(! zsr$oY!U`bbt>$?PicG#ZoY^}3_K?}VcXx8C`10gfRb6|%fpvE+Tqzg(VS>kFI{hrs z`?-;@pa8?RI21)B{2aZlIx_< zwHu3;*PW-DY>o&N=gP5IM=-~b)ypY4S|lP<szS2P-tRp2-vm&vxHK0H{+@jn#Ff)<961H8oG`;X7g@ z%FuPpSy>^zw@DF0B?k~atjVjpA5xl{S4X91if0ECG*LR~>{O0V0 zY@AI@ENY8CZ2$RJ=!$XLiCSqfdyWXVx2#^^-IEl0C!)4Qpk z#Z5N1WWh%7#zuk3^c0_Xh_4S@B4R!&DB=5eX7=Z_151Om-83HpH(kL;3q3q?Rz1-OlVeZKuY zp{zvV-+47X7bMPpJUpotTE1=%6L4eJ2LE(h!J+$q&Ptp-$~X-YhfmqSO_E_IBpB{b zPyjj&9H7&9@hDwx+6)jn3H`O&FX;eZk+}cP+uvbAkcjPl>sdeIEcrV_z(}bu!y#miHV$? 
zn1}6;yZdolDkKy*=wgkc*HKIg77S#i{!(8^adf;Bn;#=93HxOq=6;aPB>x3&ZOWLm zX3_v(r=1&=lY%|8z3-DH2!)DDn`w>(4ZyKpKmukvxAT^gg0_?EA4lMR0XQhH1B6-w zh9(p51=fN>3g;s!jgb^==?+W2FLzN?KnQ`LH}t>&(2}36P|X$uT+aKKEglUGrL1~0 z_i7AMXIslD@$gv>GKPf4I^Si{oJ3m0#QD*wYrI+f7y7WNEWTF8fq)(I=MTsy48W_F z-`%Hf4`Y5C(dQ?a=m>d5&})kddB?3z<|=O%zRMh-1;(JnT*pZ#F+q`$U5vF@6428l zuwp2I47G#2-qWI~;)>PGKp>bwzd;1aLYdhqB`Be9;-eqp?H99q=>L}NmS+rGRiver z-*nLI!1`6?{?x}E$Nqa-eZsgm>yd^1DZ&`;mwJWvMG^Mr7>zvMqmOl^@j(($XN|gNaxHPpMrvi+rF+ggahdfJYTBA|b2VjH%EOak}VcGm&K| zHq*lOKm3gkVnkXnzI8$FoeVSTB7guX0TwAprLehh6wGI} zQFq(*8?6IjY*A5MuK&D;%JDT>T1-s3E1l;fQbZ)%d(L!%AxN0FYz%?90nr?$|PRpQ8agizXkaiNz-ezk1;9M`~nH@{u1u?t;!nN8yQ;PC*`q+yt|7H z2N$vc;fToHp35LBX4AYQ%V0r(hAt)pf@TJzuPT<|{QRBHzG*NNTOY<|Xgb;u4Ovep z5D6A2D`!;1A0#uy$`)I?4)jeifk|m(H#Sj8BF~mklVe5DL=$Ukqu9=!di}dMqgG9F zYZkMwT{A)Uyq0@VTx*BYYGZ6#_~w|=9GUK*r+M^^Nt@=Mh@k`L<2*aF(ta)!E^gSE z47FnK&o_ZL`EUr+rA=$3llaf1&V@lLcdU(RG@%DqWvv2l@BL=XvW2Xf$IN&KoJqw z-T#IbG}dcxWJ*gC2|TR>4065cyo1Oaqzo*OdJq2fUjfp^a$Hk(Boiq|}1x5*UevQp?M5%q1Rf~gKQ8%QZfJz zPAoZSd0~MNHm!u?U@kgThCw{hC>;KR9FQ+L6lMf;K+8JKD0vss2sA-KC6Y|F&d3)_ zEa|fMdxoXe0{0F*p077l(eb)`SIzWv+%FV9XDg`;1cSK)v{A|jI0CvMkz}urqxwzC z=5Gv`y`xyBeNu?EorKz5kX}x z{g`bYKstG0_=S!?*XiH>erJTdwyM72#Tz_bFn|(nW{dLvh64yPK9hI1CNIxyK;p&H z(v|;J8mt0{t(|J zzm`&xyz`vl%`Xi94FvT2ib@gcktbe{_0I-MoG zF9~P1#oSvwH{6pVK z6;6GN<@eL8Glg#37AhH_2FgqMEP4Ti5jgtYSrL?rL%!Gk_Bjyjvn{q`m0>K9d=Az{WwJNpp&+%DLPpk2i zj$r}KaZ4-k<7*NxuIQ>G*Q=+e+a}E>C5!_^5Trz*l|1n9{-d;2o-_#ZSDc?p#@yY@ z&DJdkWv$-_szL9Bq2xERo**I2;Z8}~MzHz$*q7KG!i&ClaE})`u}))p9DDrQ8ee;- zW&<%=>EicKF$syatoOLiD#H^)j}v-YW$=v-aZ1V`;d-?<1%)bInk`}JaTVQ#iy19L zJ*2WW)Oz^QK!1HVE7|IfW!L5Nc^W;a2&zf3S&ry@*bR#Ow%$72mZF6BGL{|`yz6wn zUlk9GLq9r7lIhBHDCn+7lR-W*2UmCmUm^Gv9fUlA(53us1J^`E=cA!$d_b*P$AWCH_0n~h29IbTmMSHZ)Qe7J;&rEZE(h~quUf-Z8t1c8>8 zjNhKO1riJLF9hsmf!vfrr~S}HPX&qWgNHK;?WUN-(LxxoSO)>?Q70bnWWdWHg%$7V z{+!7NB3Y~85%dbPTqSiULG_PZuczuA97BY0V&v9+&~$C$3^HkdOyuNwGbFbQAa?Q#r(x`bn1LHWYrZA0%iG~ 
z_^VFoaRfvSG9EIfy1OlnrBX#DB(aY8f9*Mga0COq0G!nG>HA~|poUVSoCY{P>D`gs>} zAZGq&x%F=71e2^wXXWpq9yv<|pRJ4uA5xqizOMU?IvUh@m@XxP?MSB|L#T`EcF#T5 zzfe)OR|!S}yp_FiJ+kBdX-6tO3be2o4W8s{{}a4X#i$*dM`Y`j^M@&+I4*kiT#ayR zD|#WpK+auWO{HS3r^$=c2Ul^_IthRGBVDbA?Xh?z!~MD6BkkMwa)vT8H_n?4r+1lH zL}I^HV?~B(D}xoXmvS>OL04der;i(-3K0b57MtttJBiIYkI%Om`c+y^Z>|}kw^J0O zFX|t5el#<$PmQ`JmmMFtiik42oEI$Sa+4?h4(4b(C5o4#JQgt8`PmkOV#W$obtcB| z@-(0;h!>;Px`_|B(dLCFWJK5@hSiF{^vfb0Zg#g=V4%p*psi}9QnZUD)a67NK0d?W z=?3U_=d?nOthM!f4}>6icV- zrzv2lb3t$iL|Oa}$D*H@iu^`;v4`XJis^Oju~fq$R<3csP_Usm{11W{g)|q$T%@dg z_s_GSFtF8mGU;pwiq(PF{tpl#7<0aQWFa)Q5cK2%1RmH|WEgM|qDu+z3zLJ(9JASg zl+LQ)L7<_HH?Y|m{hW6SXDEJ!}eA+&1f zYjmjCx0 z4g_Hj)BH4tUM7Ru*@t`kEox=~^$QQZcU1G2jM<#Cp=rTE6N~wV&QyELnm8~&{c{W# z`h|G=-;@%sL0Jj)kB!%Oj&gJztZ?N*nG<7h;MBh>5!f$KNV5foZu@AgZ1IklQeBd0 zkKt<-%R;UoR_rb`*ZH;?l}JmO{SItb+0^#&$q)p;7~RB>mSog|M+rTBDIVhe{WW% zvRPRtt5ZEjmsLIF;>Te=KcBC1eL0Tp>T#rEb5fxTf7XmF9vJ#~ReAu`nkB)&i z`z?-mN~fI5dP10(Yj5JrF4W8Iq59m&>B-&8@{t(CGkJ^EV8Jv8Ko=28W4(rsNR%oc z8Xa|z%qTH&4-7_xwbNy0{ykEN!{VU#67U(}6ciPk^c%6Wigvi~t_c(YXZ;9g^2z{w zGVRLB0CqINq+%*8FcR+@`9Slrv@A@&T#H&)i>umV4E#=OpYb-kxepub=}y9p{f%_4 zgoWK2aI)Eb<@X}(u*-B?pZx1ir-7iD8DxJ(GW6RN$WDZe%5OAMl7S126bD8FMs(JF z)D5JO<%%+ZIe``pwz+q3ZvjiWRAmk*kBNOjKN2BLOx9{-tMt=BwbO8=Qjo2@F)bm_ z4-D6?a6ZD+knpl=qZrZ8`WqGT4r0^^s})M*I2v5gN-SdidNa3hqC;w8z;E}LuMOsz zUAG}TxszG5vKJr_DQYc)fAM6@d!pqsIA&7G*KqcUpN78F>c5II@W{{BwxB%~gGkzx zFPt?wV$&mjIeq@-q4kHgRs)=CNj4WEina2yCOmUHc%JwuJl*n-T!iY1#a-IZ96c4ir@Y7>(@>a|0C zYepfeqH9Ks@)Yi|b&AR#V=s5Xv5vF ze;fBVGY{~F0BvnDmg-wgoR;k>v{agFs(?bHjXBM0)6(DHrtt@f0z>mffB&f%W5ZO5 zvxzKSc`(f+6B%?nf&dZyElPbKYo6A$skVOd6gy3ezxaW~>+WrG+P&O>XI;NaU|*Fz zG21c`nI!Jav*kK8uaRc5k{-zzNhMfTdvYB41%r(PZg|vH!4XpsP;r#w9f_nPM3(abvwJ96zuo!Ll!@Ov3R_}Ha5Z;0FXMJI53eB7~s zVY5B(l@5Y`rn!*W%gGk>Qz}uA6U0S_b#sHW%9x|zNu{JRlk-;#oYU>t+WtxrCG){~ z)DJ&Y5jdOGJi{c!)qm_d7c!dt?PoTNrM@9$bXJiY{z0wHf5IOI0 zvpxJ|HXM%D6^@%UU(6l6AI#uG=O}Z|_I+rILavJW&B8wLS+vTdFQxeO@UoCgAn8AP 
zr=MB8h1D~X&oyqNAzFrNnt0slbw6*BiDb>s(KL^8>qi$By7++qPHo<5eOW;%WY1Q{ zW^mbhi1>kJ5hvND235?&H2i4E#U;m4g#D z7!IqnbDIctMN}D1@3{CDE4~!g@t;Gn$ez5%!=b&C5idmP8;$ZKaTCd(&Rc8gAG2kn zM&!>fZWk$|_?Sny+DYYtwx?-!5V%9Zj!Mw1m8z4985y#}WtmH5i`zbWuH*2>*f_>3i3S7&1m9qvYYr6sCL_<;P1}e}G#6X!X*^+F zyoO&RAu~eq*qmVv3i7MN0e~koKo~Z#51B~pxxSkQqrS*cRu+gV8w6sY8A520g`S;U z^!iPh;cx^z9og-n#c*KXJ#Go1K_Jr&10w%x$rxl$t?K2GV4@zGbGc{$JqWu~J{#%A^$avEp+|Ogn7_Y^n6M*_|Yn zDhrV(HhZ?vDEEzA)wy5wiNIf15K>&kg_1tDp`^`0=m#<+}-n@;RD7G1p1+i8v{1 zRoB(Yc&Z~i-gwLRe|dE*|4jnCm6|S%C6PluKR7Vf)R5Zz3gfhApdw6=ZcXa4O_J2)7e_0t^U0od<}^(kiyiWewJ|H<(-MFah;3)>%mlGGkx;`2J~EFRoWB-37S6u|ywcDK5! znGIchT_cj+6HgNg|9~tLBiPDKOHHlug1}D0zol*7Pcr+O@X>5^HAN8J(+3QkhcpSb zi~p1x87QSNOc#O@S>TSbiO#jat$_xiqCQWXWw73|{G9>)HX*;Y5 ztwqhFWHrS+NTo?eYOwLVf&y^lqwYlTo+Iw6+9+15&L{^o3h2 zn9!m_>wx`DxCid*Ws$NB&8LxrEJOt{6z7wPi3H`bFP2FYU2%3SoWZ)v z{VPKkT1wGHFJ%j2Jqj^M_MQ|xO(F;@q)AqOsj4u zc1^C1j;O@SfnOGxB5Pv>q0rPb@41iXe^FVHdj`#|?w$p}9&`Sn(C z>B%#r(W>%gp5B4YkT_I?DfmVEh{^=a;@03k4jvZ5PqU^`mU+-uOjr(*7EYlSnF#i*=LzewU57-<3=lHs( zuTMX$!~X99bwXeVn&&o>J54o@TmE2S_`hUxX;8Bv;v+{^QuYqYW5NRQsVH?Wsu04- zq2!+lGqoz3?JgKGJ}Fte@I<^(|MN2yyQ?s#D+IeMGA9iM0&;#Zb9wo(7>eunn-#O? 
ziw#Wr{KXow_~8X|K%e)+b!1b8ZiiZx(}nc4bk8qn8Gam0X*e{ZC!XoG30mhaKkxYe zJkftYom|AG%9Q@Mh)oG4`4F%S^~7ItsmACyV_vOFB<%qpOk3IkcBa@X;0T0_+IlY z3$c3_wG7eTS#baB*`Mo8S5mx-i=wjRbG3wC91M?YXq$nKcjBT9z?BPFw6j9iG{X8j zi#aXV8&9spC{kH!hnzk8@n09!$kEJJ6`QFvvRFB2=_SPD@`Txbp<92dw56AaKK-j7 z;U&#Yk}4j#8^pDMxKIB`7ICbL{mW+&TwkIxBBkxa6C10$Xl1is(e{y3D!$+Qq27=S zHdlc-zpHEjC$9*PFfbXg?2b<8#+XjnhH<&H@1m6!Cjb(eAHJw=4WyD)sa7!K<)aXf z(-xg*RVX)>-^JxKB^BM%7w$YAZ61s&ZY?7Ca8y)Zumj?VBD`j|L=@7n zz%_Z7u8+@u+v9&M(!WfXZ9{mFnawyUx1RkJEivc(7_Ab?EZ*R-m^U$OQfbBS;oZ-m zSKhdJvQm2f>#c5O3d!+b?_-MS?0?=zHiMTxB&5u=Ru*N2d(8ppx(j<&(%!6HIb2}<}jP=EG*J!cm%x=Z*rbms=v^kK2_?{%i$O5DwKvB zn}Zw)InqC{MAhs1Hy@hg_n0rzR%duy7UqfA8cIBty2?1&{aPDx-S}x0a<|QV|2>7G z*9c}l?F_ZXdr1e7lCxWJn|i#mQXfqFMq+Igqk#-U2mA*c>-IsQ8NW;XrN+(O*v}?% z%kLvK28aL1bTfgv8}ybei}$b=P9Ee4qi}3FC^8eCZ=u*Jlb54gGa3M<5Cj5|zfDY3 zYC_CXoAQ0|-FMO2Jw943*?nveey_YaFQ(7l7?DZLgeTgn@w$A!?5m&tdp&*~e|u=t z5#Ceu5UInbrlsQ=7nD^d7Jbl+5R8|nRZMRYx+QB^ao>`o|KPb# zVYP7)h_A?tds}xt=k+}7ADmiJGgPXTInAA=a-uG#P}LrYns2zg5V)4nsd{~LonX>r z;QQd8=yN@aGkT}}bonXx|E(s?v+}95p>v$T6K}sq)?FZX72vLD*evAI=x966ajC1d zjt`;AGk6sg?QG$lE?V<^(jz37Eth|C3FSK-IwVc3r_`LgL3jZ=P>Gt1(e=v z$dPUzY}@wn(kiT!Tg-|Z;c7>`KjQ#Qmd*k_lD$IfHo44tLbA*If$^=}MvC_+HiYW& z5`vhVxv~7mFu!Us&0VRuK;OTtHmtilO^FcxDs-v&D8c?)W`y9 z;W!KC@uSmRTFgu@4BiIN&DbRbcSd@=PrrYO>8{l%HsI4QQXL5fptABpe^kuxJjff* zq(CN{+{)f|H<5`arhp5Lcz_D{Qd#r!%_^AJfwYQ9lr0KQ>3uwG?hsNC-8h2>i-KBAN_r~3=ad&r@#@*fB3U_DfdG3Ah zch}6UHGiglRaI1GW#oxG5oezhdpj+lv2LOFj(zrs>FDjRw&qK{da$kbmepq$j!}xc zb(AF)BHKePvZgoLtn_Ru;|^#qo86nsIm1u96(4QtV^(?m> zP<)PmeVVSUbp9fWp@sOT$?AMnBwuUQ40a5m8y!UFVm{^i)mm=>9fC-9t0K@3**;hl zO5M!N+?-07D@xVaDB;GzQOq)Kx_88Rbk-S|BIj7CBUECz{4#hjmhPy166}4H0UIkXY5sD{xe)lmW<22>u2!QZ?H+p$Px^+9U~C%<%cT6jx7 zc(6zTG(k9rOFMYJVz`Nyv-+BE9~6bPhFrHRHN3DCCf*Qxa$_7xx1AxqGda!b(Unvh zuIszT-U-b=`xQcJM&~mI8An&%{m!py0?&Q#=bq&Z`fRIB6(2L!^D?O_Wfxls|5n5| zy6Bz|{rG+RYm{{58pHk91Q(FH1sD@2y6Ql|D}4p|m;O{%?Dp4Wz3ru`DTay*9SK5H zL>AWm*h9al(}^|=2K>&_Y&is%Y0P9oXL8l54(xuyMMT*C|~^N|NTieK-puy_)dvb&^uA1gOdPHxg}} 
zZ5h)oOCy1iul&_EWS~Ww9e23gqu!b{66_h29?YH+;ZzCTJkX0MSuyRwNEU(KPo|m_ zEY-cJYjb)u@P}F3LuQ&{8Ngfb(O7Db}YU4c&(Kc`G~?gLaGE&hEfi40wFWOG%kS5`^Fn z1EY8??^1LYIo|InHdPLEg*+KLN^0f3zYPhMm`B|@>KZwr1P|9V%s&-9iAu~oSWSxk z+0SCi6fHkS8&{Z_IcHVm)IDb`3o+zNlp<*2WgnPPRnu?Z#CE*K`!Fi$!&}eZKS+bp zZaZ<2v|?9t5^uI_CrT{I>MrUw7=vQGTN*TA4UafoSqmkV%1*lC;M4wys>FybP#ekf z(3@>12`+dvW857SrT#l)?$S|~N1a!ZU)^GywX!AT$b~cY8aI|qrW^PQnE(O}uOIcf z1KK7GR;F3h89hx$HzP7ZgwYq>Nf}Ew!i}c796$2IIH3VK*5;;`i7BRsl8h{=M228Vfqstmo0ch!GswWk&Kl>G}>jLrNvU_mqi+}=?u`P)LyYyE>g zypaYWeR+X-1=yfPP@t?CZh=~T#!Ya{VQ}!Yila5)krYyuxlB|%Q=#xG@grRVI6vKL z7%_XNWmj|85!sWcIrf16QL3T{?_>@9*xUW*HJkf$t*yzuZVyae^`H#eA8LRqG$-xM zuQh(V*1WyiXKBFFGihvDmE9q&#n|D!g4gBXot8+h@L=`sbV?S|TwMc>%@-q-#9SR9 ztEY$5+gZKoj+IOW2C&-aS=h7g$?c*@qY*Uxm0%LVn1QRdyZE{KHrM&{h zSlkg3U4M&enWfBwr%W~%HL8&5iZf-)O!5InHj;r*stx{s9rXa_1+u4ruZ#;gg3lr^)W-bkL;*K!ZX@_ z$f8^?Z7_h?WO0~4`QQJAh>G9vS5oY!)o>Suo_%}3;`aXNl!6;lH=_uDg`YCfkHXY#KNdn-q-MxG2cgG z%e+N+?7${nd-gKJSKDj8>K>m}4e849MYQW*PaTC)j*8I5oY=Vr_u-(#^!Ucm<&O<{ zQ2g}RquN=Fq_g4Hmv-(mK~uf*&}*_i@hNE<$y`uf49`NrpgXAp78N5?pf0AzZporZ z^P?SEk*c1NG0uok=>(M^R$ht%u1u1t-+cEBYrudB%#Z3(yv!Z67Z_-OijFaXWnoD1 zzW|)~+KTjl@f)5JYH(|M#MA~YPn$F)HbE3r%zBsUf*Q%4Na;wCcqH4+P~zKAe!{y! 
z{@{H}&WhigM!;gdVn603}!Xi-Fs$1)M_VRQ3NBM1l6y~{CH5A$I%c(PazwubQ&pMxp z=A4w}XYI6l#5KwohLeaS&UQ=e*U$s6<%ha85lZuA_v9R3vJZYSjlcTmFYOIZJZIpdLk(Ro z2FG49J}$6xgU#n7N*)#fAiV#z@W=T%3^uQl+`r)IsaL^x0T(8SG1!7-4nif8+)w8m zx2XSdz1Fl;LnJVZ+liI=hlIi%<2_!X-7OXB&1}uv5&d~(WaQT<>zAz1Nkh&so4ztO zJuHp(*DHy7EIREDr-fugwIwHp1UTP2TgXld%<_asu+9s~+LdAOtL??+3h#bqgj;WGxvd0%G2GfqQ%B z=kFv`St<+Tk{}M^&Z*6wHuy91hAiSVwNlR$g_{#g=jSMIV3f9hw zezzan&gCWm0fhjvxN4pz*%Q^V(TW2K=K|$o^rU;pWsmJumCSH= zMq8z>2JZ>YXg1?C1_U}A^7>$+=(js+5Nl{2#M{pVP#jCb59x(9cX!Nq*=@W~TH{rC zqlxmkekpwTDvOyZnpK3h|KAHj91rln(@yY4XNLc4J_9Q&3i9t}767>kzZ9*qo7RvOxlh)&j{I2jM+#+R+{P`O z#9L{H@XrwmdMoEzf##N=);PAqa22~$APSk|w-h92JSF)4PdK4Kj(l0q zKDRUvb9HLJ3|7eAmr6G1vXiv{#Zp^Dg)pC2TN~WP(V1BRvUxU!y?Mh)oKJMt0r5`W zPFabcD5xsH_4cMxd9z<8`gxSwU-3OEuESr5#nXpi>1x;azbdbt!Z|%@Gl6-&xJ}f zusp0CX~ZGSWp!4-(xHvRg$0q{!Wb0V-7KS`FUaD`&Wn2mY7v_ z9eTj`j8b>n9VoW0Y;54VNFg|i=G~;-+ljNON602;c`!pFrR&UDa%+rCFC@Ek#;<7P z#5Z(A<0Td%K?_OlYvNJv!_U`LkzK%nfkgl5{;8-Zufi*@!Tw1cHcKo+o#f@5JyW49 z>P6O%qvE2|T#^n86&UeeuXKt{^!QP@*`ro|lEfZPfvbA|Ryf!Y>*5<8&>?{%#N~|j zT2#Jrcgj+c7^u`K^PMQb1jv85kt%q%iO3~hP)sQ-ZuZN;xO}n? 
zvxRLod!o89f$1H?vg;!tuxKM)Q38(?5ZFKy*_~OIdrD=uB`^55cqTGZIYumrdj1b4 z+}&u-X_Q!0$2uI>iU(#dQJ1bT*G2K#Ygp_8jEvjUHe&i`y*QNhnm#acr|cc&16Y2q3HYJ+e2-O<#Mr2Lc_l`iG~xW#*uLOt11YAc_=?w zAus*Ph7>hH&3s_6tEwueiw!es>KG!k-SZZSAfW|u`Gp$u=Aq^0Ra9+{Kkb*%O;GNW zeAn-NYW_5`m|-7`?|ebzM5MqnXAAWsvvr}YOI9(katMG=b{7bOD`PTHQT*~#^9N(I z-sQ$1m84HHAN{qC@|3%E7y=_BV&jKMF`VfKe$9nRXy-6Ejz%~qz8k_lHS27l zo7P-+c3Mg=?ZcVGwcz>M@>~r5)KZev*>*7`UOiaTSikz`-NAwR2Kv+f=UN&XPXZ)K zbm&}IiE=wRP?QOzZ#>LRreS2IWIY{8EbaN_*FXPMiGKaU^6cU`c1SB|juS*uq4vBr z@@{$LC|7}u(#_pzv9*W&5t7k{xIuwXW?8b!FlY-w=yAFI7x+=z(PVEv1ZBO-N|*mx zF9H&s`3e&3^}Lxo`#;R^`~Z`lp8tmAf9(^3wj=z9fa*EqWDfNEfAMzHYAUPQv;e8bf08xWr@yHR`PIO&u&_V`9#KXM z9qza3?6wwUwEul+P(Sf~K3-S=Z)=_~0K0D~m*3p7_}#6V56S+kzo6@%-%Xh=2FsC1 z81#O*bANjZbKGeA9s9!0=W#bYGIDZvq6FH7^WU^t=)B$Gdk)wyx{`Vm9_)BSy{8_=@AKqPS5T5c*|EXE&Kbwkk@m$=5Yh7tVw zKld^+<4x$L!LidL018oAsc=%c=pSa<5`{eywD|udL}CxH>K*}rBaeH)ugfm)W?o(} zBKAI{BI*BF#0SNP-W#{Bd>-A5aEZ@>hE=KPKk1Wh#O@3}1ZJAX;v;-OtV*d|28aC~ z)cyCYIpi6C<&&qO*{>DqvSeUF(^okM0EaMgyNGYNaF5HXdLu|n(xgJ2jC(77` ztqg;TArpj&-yuwb88IMoB_Fg*6)6;Qg&Ou0bNU=44A%CX&DQ+JMP;3smk$0(v~2Y; zbQ5tp7pqgq=e0i^wl`#)cp@64>os2MDIu-LqkI# zNnf-l3EZuBUYmmWa%SUCO=W810{Whe;o&M^Q?oD7D)Uv~i8_=}y1arE)gsKPOxpJi ze&ilLFE8EFEcn%tDqb+#Gavl~{b?bX{c$pr zdzMzDKpNt=eGI;wvkdneFe^`XoCnT4 z>BAER4j@-$8f%p084*n^59iGPsVRr67mE1$|45W5{s}6zK>vSDZQLTGpCAdX{L`C^ zqs=>@x1)K=UtRMv{9^RN4Q<08>)Apm2ruh+h2q6MTVI@U3#WB!ao!x1w)OfME9D@5 z*Vo^I41tyq4*(d>m+1H9(P;d#GBhkx84mhoIK;;5q-&kJtJ@Q4W?&Fc#>N#4JI81x&WAf=m=82G5^uD$?|gPA;~zIWj|xJv04H++-J_xIlH?%zHt`8QFf=)F4Yd zG1HA83Ee9P2Gme3G17XowZ&$YU!7oO>wBk0pUF{Mq!QtP{GceAqY8QqPr3oh`1vAr zxAA%J_QRGkBLD)Y`^2K(mc%yq#JW&?+kNfkY??zp;yUz8;TkoaEDfKpsT**XTK%OP z0>qMEo3LdYm+b1_Y-&wP86P>Kq4d?%4Ekx{fB*({T zOfS&4^LB?(6Xm)m5Pw=)CJce)dO3?e=aY5vv|aR<3k#vl(i}1S$a@=d8WP#p99*)p6?8o@rm&s0Vo7`jB4*64LGNOWZWRPD6goG8s<~sz(|VG!FJa>2I92C{HGQGxX{A_w;KH^`(7=%{_c(ElHvQr zq+0IvNO{7e|+Mk^CfNcn# z#mmmmOl)q|rLOkJep>t1i`yg4Ztcdir&|uRqg<)v`|>x#;Zm*o( z96Cc)XdmT;NC1I6g0qeN59%a&vhTJ$RzlpCtcO|BaLj$wxuH=09ve~^r(Zn~oDf0L 
zssL-*ncxE6oyZSl<&iSke5y)?d#;exz3a=B(&wZc{&YlqF3|ijoO}+whg!Pco#`3< zx#lS@w?s^!cA?vOT~iF4_Vq+sse6~#29J|!wY;DQg()$LG(dnjfAflRxSmO|tPPGhlU9 z^$Zq)*+zr2+4PoXqhKiM$quJ=tK2$K6R};J%cYUdOj1g`S)q3mbjIPBjrLhSq@-}H zY(cPbDTj}z%UwF@T;$FJ3ntGdrP@ypqxlW}IaWZXTeS*J=CS;^N<^^OmdbaclFqTY z_oD?=Je5+Q83U8Eo%L#OWpXBQwO$i2cnB6pP&<9DN3pSwPbX@cx;>7;N z#n|djdaR|5)tTP7-`-?m5mK_gn*LhA6hZ=2&4=LOzww4`~I6r4kh32ejCnC7qMN|i9s z0&YLY@GWuY2P4IEn zo4&pjBDmF`zPmjADBAr#)%-(SHy2a!r~KZPqD+)%HX(FKnWJ3MH(9@DM?__yv^Et2 zF+U%=il=f^0O{pJiLR6&JC7LTQo)@WO#0;An%rZ^u zN;FEXE3r&VN|Y}X`_ri0lamxEsuTlVnuIWt|F-WsayxzvSs4*3<)q`jdv^lbcc0DY zc)0$~4o9odNDUXy3#}nh$Ntct&nq*qV9Ni&rWM-NTo9CG-=3+*3bWv=OV4CiW z04nDo-~S^fqGP`jcVvidNO!Z}gG!P)n%~j;Lw4fC7V)7DD~wjUB`(gi{4pI^V7lnyS@Oa!z`TExOEpO3l17-Ldy}J|V{yi~)2GQECflxK zk7Fs*$~>#9Xg})hEVt+0ME?90gD@NyK`>JjbfMRF?4+Ch4WS!^5&QJ)oM-pxW`YYe zMk1FJWu?_hHa)pHMk}u~NXbo9gMQ-gUYhrV%!BHuOEEjC);!mvCCmo(Sz>z!V^Y0Q~_Hs8L zKwQ!A33A2!Svopa7d~_!T7HR^hLRCdF|ElY>1|wXiQ>&OqrVE961;3e=^3lq_Z!<9 zhi}XiOh||3o4!*mc`n@=$vo(anjHl%hmOg$C5&M;h07?c{?5Sj3}5<%SF2J!kfP@m z{`sKl;bau~f<<)j+|aTrT?l`9Z3>-&jMKD<$xt)Qi!bALTLn#$l#D90#6EXa2}ScU z{pn?1mk_o*SgG`G0F@M+4L7%4E?y~FFoo3;8pVA+Hax3zDN{w%gyX>Q`Z0Pw7Ff8R zsi;9$$_I=)`|<2i(cDOd^zUe{+4<2 zgzAg&E3V?qabe`ZJIF32C9ett5mB>CW2ZoiIJxrW<$1eHv~;YAjH~V$L&_!`9N|1uHwRH!AtwuQPB_*NBhe zaiqFf^2ivg_{O+1YpXrE(*C$|C*F(M>4vy}WbQbqs3Qph4U+$AcZ%cIAn?i<&godN= z*dGa(8Szpy9fh8NvDj6KQr21n=SSg>>z@?8jx~+FJO;lEXeR$m&&?6vt%;|@RtNM3 z&TA!%orI!eH*H+U2?=ReP&5J}I_s>r##rFm1_vRX9VH0T^Pn<|_Dq`>I@vlkL@Y^R z85e(0s}`xQmA+;PTX2Y@QtxUP*BK1i_d(s`4^T5^cZsJ2Lv_pSe*1=>)V}U8@Xa!j zNP$=o1xLkRB%PkGu}bG9DgkL+%tmWtQ}ew~{arFNt5ibN)=E}RE=FFGm1EOMdq~#8 z8oS2&ISd9)@pED4e|vRq+w!yC1vf+7_#3K4lY@h9|FR`Ic&S=X{P+Q9UtweLuY>FYde;ASE4f znWB)Rk8w`X07P7As&@! 
z*p?kqH+SBqExjN)UqapI{a2)DuXz5W9v(dJ9hSh}B;emTy{+Do-)~Njq$Xr@@2~gO zDRX87RaK2(zlNeS*=^Rp{O0R?HS_T#VfsFQWM*cRc!lpNP*C277NJTjC+)1D2~BKZ z*dC4nR`Sw_93UETfnU+pX`LdNSs9+8H$5$#9@_BcoJ$~4s6X_swaMqY^LMOfIq`1) znB7(OJZt(oVW&QUAZ;JcywrR&il3f7%3qv>xPMxJP~FK&I~gxQLt7er@9yjU56fIV z26{bb&Pt~h62WvaIqS2=y2hk5Vl(-X$KCLDIlQ`#oivQ}_l>fO<$X1W?Ib>OpFzSJ zcgV889|qDp0fov5N?ssy>>-R667T<{r@`%nDfmX*gNlZhBosT?PqA;gC*8*J>8mAV zN#FK*iB9S6kJa7Yc~GN%ua>*G`E;SmXm2&wYctU(1=dx%6t5+Xhr)7j*4h(HXiLcv zW2+#%Hz=QD+LE11ydUAk!Eqzymkj}5PW(nTZxaK#t|&k0Q{|;;%uJu~Q?e3gvMx>w zVW#oNTJ)sFCVRg7TuBHwrcI1Rvy+9P6054g@tLBE6WbN9+MaKt5S-wCfzyeiwwnR{)ZuWM$kF)9?p^ILCbc(PQnZf0o z($?Er#d14*+ubS{f!HM-m3^CD`b_7l#0O`4i-v;pYtUX9gunkhFB^xAa=Si-S92GNNy9lm9&20!179xdMs$7<BJNI%ifIBRNf zd*y^WAJnPnArJN`+q7Nwhf$p4&2iM!4x$NhQB}V2`KkvUqmiaLXVwGHD!G?m!C^QA3P;vW5gd@MjaS;@RTu9yF*%znL2H=~w%{&oz?E1jR; z(!-?Po#<@ecfTTNdvkKA0sy$5Z%?ISuw2HpNegG}smN`v^F0f$y>kE}^1E68E~Uxu z%{WSH*x=$BLzCymuO2$xU1QZjMF4<|o3n*1Ncf{$6&ob{AwmEE$pv;dw1}lZ`muhl zx8@ityh{c&ouw&qx*0b=%*l3zD;!D2;wFjbZ+`KWFqu7`P9a45C+!mu&`tKke4hIm z@e8uSCg1(_^~1vhd{jOi6J5{0#pOpQ>1G3;KO6=&iV>@>?q5VcQ6Y-Wee^{KLcjqm z4;w!S3A?8`;Hb0pMz?%Em=yZLZ(x+8-XVW!=Zk7wt?^t;y{BBRb2XdFrpQJR6wU^2 zbE`q*%|FZE*hm3#HbcPO*}pVez`CFA?L9Ii*9FRfZ9SN($9wn%!vLV@hobEl+&c=S zLU25*dEKwy{+=ulSc_&q^{Q*`r{_$a@-|5<3_2tqjA_C>vPZtYUM;LGO33i_h-lC)+nEyu8l8N z^*V6Hb2!d-(ws?`K+4zEM75kbIHRH-X6Mct_@L$Wk=cReJHC^J$pZ!uy4zR`6xSI4 zYg3eA6)f0yaAWU+3h@wCp{5-Y`jxICJni^O*y?mT=P2>~u?xmxA%2&2URC&a=vY)r zlo{#0p0ZdBh;%f={1rcYzI%pY##2R$TSs@Q;!lv@ALKBR0h6EX>k*2!N`cM5bR7GY z_6g+4lKVwDa6>u^Q!QT5EP{+i!bW3YZCo~XTcq-0<6CsmNlv{~#JToIJhZ?=%(a-r zpf;{BDabFBy>!`nAai(pLK=~WtG2Y0k8+m1nZ~4S?0WEEb%GO*hOqbVW3{G-FyX){kPM{U> zy}j`O=NY4YE3AeMmsBTfSH^O~gz7}dz5IJ_&HP#dtz=yyk?@rBUnQ9fG!-1P&DRK~ z!N3}&{POf+e{zop-*N5b?u1?#vYptesyu5Kx*wF>!pBqH2uxvz>YJr6 z`o?!(T+RnZLe2gWzcd_Qm!BYBHsKZfWaSsZ5Qm>my)?)b3Oi%lKq6jdVY~mc+%%FY zdHY1a4>+%bN@YxUaC$sTVjs1V`bc)<{)hHFDT7~Ka3i6y4X%o~1T^BJ<&5(`KLYkt 
zYCs|DyHd{Lk$^MqKTCqkPx%|Z^dI+=ycSJE>soL$>Vfaa(QN0djdSdEG&*I<*f^hV``uI@%tkl@|tRh_AwY?c8{n z8p{vQ%uGub_&LmkjuXV>exf;5l+U}+7$;ie3!SYE@qrPdg_i256kCvgh1>6@3Q~lP z;#q9AAS8So^P$CkoBxzj#xEItCy(;7zx-RSuJfwtT&`u=T3fajuc^!u=v5+eK%>}~ zz47RdR;|@hW;qeR%8I5ESkmK(1a8=`nv`jo#60t&Vdicv`O)Bkdwr*w(`D%m{S$zT zZ`Y@{KcCYP$5xBEk0X0XX%4eRpN$ypC4Y6)BV-1{4|~66__rB7aYHUn&eOx^a<9p_ zVYnEDMqsx17C5h~;WUx(IpKKpw9F+YTF6H($;AiG%c6bEXe zB%1ElmCRE4$^Fsn>{v02%B}f9&~XYZdai#U0&({afygH>KVSA{y{VjJxQFXWNnZXf z+D8b~`y;YW1ZtsFz4D-BO6ejhjOj0Hl4qQ5ai3HH6dLhmXN=;rJApI>~>7X9~danC<;r2n3?fOotY<==D85**GQyNV)Rn8py8 zq>gxbpJZ*|Rs;9M+z87~gvOWBMTDs);^EGAATXa*?(3Ru>hP8v_39aU-G%CZ@%aYV z)Koa;=W86dYo|y^|2pa*;?&3wZ|Ec75AI3PIHDl$8;W_Zc_<(YjhN6hw4ix^Bz$;w z_%}Yj@hz_IODgUSl6} zU5V>ElYwD(_AqIPgyn6oO;n!ACC#bMW=e?0{hP&iz^FT!QK~)`eDCzGITJP~pDVOE zV}d|L_NSoeK&5vW#>yJQ6vkTX=gI8b>b>L-%IFYiq?XQC@e^iTNC1clR@C*9%=-nk zJ-%13e&;X5$GS)%5g{M)%RNdaPr~D3t*PA3Ca*IM*@oxu($YbD5P>|^w=D>7{%?#( z@1h0ZWVE1q0Cf*s5Mlj z$>_D?mDv1_P18e}59br-D`MY9;Yfq)xLt45E051Osq`s5C-O*6HhI134}YmK*9$wb zv{ULqbUFkbpVn;?!N88vZSSU$6C47i)EJ;^IjW~3;D|7uOeSCt=RkveE8v;s#k;0B zCP?=}!<2xFjMs_EQ@_-aen7(JU9?K$mPb{#&58W=amLy5dC~g!k%|~8%-uHF?(v`Z zRm!ER0K3VEXE!uT(*@uRocU4~JVGIe3AGeg5aPHyU~S#O>Q_eMzONi3}R&3JtN6}yCUfFB4+JAA8fY$ZSqkcq1A<|2%=v{ zMGd+(l$g+54@Hr*WM?wo$qp2Je@-gTabDAI?qAQ?Y86{;r^M&<=l6_eu>?ajc4T;d z<7{_KXG(swMSr-+{I1`z`%OP@maN+mX+Lr6cFATx?h(W=eR5aoz8Lw>gBtYDT|j=F zeH&E?cML355s`gNL=w$b>_0Z#jy_2KsSowYDvB`9yd4 zpl5Hk5?Y5NTUc~-i%Ki`d~?9%GOrq(m^=vbX-?rrI^`?AY?)dLG!Dw&hf5HF4N-D( zkNRMthR+t*3cUURv&*sk;XY`j&mN3VlxmAS_{*jX-QBegNQDJK!h1vOkxERY-OvaL z-E4kwc|;T^>mSi*@MO$K?>DHDTGECm0fBb#nD!@%(=&cgUoa^z zw}c`iHTKmGkD$pVktt2Z#m8I9qyz89iP)`;ii+NT`vC-&s*M@55**T;bD;n(iwRj= zBbUqcuLuIJHX%|eZc~oGOo}FVduIpw$1}DXp(U3O9H+2lzmO0Be%lFzp0T1G+}lEv zVC-j$Ox7NLU<$$HZsGsqm?^lN5>wN_01?r7pWpUC1kyAr5|?}=2VdABsP_blWD`0~ z9xvb^2p#A(W4WB`Qwna6YQ? 
z9tOc5|9!_vn56;nnPh*5)z+?8>HAZ`*h1%)HMI(SlnnL%=*_yhsk1Z0+gXNPKr4Y$K=sRsqrzOKnAbNiW892 z?#fJa6#&|SRjQj=XpF}|$|P8#OKupB#}^Oc76KeUSjUnO?nL8pYPtY(p=M`6&r6ti zzmWgRZud%lNPmA|HcRToR<*~<*PWeSWTd=eF$<15M^g%)<_EsWPXdsuvGHK#0d{;@eXyeIm{=)31P_k_U0#nv7Iu?wRF7YN*>3fW zr5HI(K4#D0udTGZ7U+{6W;!2RI>&0t%8$VUdd3o9!0kU>LJG6>7UQ|Nj21J-N5XFi zNY9?Sk@3dj(TJ&2@%Z34e(xI;sn4tHOmei=dlh%f_)zT@K`x(62& z)rG=c^S)KB^#AtE3jmBsgkKka4?-#*1$$#0$X- zd+d2Aci|6q*p*`y!9uB)PL(kQ0DuAuR-f0&NQsB1+U?CuJa#`8^LQpD>}5jq7b&Bm z)gxsYny9q6-Nz)J{KlO3v(~Lf5LcSUqyp{)i0Xe;PFHLW0x}EO(2i1PhOlUxx z54}#etS<9+`QsIODW*10%CpIJ5$!pX^rqNH)_?o zDCQoAMxnFEZlGK3vtU7G06P$lf~MqhgsH9(mnIA7$>J?UqXbPSe}~0^)@p>0OWJwU zD#0T@$=En!uXeuga12#9OM{Z#ILkW$cQ35;wD?@oO|_t9*9W}Mr56`DGtbKKsC3@O zENJC<+VXLb3DQNuZdk!|Sj{&BE3PqjEnYP1I1}LtrIl&DcdNB*8dkv+XpIFC0PZjI zlDZc7Kq$^;RcgW8$S}82)Q7@&oMD3;+xSF1BZ<`(cyO@MEP501Z7Ugw7wP59+=Q5r zFjg`MV~m-0m&4{wILcrSh8oyIIR1?#Rz!4$DL>THwkoq%CY53og7Syx(Z`6QlJIR} zo2U^EMl`iF%|@C8jQIkEp`?0khNFE_;UAEhKHfyOIg<7~y{gvhMuYC^1Oe@X&H$)c<#E?}D6H=y=yz1Mbw!Bf!G^dY1OxYYa7xRP=R_0=CNVBfgO$^^K0^@?=( zL<;xMB3PUFXs*NhmdW=NT3egR!&EWG_=l#tfXBMnCoOc!-Lj|K`iSLNdKqhYa+(C=pqW-He$yVZS+SXayEi)O$6&mpx2@K4 zF%sU>XKOt|&G2(>m3%mC)pJqR*yg9_$#~(10AOOlE9Zna6F@yd0B$|U^YWNYMMeEt zbW|hD)g?AP)p5+HV;gK}E42E!xlm29JkF@Nj|ODY;tllyM4SjFLlb`rIntnY8h#V- zk+pq_mq*ckJsWgp^JOrf{i@4TtebT@J%sb41u~+i;!=umwD}efunOqXu+QghVd_LplqK2e0+-aaF=LG@X#TN-51hAdaiV!|+Rkc{0 zdpy$jK>#9gH;rlxi=!eC>16B+;NLas{lys70|#mG$F>ve_sWvw;6JN7YB(NP3puP9qH8Lb93 z8MAc!E(NzeU;o>Anxs{qP>!^*^y(6W^NQF(z*Fdz{KiMW{(4qgF4q_DR9VJy5!)wj zu`#+)d{>I=>A93fPk9MBaGYrU!~MG9aKLz_9eKywZLVpr8Jw@5 zi8Or_p*`x`Mt*RskV&;eOc^NaPZFRPeC>j9E(8C3Wze;$yffqKbT>=~c|1$bNk>l0 z=1ZKSq-O-!KAOVv*M%n)vm+IoHI|aN#a!X}wdF5kPR?CM`PC(2mS3;Yde};_?$J9Mx?3%^$ z?GYUrwvd0J4Bn9rF}pPvdk%680?3E(hh*n}HfRL2sXXyk?`Kk*&)nXTnTmlznpZ)6FHn1E(1)eu}cmnd$Rv-zSwEX$U! 
z>yt~HnKzs3lARxMr&29mS9eEgh8~S)N=*m;g0GK!SvAwtj@`wO_}>wO94X}c<@u#ZBT6P@xdx!kwN{=g)_1`kHU%Krn`>X8^@z- zu`{=*?Cpq`O>0Kyy&2)lCMSpYUYBryyZYw4{r%6r6Qly=W7Psw@LRI=Y*Xb){6K+& zvCXq&V;1{ICRNZK3nng2gZY4vzr_8ik;mkoN_oG~789dfp@t$DU^xOy!{cj!A8{le z65#EReTAW^fh`ibq<>eI7jHUSnI5(ILRy1GW-BY!RD#7qi>;yOn40?ZdokKB7$Drp zdmHiH6Xlkt@3S+oK?!Ic+P@sCRc*1x-%%X#NpZ-bOCSXEj`jZS*~!0IE~f{p84~cj zNx$fEP5iEpFexf8p&M*BFYCIvN$jMlX`~r%qtrj8mmbzfQyB z&ce&RnuJY*=B{zz4Au`;+M7dCL1;Op?wL1e&4Iry17>sw~8D?-F2FQFQ|QedDPg4 zrf}Fh?#Et|0p0DZ*MQ&&-u1Dwjpi{+!GuXrp5@wf$=b(a`FUGXqv|W5hyjgFnoGyo zqs2)FAw3Ys8`-H6{|u!5aX-2FJ|*5+2=BRU>b1_56fAq(QE`6ZEBp6} z8bI?{atiK{7rqhf%)@m)uVtUa*T=O#qtsznGwtfM`{jY;w_e+~aI7OthP(i12~Vek zlxi~LZ_11W&q-BUebEHuCMKXDsX#(W|C+?*E)>M*UZv;D{0q6ZVf^tANILcGhP+C_ zuu4X2c3FlRe_qIY; z#7k8w8e|k88s3DWgtCRu8w}v3P~GB`zixtoNRkQX!tTf9>QP80=R_9oo{7^fJIF#;WCA6&mHng_ zBAEW}4!yLhVui3XVaRxSq-d8b)MZ2Q?sR%}pkDR#_ph}aIG|>j>?7hR3vCAK=(K(0z=?u(!Zh*b(7tX7 zWS!M^ZAIhd8LU2*O@(L1EAoxbh%8Qo7pS-Bjs8Y904SITQGs(A{F$!hU1~!(nF-)n z1Q)7fpiP>^TiWTj_jsofhn%gaq z3-Aq%QcAl&L6aB!h_r+(5Ji|?I--;da=-myV`&%{cXdY%sA2#6PaF|9_4x$$#e9*N z^>lv3%ZRiPK;#7MH86MzidfjR2kp-69p1Uxg?z;&MjEdod%F&?VyEVdV;U=%b#qsw z_ImE^?c2Gf#3Vpus=fQ*U9880M+hpR0uV*v-KIAiCl{j|7<3hrA|#yeNBVF_GU*Xc zR+cIfx5sO*IA~xNFlPOBxqw3LE$hlF&O(%s!W z`-=a4KjV$(em}l`I66FM?6c4Q)mn3|HK(}5M5W$*x*7u;Ms#%3Z$BAS4^`bVF=!{( z%^M=)=dt<0r0I*g(_OM(nR58t+j9;>PPjbHw+`2|;Rr3}{{5W(+YImYP(bUSj0b*U zTviSjI|6RnO7S&D0L`ESP7|bpqd~9(0BmjV+8$)k6{-rO`tnMomY1dbBEN`AsJ27n z;sU@*9Xil>_u552BAXA%VKdIvq^v9`sn9oC6jEFr2>?cpyxQxlQ)t5l0InAHub$!|rH^y(Fp z!4GSLm}s9qi_7z%Q_`>+8C_o^?=8@q)nS4I|ME^q>ctAK)u0gm!=UV#8R|~sEcY!} znq-cUGnxJ&PjNdhApGA&4PT!dFwT&D=C7I_#)VdTs5b}{w4|UvIXUru$>CV6>j&{fufI%K+RnA1*?AKr#_p1s4}&)#8+>s2J$DFa0=H6Pz{@1VF5 ziLG2b#t-8U*SKckU&Iv4A)CKztjrWKe|2|8Glk=gwYx_8N(mSo*(891o2N3{{Iag2c4! 
z4K9`&%o{V?0=0XiD&8#YU9oV4vi5)GxF5lEIFX7YUb$;Y&zX0-?+cG37C66#U|>*v zu$F$p-{q_=jkIoEAt6w>PG*{huHCD zx*UpA-YfRh-R}lAgob?vPTSl36#Rzwa=N$?*(EtXx4WR<;qP{kbHsM<-NA&B`*cNj zx!`-EMrRgX@dk< zSh}eD0ToY;Lr~C&T*m*A@@hKWS4X40@RLm0OHOjTcjbg?cN(+m%T7}}!I=i4-^)t= zJDC=*_fO9~{IAA)DKp}wlM+A2_vA|!7O)%9P`)zB#VuNhDiZR_*mn~Nu7M3KlGABU zmYLzh{0jy=!gdy9^lNXd$`M5n>)x9)IwxOBQ*~aQ_@r=j#hcLHq`|6Y5X;RpXFs|y zehEI(BijloN_cov(OyZ3ua)v{Ucc*P{~V1arpzVnwv|_AGgVt^{^}N%kLWEh{hm*7 zJV@^4VXN(inKMwp!mwgGw5I0YypKtm=)B?Zg{@z;I*y)x^fz8&#IQnRdbzh+k`n4S zoIxC#R-K(TBRn&P?TD5l=~;uch{!9Ugn`eO!8@+pdf5(C3Kf^SoE=ec2qEnDZC8bP z--ILQl=W0nS&%H}ecM!-GI47h%-h!a#LbOOG^WMFX-w6^;RQ-;J8mRK1j>(@Ecc+6 zmMgS(3AEp%3Z3Ij{gurjK3yXY7X_ci(e$?a9b%rc27X@Da4XmB7PgAt(7MU_1SHRd9ih+!X&v`=vHA8M~B0y^dIu)vBkECXN=SS=0cys zAAA49uL=pYwnqGiR&72L823+__OERD`J3A-`gtU=qxGqKXAw8{{q*;19}8qq*gug4Bzs@eyg{46eK?tI7kX?m+r0?@ zsh@X_sft7o3A7nLNceBhZ)3hVyDcsEKNdQH`izmhSYzOoGjP*rxwwWlR5tFE*J+S8 z(vU9ye#Pp!rGokAOBl0Q0+mvapDupa^sEhKOHDjDwg)Qbyp3fMOnkwo7zx?F*h*!`Xv+j>F%B} zgk*3#3n^#Akl#GV8~ zterjy@j?F;ByySslMq4OU#(sJKNcrE*=swZU7U^HRgQaH_By2|st%C((WEAdDrK9a zqYLrGexY=~WeEwzw#Ysl#N0m6`^*Wd<%?!?KZ9IXkNc5%gZ(+T{myA>9N3(c;xG=l zx_BMWg6;r(x>NX{vWeyw=|&2$FxGWkIec${{1fe6ANyb0<8u!<2*I1?5syQHzQ!gd zr)Mxu&SY)}+szU?t-F_H1sf%uld9RC^Dzeubh6(SSXik*cT8Po>{RBh-)&pPic=rA z{sc1_y&@xf%=QV@G^?7N)D{LF@Ijf>?x5sq06smNX1IPl9oE+$pq_yjP{kxJzL}y#A|PL`5iFZB9UX4J@XOba zV-Z%h=~XpbE*A@BoA(3^0%-c%)u~)~W%G#@2Ups9dVnuiHIG+dJ_R`SeU>`o45~63 zR?H3i^5s_(c@6=8Gy%6^p~eJb=H7k%V|y^KEf$#H8R7gzF!rmeiLmA>ru}8n2}P!3 zX+dk4Wz<3ptb(RqQ{m(|UK;VAX)jMQ>nBTTRU3%s>It-Uf>W^Hsfwp=PQW~X!@ai< z^_^aXAxd?u;u;+GPFjUFOEbMhMpb^*XSQo zu*~I2`!9+Sufv=1ONpgnfuNh}XZEPvr?>Q~;#|R}C!L8C#_Oq}ywIA0N*vPkMKbC(a-B$)v%48(StdlZ+>4 zcX!i3@cXM+t>$?G5(H)=XOJ3AP^pTdRr>apdA=}QI5bPnhK?~tz#j!*Sj7X-hzz!O zX#9r<;**`?+cxhnDI}tXVDg|1+UWePwK_#bL%LvO$|je$E5&QS$E4$FwFlwQZ}Pc? 
zDP~_V3WAG9=&udpgj|3qCRQf=kMI;Vac`%7H=%NI6k znPE*dOl07&&&4=ey)wrmTb_YTEDS^H=i?%%Cy~vQ-Six9oNwmCHZImOT&4QcI(65E(!Q#($-&is*jF z=Qd3?jXPR)ba_^dw4RbIF8PU!TdGW6f)0^QUBABqt;Ivlq-LU%D-6O9=5jw#iM#2{ zd=3pg3@*&%>V8nRpD%pQIq1WZoNS7mdYQcZ9JQc{p^ zN*2x$e5C*(0cTvpR%4ZfFVQgEzC2}nUnX$C9hi>vuNfH%-pdfvN=@wjeT4fVJWxXq z1;i&07z)mP{BXFyK0dYaHL8M1M*5mEQQ$0iu6nC_!&Ih7wiOIFRyyJYAiqX(VEU4T{!OCsw@ViEXKElRsw5`FN)t5$f_>+)_iXb|H9Jhlh!ZnByIAPL! zz&nn)6M}hq^w(#8jbvd&A)W2elLHGENE)Arzcck%hxgTnTeYaXAD#b-Vx~M;dL@Zc zPKHeBQM`no9bV~6k=)b>K_gUfXFqi{9JAr-ONNs7b+Ur9-IC0Ek31<<`X`#M@qj<| zG%gO8B(#^iqH|xjQHf_AE5LQJCG^*_h*K}inAp=ysB>m&NT%xj-r&H)=>2h%ND|bt zb1du+uyu@1W8SMRG#^kl{Qw8ZW~T^2u;8!4ld&M%U$a`wOT()S`xGBDSqI4}0yjb= z)88h7K!*KhDgSJzs_J~;lj{XFNFtkk(Xi<=&+rl|u$ij~M+N=(ny3}(W7&7gPgdXp zqTeLKwZj6XU%hm{mPnOb5Rr*j0^}6z$A0RVpg;Dy^k(l0(QNexr6=i}JB~8Fw9DND zUV}0WCua-P<3LftY}PM@SrIgqOW*1%0$B&vNGSgrMr?2xZ+;R#yt>-oJe6d+*{=-% z0AtHB#2XDE$q<{Vu*<`u8WuJDd?ly)K^dU)>Hrl32^b$vTb);Fchf79+XD`V-9z64Uf;I8#rnf%0oDFMqwldC?!#Uu zzHvS`(q*b&v{~=IbO=pMkPSVQ8y6L!D@c9UBo@#5tcPY`aC^#n5-J@WrsHg?TCTnw zPp!iOKQVD=hxmXtq^d^72Qu=UuQA9%lp$cL=F_wECk*r5)5D8}=&am}pBNa@Pw?e) znW_E6g2q+#DXL^;Shz68>0{6Fkm1E7HDSs`s;Q>P$O4#q5p!5456f^AC?J`s4|S|H zgna04$8$)K5rqvg#Q+V z2E5Z@Sm;(~z^L$k4Pq{+pjIFlCh^)^B2q6?SY12X{;c4>JZ(tpUBcR+q^5-zHP7yx z6i-26SNJbZJ9vf6GNzlg;JS1ITsjXTMLC#-tBsL#ziN<;0sz(tIuZ|2t;&F!xX1gL z!xcT99eY!wS79i_`YGxl^p&7nAJHv8vb%?lfM9Z~4B2CwY_>yzqthZgdhdMn9!*M$ zG-Rbs?Cmeh-0ns9OyqHeQVrV0b)UQ9vVpDPq{*U%wwCT& zegr8gL^5@4h&(7YeV!XSwR6Jr!ayF%O3wD}okPmGF2`>F1y!hbwfEu?o}1n>(W;N7 zcPR7_(l~05s@lSy+P7mUhRe$MjUKp*MiLdGvokM{t@yK%Wj$Kt^0d9W33mJoydLvh^Yj$z*-)nX}Tapj$tf1MNJLA@+Sc zK3QHV$M}WNOz`)YRLl+t%;(QFBbh(|$@@T_nMA|pfgko{QXLWZNa35DO{ zE)4RdD`solISpDPT-@LaN$x{VX&}a?i682$whtZ(yG6jNMTz{KsdpYbKEImGc<&$Y zQer-g(bpXEz6i?--;u>(JD|w<`J0UuuUMWSKx8-NZRzJuc3BJ3O8!-=yUzMo01OXFMVz3vP~SM_9QL7A}5a$HOmpt8}~oG>VRPO9_U4(py8q z@4(QeQeSZ>WuxH;%iGm#Z4Kx|CKv??&1o57%}sAOZdQ@~{a+)W2e0-ES0vGf3xp9J 
z372ZKQy8Um>t`((Rm<*set!Vfaf;|e+K)wVs+K3Wc;8@9gOJ?ibNLTfOVwx&JYWO_dAUtxC5uuGp*URyKLpNQQkSrIhT(&dsVtQtF-_e*Fn zY|HWEYu1tNAJ$A5gv`L|WFHu(ho;9~KDc3x|ViM||Vux+bHuflpE#lk=ebXK+UFY2^rJI9fOM zn-XXfsip5#0;ozlpBZ;|3E^5CEw1FtVRCAD@EBNxMQv$l1}VM(>X)DwGmEX*b^- z`0B9%1=czTuuiJ3NEMhHJdP0TvjXzbcN9{4(N^8JwffT1{eZvgjUr3KttQ6~s|FBY zzc}#2cHRAuyq*J+K?-s?-1g}GPfEKB**1Ryr^{Y%Z=+$tn3!B16PemEJ|IYoQ20NJ zaM__rzS#&vLHHCR&?x0`OzL(3GB@hXV3l_1Du~-=UvG{U(pLE%@ud!>eNrG$Sb45& zu$oSndBPE3;5+P1sjrM2j8wo|sOr$`DD&HyJ~x~g&%2LT$W${-%V9Wp8p5^{xRE}X zt|+9dYnI$HD!;dXU6nOnq37owus81Sz+m%%A}Hj69iKzD{b68u?K^#x=YyiZ{SP0K zlLo8ZWy=`L>Z6@b7=X-lm`?GJv%&8!zdMtgFBB`4-bVU-lwYnxE|^O===#NmO7a}r zCM^i(oUilCe`HsnG}+DVF~?bx12y*VeKM-&-sK9VlC12jkh{Dt>e3n$iYa>Yar4Qg zp_^vz+Z06HB$;4cJ4xsqwM>(-P>jWDCalgH#tKwcMfzd({`E`mNVZg@HpnL}9A}fJ zIu=S=@pU>LmvQMJGqm4o)x zuz(l^>gRLzBag z&GyLJdYB1_z-|hC`U7uV)hXdm9>oKZ44EFu$IxNhB^>WNjI7&P|NiBy=?W!2y2q)W z`3UW;OAZAan<69q=6hqaBqtd1HP4z_D1()+WCWSxrkd%j$#pwicKH`ONIl^IP)k3^ zY=Clq-`&y>($j7Yf~Xtdgi)ZND5S0B!6kc2k}KV;^n64sS_AQgq^sMjvl z#7dQ9d(&4sK{}LgYkG%LiGdTlUD7;VCd&XIeef-UBN0gE(li_Bh<4MgN<7P`(i3a254>~* zgAAj3csdPJ5Br|*8wPl7#|x56&`w)R;+0hsPd%r2!(Yi3SLp_2{H4HhrsUxNUj3JqK>0#Y7nx z2cvbCFVo|bVQXta7^jVf8w9Pi2*Vx;o=J+B4!_7?Caf!=xVTKD9Pm!_n9~ZGmriE! zxp`Gz#G|3^soo0t?BLDyPWG*_f0x-c5VzuDpryTzG(Cy*uAKW#f9*YEbJC{&+R#U; z?>Dwj9}0P-X(1wYz8LBI3y5?88Z>3?n%~ad9a(y|fQ$P)>kIf-|D@`fXWp6qU($Jx zzL0M@k`~3(5wd1xNb@$SDd%wxe@cwg$~9c5r~BMB1hU3sJWOg~Wk1qOCnOX}%=V&k zv9XDUpsld;IocmRJI*nzBU{I1Pg^XZ{lR_~d}z^YTW)>S){SyAJ186D`m`KIgfhY1 zf+uEl`$2B4QRu#qfZI_y7CDzyvT_Nmtxi;<83=O&ZI4G!v;%o3KK5FAA5a^=A(V=V z?i@vl%M5t98dKRDeyiS)lkt@@jR|_~>>inRj>9}~)%z;nB`xS+s~9{zel-SFfWX{B zE9-k1E}&e4fQE8a(V%k%8?C^^L0Z!t%Z!5GmdAcsd?h3p=Gi7y7tik+wy0=rn~&ya zo+^0oqjKAYr7>vu=|cqq@7#?V8l)I?gVKk{qEB~Ot?947-7(2Lp+RFNL9-J-YpeT! 
z&yEE*G%WXPNoY!W6W$pVH?-inhOEuwC$VDce&M1sBx@P&kD~e*eUiof3+3VEVe*Om z*I(%49puu(j(ijez7?!3rep@=w^S})*7XL^w`Fr6vFrS*$U`)Fcf&SoE3iHW{SXb8 zTiX=uor1`-T*hucmm^ELNQt}s;}&OmJ-IAC>kE!9_;{pZ2nsQ%X6Rd*3NxksnotLsDf@k6IaaI-JM9i1*JSYy9pzOiK3AZAe53IH@B-4 zTNSsa%iw^Mmw#QAj%jYzLnH`&eeG2rYCXMay{W(OiIw=M&_?WUElA(D5a&avA4lWW zZP|z0<>}!<<@Q& z`d<3a+rKxMn_ruW!GJXZl)gy?T%}v=WtD?T3LE#MLWz^U>y7$JQ$W0{oi0(DEN6L| z$C>V>@-<^6t1Ga8HK-7~RrKCXX?1lMVtk7tD&vik+`s!R51cWwvzMtfZIdB<^$BeK4 zLxReNAuXgMMyAMl@23{J3^7V^JQL)Bi#lb9ibRwmsbZeqaN*UkCWB4Q*7jcJI?jkK zso>##t$+`TVd{_8BOxeZCa)Vd{ftX8P+_`sRVvU3jdZlp^Dgq*I-g{|eP_Cho`9+4 zwto564WFiD%K?T7o(2Pm7i@KX7_Q5(6)2ef{4&{a@UOZjoR~ip-WrWX-8-B955*=d zj-^Zf3r(kWUtg8whtqH|T1CV4&-NWMAhTK;Ry`fEldykNI8C6D)T-?S<>ByBY_I0J zAzLAcPN_r#i~$y9$tOsgEssQ?)l;8{PFNz0R)Y)IX7txXTlo;g zPqzA*$Sa;{s6g<)pNsu}2qOH(8L^Itqi z*1~@ecc?M5{VS~b3MvA!kvi-xmMAFbg?q`1{?@5Xv(R*6xw`)D_C|>qPKuID&hiF7 zC0pKkWN)aC%;TgZmYYGR%`j!1uljO`obkS|t+R%w#__BANMSPZQYBig5y{Q(IB!Ep z^Q4+T_kRRDJ19C7m0m34g>sD=M#~FqachW7hg#b*H!K0 zt*rU}=KN^=>j8JPYe6zuY7rshUL(=M-Y#X%3CLjDY;^73%I02wKTS(%|Hu`KgTW0U zkV+@$C@Ku?jrr3*k-Gtp$lp)AvKW3;+keYcD#+v3hP?2gD9>+fs6{zT9bQ}`?9&*X&YdcVWQ?wAt{ zkI$H(0slO;O!Nu#)PIS?hCSEZ5dKFgHtd;2;J?IScTxZMPyN5XQS&(U5=Tk-VvYLu z1Eq!iocRd;`vqzKk%QuG$oR$1N8OBbP^humAb48+2zfm3EgDf}{-YhPxP+C>rW0wK zo7#h<3+c{Y*VE+&LX2QM1tdN(@z35Tq|0uvm^1)5-N!x?g7+2l83P+=U(7goc&ZD* zsz>h~_h+1c2JsOGyglV`LV@nMi^0inkBc*p$ujXdRY!NpiReAIDZ`xFIeeicCxw*+ z?>!P{D(dXiN8mbwA6uK^6Y{$bGiIhe=y7p3J)UCDin=l042bMV{2-@7!Mt3D2d205`yROUHn}_MHs}QE zRlc!_TGbzI8SEEiSI%1+ICkN<9lE%i-d>0O$|=%}z~}1ik+Setl3$&x5>XrlJAk~O zblo}Qtc|T?n2kiQ_LB_WJAbaJ={0!1#rgT~5B<}9$w7f&eny_mv|%v{9}=JnIU^?x z$>HzH<aAqJs0}ZF0uL!_A{321|WGG~hGX5h&n92n$`R5e11EG3gq> zm9L96l9MftBgyx^iZ@^_>M2nYS@^bqO-mJpBWeno-F~yeJjENJow^myLp_1{*;S#n zIcUbhAx?YoB>xE`z9FZ$w$?YUYPnaYRN=kdlleaq#r`l;Q_;4&fqs{l@6@XWd3d3K zg{JJ^Em3co``iw!N+0(q{xon`snJw~kZ^?v`sO_zrh7$}rIVhr4tP1A!wziuR^V8> zpME6a_l{m?UucgpR;zhW?fb-tY+AvkJrT-r?#n7Gz@qqM-r=y+k#c ze)>JEDQ2f&R~c=TC~jYqh!+h5fKD@PgNi-iPyf;n9z(bePGT=t(^|-S 
zVg2@jo!bG*dJ+o*6ATJ`t8-kt_&NN^D5(Iy z@rmMPFdGd#FXWBJb*EZ!SBa9bCD;!;G7!+n^T9AG`b|wur{IP#?Jtvoeo0PR=r+WH zz&K$hnipH)`h!)GV2y)kY;1wdWNmlxr}j>0b^E9+;3Jb5x0%^^}3<_y{-?qX9oZ z7}g8vH%nt;LIvE~OVB6@U*PblRl|^7Ca@hKg4W09r>p%+lu%lv*EXqYtsXx3#h0nJ z3-Ui~;eq44+%Q2J{*=LQ%=)AQ&VL7iZ~7fz!3_>H;KM2?*O>U)u@^5AiD5s=FRfTW z^82FACx4q!eGWZQ?(JUe(Q?0n@rR-qC+Bq@-}{iXDrNeTy*(g6{H}O$;OEy4p)D^a zmwQ4G=fJNRNz*DTxFz+dh)*L@|BlmOX;iF~eD1vUg^emXjY2j>YBVd=VF5N*D7Xm@ zKzvl9+ZnAk&=|o9l8Omxgaw;yMkRBQosuj7Kr!}owOzM`gEXLeJ@qPOsr~iL6q;7g z-_x=%zUPty1^}|MnktuovR_yf;(ex*)y&^KR-OCtHeOYoGf((dA20tPT3uDnv(vBO ziYAC#ULm7<75R3|UW6J3u>P1w#?&NCTZC{^m|EGn$k(DwSL9#_10W+q0r%^0(nX|L z`Q;i=K)qFXCZ`6w*z+Atcttj!Sji|F2KmV}6e>}y?hkSs%|Y%e1OV~1ST^69qQP$H zh53=SZgbt_(IyrP$FKI%S-X_n?NP~;$7Q@wiW0P{;Bc5bD?PP|?VJUVfmA>;^5+<==V(xo{w4^1?C}^ zB}(U+m-#aMqk?|x08R}tOvE*>*9rBUShYo3)k9J!YB()g@IF3UmIMS{Ic*%Y+HThw z#YJ5RDF*G>?@F<;==&LUhvEn&uMhkE9-snDIf;jwG{{5|vG5Av2#vo!{<4}Mz^u3J zQ~2Clt*_{?B907-65-*C)|y`0Fu>}=0ryn+GnG-_wpUJ;$>K;V^kihk@Amgvh51pF zCr=%3AMw0ACVT!J@fUb`C3fe4@;M+70F;YlhZ4`X&MrIRr4ilxRD$7oos~c9l``gw zVbfbwAHk5yr75SR8%%#eI$nxn(2HJPR;jm+XMzE)Dmxe%u?c!l1q)TgL(W~Qi8I=c zgn*-E#aQAkd`}DRW}ml>O=7EJYz`=`o`EkJWv6&?gfU?#YTr9ichme^r3_Eya3&}q z!iczdYq7t*wf|9uKl>A~XBE-$-RWA;>U-JL()*oJ&HmvnezE}8aJYx}OYoGe!5mM>k}D;mX^Z~KfgH7_ft%3)US8fYyV=+T z#kpynSDc7fiDVg}+lHt2pL0p5%io9`NBrKUqyEKKxBaF{{wd<|t~H~WJUB&Xv52}$2B~_!Y&1Jo8WmFbT{;Cs;o>HQCaqRMG}={vI#jM zV(Bodd@kUZuNUd0rGP1yDEiP)pu)TKtGCYYV5npPkX#(GTZ)au(fFlw@&0*BuR-|% zLCIMnpfoXAdMvBBT!mgo-TCJe!fMq@=isNu7KPt6g5!rNk&((c-e>%ON(F^#NSgdl zGOH5)4x}W>B>&u53Hr*#{UBffWzvtV%X+M2Ec>~09I0bBWr@S$^E1cR%nA)&bBMGP z5-#qa5DX4X+ik`M3u<7QGVG0ZA5*w@$iP*g96)Q1U4xz)VS;i; zDsQ{id3scjBYnCBs4uYTCJ|GE`wob_jU?u5%Oys~Vx@W>RlacQO>BQfvKh&+I5?h* zuK1T((Rd?7XJeyuBNx20q(jal_Z1jmRBnBqO-&sy@frvS3lVlsTxa*aN8I|(ZJy6; zl!!M~Dh=Xhh-S%q>}_tCHzX0|>iHhWoA$_am1*xG8G_!)L_&;d7q7Rc2ZSnK z5Qi(zwe{D=SCnJQ*ZrL$LBHEHN@lFIah;vO@I)AA6g+WD#iW*tiOLOw?Ysv5S_S?7 z#0FP&kAtl^PbbaSB%IUzv%mUPZ_nr1{wN@r@)2K=80bb#)mBO^<)Gwx+#Sl^?9R8u 
zC9y4sh*KSAxQghd9ofvfv0|oHY*7pP?Jk}dS4N9C9}mP})R$8WekLK5lj86~ti6Wb#uA?txdz_oQNnfkSG7(1aqLXNA#2kf&p}iF7Aw0 zcbV{C)Y7Am7lL(vs2-1*+QQ=DktUcQr54C*r6tN9uSjT=1O22JPBya_?8PcNy0ci! zevi_I>^p=ty2ty6)bHB7h@zvXr)<8IVD@PSQTH30etpbJPBv3=mXi9o`iTV|L29}j zo33&h32rP4nVQ^2UQ003dQ$C1S7s7~$l<%-_zsmdKx!0$#B_%J{?DQVS)0U2??y>9^R zgPrZ@KYP!Oh}IpA+;@gG+P2oxgIP~vpm}b|pts0+M6zGzo^vex>lraq4#cWF_**DZ87_JSZ2Znp2=XbQA8DW1vUCm6%%vpN6>EH7}>G?4I)ysP2 z@EYNnlhg}-e-W9}rub2}1;uu`)8iw;*03eB*cU9ra8X-^oV$i^VCV5t)BA9uv~M4m zZx2s?#wH@5CR;{zBwigPCi$q3jo<2?cwNn%>NEaoCv9!AZ-8L?Eiy(YmS@ z{}L;5;n&7^F)L3Xe&oshI>td4Ny5s}Ql9jEbuz{G(em%3TZLyrLn(9mYQd>LT>JsG z{LhXfhclknegC;6PICjj|GiQ}|4(s_caLW+_kLT&1`Xj>xz5o8ljfw~K2B854535P z)@}MoTGZO7f+m$_1LRYSzXnI4E|SbH4E$EalNI<%tZrZLe^Z2A*+S>B(=!Y6A_Tfu z|5FPvVsUH(AMb9VTs9imY$Q5c)Y|6#?8Toi+K^0-2NtR2PXFdyvy*-UcXms$e7d8AK)aNT)G3)sJd^rM!aHF*k(Sc;n?xIKU z3&=B)v|V5j)Q-4B#FriJ?f;1yUpy}Fkdz>G1#|Ji#f48d5mMKWF2bP|2nZ=tZi+Zd z>b8^RsqgN5$vE^a5lR^p;6)R>lB(=rVNgiQb37XgZptyjy2z!|iYyu3u+p7!$Ltq~ zZ~6FgxNCker&>S_P9&&87yr>Zc?HrP7lQmpy*~MRCniKh5GmG5r0H+ibKg=FOB@NF zRp`4mXu5Ad4Ka)6udWqB<5wCW%v2TTx4IF8MuLlK`5Sh5?R3UFy;Qv)(Ae~;M@x%W zpcIO>&epkOkhNhfKM^}i0I|Td7LzqtBs(-i`|r=g@ZR0qL zW?}2)+aGmQB4iVq>NT~Wd=m|+N}>fsuz)aI(#>G51OENo-bX`&P15|~TpP_Bx;`j{ zlp-}S7&x41%bpnP>QeZM-O&*xuVK^NSLk-{rhpn@{iVG}DTTf|Y5Z z(bVy4Px?XhQ~GTrEU8?Q3P4&*R&C~3gtSueLGg3q=K#GfejW&drAvaVU4~`&mQqr@ zMw{2$s$Zpd$V*TEF*ky+L#|!R8Lp2iRtG3WrwW5e;29ICJ9*89um4mt}xC2 z_{wDJFQrTVGY>m6SZQ_pM>o8!KMo%)!d!fUYIHIyBU8=i`cts2OUW3x*TNXW<+|Dh zR&1jcBIgJpyKRpxH+cWvL?K#mDb64=M9ouXcjY19soONgT9f%h*N-x@e{vaPU;pP} z?~K1AMUeNpoxr+RHu{fuR3Tc=WXn@S0z%~h??(^M-#QI&DJ^v}Jdkrg5QW{>lF4YQ zPoW)Kk^k5nzcKk~yYn_zt1zBNS9JF#_eX#;g99T!3-8f3H|=wnlKE;yt=bRN;%`q zM$@l_Af(SfNqP5teEN-j=@}q@4NW@|vdqH$Yc ze9o{ceft(P35yDUBXPBFnIkhe*;i`gQDs7rt)~!CLPkcK5Ri|K#TYr1hR}6#!Jd!+ zCAwDaE^3sRw6`IZfvTee#>e$+&WE@+%FYIeQ6rggj1(C2xZ;(xQCN2FS0(PTzIH=*d z?TS;nP94w3!U(C9h=bLKlyXXSBi?(6xE;I`yr2jZl`XHzad|)mlayqIRxdHGmtf?W z8jq4?PA3ZHef^+&dyyC0(LdBpjwp#qDjU?B=S74FLBUYK@iKL_%df7DbL`GR8LctA 
z=o7@hq5gcK%PSi|Sfji&C5bU}aB-&Tz{P(|Ou)z53=b&;2$-DA)3iID#{1?7CFEP$ z`9i5)8ef?B&O!^pn>XKw;)BJmm#{-W2nm77&S^+Aae9jZWc1Iz z#CMAgp@+>4y5O@y5c&b9wkr+qr*poQ9Gp?HfWzZ2WnQQI5OxA!t@#f%S$cU4wyZ!-&BMIk>)EO*&)@}_S+wW zqynVd6J(&5ov3-~q@;GNvDcbjm#RbOSAU!-H=i}iP|{~Hx}vvVl-E$2@;KSu5Vjxn zRj>H5>J&R6qLc-qkvN1xaHFHV4A1ROg3&Lp{@$CyM#oo;+Z|56R+}#-e;sR7vuxGo zwF-Yg9eBkU8b?B!mQxINWmRZyPN6L{l6jD6br7?0!T@wQFdC!G>u^y3)D!;e1auG! znXInGXCM!;jWmBxigV#VCLYsz+TFJZU%&!kxtvw_=GdO+WH$c1CG5*DE(r~oe-ZQL z4+#NL2<9;@C$)-F-R`5`QPwk~_mK~%uyc`saBvVnwpk3R!Y?(K;V`HDMxRrt-syDk z0u>7bbXEM?!6{2V`Hd}`y7{~S4b9~LeforjjYIrsF(?3rF68s)L$R_!Kp1m9K|RAS zC|K+n9xpl<_-Apjm-@m9x-$lFn?YZNT1f0Tj*{kw1W z2SI#fe_Bl_Q0xqO>z$tNJA4#v9i(H69ql|H_dtg1OaUn$R)s44VommEgy<6<&_Acnj=xI3F~z~8U)eybC& z-(O`l>zbFI;n8o7upFU2n47YJ;-DbXsO*m+LRC~GO-WQB*c%}LfRyFNGfa(8tf$kQ zuR1*n8E;TfKyJ5uT~xRYqV%mMEgZ1w`q`yuuKG-k z8q^4o8g|e}0FHIgfYsRFrG?%c?0!VDsSK+0y@DIbQo%+*_#IjBg#%U!T_21H%5~1>Ab;NJhAv* zYeLOXl9zYvkabVjJGKVfrv3LG_oq-#m!IqxqUl;TgnY!*u@>r+P?5l-;M~tUKQhI! z`Zp3-h#@$^8>9md3j*j*ft6KpH(ouoo{tJE^IhgJ4*tUNLlj*p~*1dVsv8 zxgM6sZor#2GMyUjMh^C8Y}4&x?%5YRV1?N_+;R;%hYh^*m8;tygl-t&z>CK6JV`_7 zmn49cgcU1=ek>*zF)unOueSD7Rv?$+>5^B-h1pP8<$dzOvkJ|@%4?8^-SGWZP}}5| zexiX$HdQh)fOtHP&Y~j7F`4)FG3c&QZD(1@$@R!uT0ui?bKWvbH=us&%xRTfGS zJXhh0|1vHWQ`+r8Al>;6*&D3T0s^vJ=ce#u-o67(1h_QIIJWrr#!+CG^;c+qwyrjDI5>DJfB5F<3)3j@1Fhkj`T_cEw9u0-!v+YMEke49uA`>bKJD8F>o{2wP%wHtX$(yxnoV5dsom@|HY)udXUJhaWufg9x zIajR#Dn}(^+&KxBLSXC%*+SoJ!6R>%$gjU;#^SI+kz8Qk1vyZD?>Arx&WDn{KsFS^ z68TUZ9AgtuBnT@o6t1z^f+)XQR9%cZIkbtm=NoEY@Ay0@arBCqUmO7eAZ3I0gf?n7 zN5EGZsy39VMnJqKfqnlT0GhHQ`@mH@Fo>j-TV_v28AA9zYvRzXS~IuYXDJW@Uh4r8*Dp1!C-QG9fAOO9pRUsVU#8kc&ws<%;xLY5gK%4 z-q##Yw}BVd`gscHd{~D^E>RdjUfH|pangqt@i&06O+`w@diA3af%4q3HPW7P5_Rb? 
ziFuFra%D^Tv3OY6a)^x1Txcd&p@glKD;Bq#?(A2}asz~~m3!YUc`rEtVs0`UCd`Ee zk&WWZWjyWT zrd@l0V8K1OySux)O9;W;-6aHfcMI+i+#xu@-QC??zn$lOPSy9Gs`C%3re^k@>AlwK z?z^u$CNJ={F7DYD(#i_h0;OaM!eEEJy$2*#*8|EN1b`jndL&t>5ksVLHMK&-e zJugrE$Za+GdK@2kluO95=>R>{lCXQTCHB2#Bs~0P2w*RN{+Cmz(Jmhvu>@l}GP3=6 zW)lk}q(KK^&|OY^6)62g<}aL$Fx3k^G=*Yq{bw3AEge|#EgpHt-FVe1V6d@YV}(mR zoJ`8vN~k<97!F<-pUm<-Y2TnlqHu6%-1vO~%Sd@SV~bN6GHkb+&qEydr(Cnd?yREvcAoxLk~7Pi${2w_ zfW&Khw_T-yua2aBnF({QFX;~V-#gWW1x6yh89-?D=#~uY-nCm zL=hLq%+3ti;1ne)q{g z0?ff!1|9lvEyWKO(PRppx%f)}nvTFuaF6U(Jdo1f@ap*5Fk@WYYXR)-8n5jMM?Z~X zts!=Y9}*I7RfWFY#Mx?;<$}MCCyu(SWD7u&9m<#dbIX=*e|d6O&&RB5YG`mR>gzVs zH&trdUvcfzr{H(qbe|)ymxrrheo^rrePl-DCs1vB#_ex`4vLljw&6wzjWIWg?QN(N zUQ@VKt@7F`lviDy#%(wFBx#q2#`4>v)HOo!DaxL{BD;7a?Rcw!j|?IZqj=T3w&32b z)&+p&-3m3abj0)-q||5zXn4K54>sV@EZ>&A=XWY0e1d=g(vZ`2aRm708nn<4=GYMj zFB_vcj9<6TOiV*prh?Si;mg#5n$-%7dU@N`#!o7%6&~FQGl{@)1JLj_m?MBkuhd3; z01T{Of=y0XWxG%yDsqf)j1k+lWhjgq;6#(#F267I$6|C-0^?-exE9a#H${2LW>HBe zuY<3;l}h z6bT5&Y&=1UWO{LN>t<2k5*-f^ktPEmpP7oy{o2O*ljL0z?e*2;w~DIv-)1fw9TO&B z@aN+EwyB1pXS3tF=8?hZRu-Mte}2rufsmT({IZoeK9aT~`z@TU5w zQbiX_w8}_VEJ;<4Il=qPNM0}ujxsJ z2`gG{JI=*ReulrZW*boT|WoXTp@k#>H2`ptQHkKf!{)=!2G zgArjdZlReTA6d6wT&@8v^S2L{jURs^?c47LtA))y+4BqT;aTM5WMEjJQ`3#bo62pA z^!ll0BkY6I-4R; zmrD{j&tCL86py>le0;P&hW$hl|drfO!7%i&hA8CooG;y6xNFtaMkw&)IbL*+4%Gc!y;m)H_cUg_e5 zxx!As0p}r|ZIjc?(zkd&|GXC>;f~<+_;(5cyd0H0{B@H{0izzD=IP&YiU-9HliDZk5DYe-nw3LefK%Qf;z8bJV~EjF%-xjA{D z@ABqdfdWmC62~OGp9MzBO&?CK5$&w6@-q|`3^=5P{Yp! 
zg*uK-7KF$|3gL3Hi#+(P_G#Rx;oL6#GZ(03*W5l~XAm(m{-zhn;SV@8r%4i$`TPYp zziiG$%duDr#{Gi9VmbAi&vFew4Q~=>VxFKpb(wXyxoJ1{o-L)67T-kQ6eSY!#jdwR z(X=?MxaE2ddzgD(feRYoIl3Ki*iN&Az*^2q?c^hdMVor2D@#qPD~{xhT1|aaS$!yr z+E|ZS(*-fc(BG^%6BX?!e@qp+u=RH>^Eo-0PwCRWQzZjKGxtZ&z?jg)DqNJz0A_R& zjyn?0RJn#SBIpN_V4dkpWpzR5!K8!yWx&p0btmc$2?pPao4Jz6%(p|n;`8{NGd-U} zanxKG>_iJeTYdg=9pBv@sLKn5>UD>2a@PFNF6`WG-e*(LaYLON3xEBP%B}JZss0~* zyE>TdVI;gIOYsj3)`xq30VT?{B6+_!#)YPLr>%g{s<{LK_}d_Ebv8F zOchI}-wQ$Nhr_f;EgX+eRTyt}!m70(wVeJ`DyBT+X7~3T-+fZQ!Md-DzzHriCM56? zkCq7R&uXUc>!-vr0m02~(I?yQL)lsGe}0{&Cr)Q@DrL1UU#Lpit?fc1?JDGmGnlyC z%y;{*oWg+sxh50K&lJYJ4ll=y#%+t4x~xnAH2JjLmbFX!W4F|79nW&I}ezouLAJ&7XTG3-08K*TVxQXm7|5AP<=;***872h`i1|wVFZ>G!-v$<#{1eUIaWyX9Q!Vc=M{bTJ z?>j>&(E;N(m!tOE%ke37d4Hsp*@n4SUf5DtRF46{Gdwp~KqZTs^rik|J}-C}Mb5b9 zLPSKD!n9AI3)P(X^1PY?^|L*MG2waE{mRv*{Pw$OxnX~BQh9RVLkeefCuYwEuV8DY zV{m}j+5Hj6lYd=GlUs44>vfY^)7Tl_<(Amy{(LJG^k&|fS3@oLd6UJ~^geZelpy%R8IH^BiIar$N@#ySc%xl&bonxD=B4U&sqJT2$&EdHmDJR)_rc&l*;>Q zjea`tV2?iCdmr;`G&|0u`k;*wQQ-G{a8WuxH!S6hlqS{M{;`{4`H!^x9Uv|Hd$|dw z{r(*+w8F5kzb@tZJFo5`a)dcpLA^}#x58(En%Bjy2r3wUO=nxd8nt~goTO~R8GX0R zppcM!zSExNT+W_N|Bqks!@ZDhlHKTRYV{XWe1IJ*KlgO`iE!1@pU$=Phyr0VH(RdP zuN2m+Ix;xe2j&%j{9~Pcf;9&VW6*v#9Q=e!Dbo0us$0ChBK4hxQR~J40QeHoo6z^Y zPWwB8tu&0*ca}rbRIpda2LjDV(qTYO2_$1J?kLDb!V_S`m)GE+ZNJFT)L@#Hsb5&P zrxrl<05qA>9UJGLnd$gtcxaO{Iv|Dd>NV)8^r?g@_NJuyd7j?=4^eDkLPPnJYvmQ; zcjY}AeQP-*qpcliROBCC58dCBBIu?9!w#2N*V`Zn_|RM0AOK12PwSPR!%2gJTtS%x zRHzxT_$xr+<@U{MwJE2z-4SE4A%S33@hse5Exhd&3{>K1@@hs{>kQlOiy)s);^c&b zBQuecKam4VATZyl?6~(ivHDkC>Nw9^v*49rFf_bK0Q5}&biD2N=CRJNUsbRF(uz!E z6Z5`{N8vc|e*nrd{~azF3z=}o;@m{zXZ1*QUIcMTWl;!Yi>x;$14D3-`@D6QPN5+) z%%@md`g{^dGJd~kxva<(I?>7%su7?^F;g@(=nvEY0NtaY6BHp%xMsDoK=eKw%?Y@P z4jW#x8`65`l+rjbM^z$lsXX|c!okeJQOS3tovDjiaynjc4L)vgxukYSy1fID`3WjX zA}ea2HG-w*;em!4StHe+U;2vQT-Sw|XuPp=qOn|7AsgxO zQQ=H6aFA0KJG>pF`ADEt%;xjBo-K8H=t*$$iud%?XI9=zi(z$k4H(vAC?OZ4m>bM` z%M_+A409{?3Jsh)fGk~%_lXJ}w`G3<%%4Td2}01EUgFxT>%vO@KEnt+@YM+1GNbyd2+<0;%oz 
zkKRn^PnlqJ2G(Marpr~jbj9Uysu5CD)7?E|^MB8l&@OS-X?)NA=!ipSYHW|Kv*A

VsQ3%PnAa>N z;kf~I13I&W(ztp--xoYK2fC55YV*v(pwORMoAS6IY1 zZ@JprU6U{~r!Y+`=@>8vGw^LR_oL}MLrJW^u{BrI&JJf1@e-BWJG)oXYApH%&9*yu zwt>FM_2+c1&T-?~nK?~Yjz^>7hLS1GGhpe}d%m&8Ls||&WxyNM33 zFm4=_+_S6m7Il2ZvX0*HJ^ID89gCu(`VQN}5f8M|ip>q&c%~KrqdD-moTL_BDlUujQh%uj{#>`&6>V zd5gxO|2RGwe+Nz48#M_Yho0epA+3q;JwZ%R5W|$5z=fQicVB=En_1Z5L+YS9G$w_X zL9Ep=p}C@Mu|a#a=4LqEpLkmvq5qtHhhbH%X)?F1U=XzD!Ve2vZy&xu;S|DAMgO+r z`LkZouprb9?v$h=B_xB5wUHJ8?93044a4KHft!;KqS_R!wxJWr8#r|or!FgL+{s?8$`}w)4@(tM|ZZ03~lfv-2`W^SSK|( zfDn|;aZykCjc6@PNBEFwS0{a#yY^7%V=Ur5eUsJmbazQs6S0f}-J|j;^~J1HpFivB z^ukiac-|tWYJzF`E=8nhnd93X+k&d(^4wJ6r{E}!jxl0=l?eog-N3u#PG$NrRmB54 z(K(T9TkpF3#OXlNj&PXpdp=%NiP-QRmRVXA?Mb27j>Xl6pM^t{jmw*F z7tWryvuDs+Bnfian(G7NP^A8GVWI0Eam~k{*vc*A9;-xyTjk>Y56vEOX@c6 zF4k?+Gcx)PAf~0XJ!Q*gF@Ydmc2g}cdS~>H`JjtJiWsJko6C%ZiwS!{#?ATThsAya z@zMWxr_TasxbO&gFvp6$rPbbgk1j8^WPuM)|Fkgv7yd{Ot}mJ)UN>Nybf5%}K=+Foa^C@#0tvw<*m zkeuiTR-UG*=_T2|z4B1m#FS^bC_l7PZSM14P3aHp^ntc;h02?fa7#d!#q)rBf%Y%pN!yv8x^p{X=wI+t=gUlvMJ}9-T%Wp5 zIp;(rMXzIW@?cn~ZL0|es6QU>eLlOEYO|ZQ<2IHbZw1=6!$puQfdkFPjDdA}wefu! z*CQ>Xo}@{vYL;J%oYhm)%Lqae$(Ua++J48yp^yX`cOFsXy!!AUuHyBz%vSB}n}6O? 
z9Ea*m6s7Mu*-8AfRQjcekwj4ZNm<;4yH1UU&72_Jflhfkgh2V2y0J)}0$w{_%-I8y zts5TDeNt^sooRc`?|?RjggV&RgIvY+zRhrRXjZG_;Z8m?cp>oW^#Uv@(tz#Nq*Fvf z0xI-UEaQA~&+|qigka2R7UByVzF_&INVP0soP@ybPbMhYso4dH znfV7d&885M5HJID&VRXS07;hJs}+~n3i_nu|0MJo4jj=!$1v5chpXd3g&0gz8EEAF zB2ZSn3|jyk=Ca)Tr1+L82k&_#Bmn=Z-HWeEJ=h>~bC|DnI3OXSA1n+y0w_3vm!eQ2 zhN#!rBQ6(d4NGL8A2ldAENYW=luI>Pr;7?KESGF!%u7qDX>lt}LKG0u`FJ6tV{s91 z=XaEr8CFq51FOpjKUe~6IP401Lu%a2_^dybKvIJd;o{mJF!*kH7f`!F>%&V>=o|mk zZW=gnl*+K~@)70wQw_$5XP39#NP2Nzlm2g^*Rs?< zp;xLWdSN3B0zA`C=eOy6iv_)OlrK2;Lq&HqAEgbBuazjM>AY>NmUj;m8^Iiyoh>Mu zAZ`g5b4N$VXokeWe#)cS;@z34fCce}Ru@Ypz8i%S>*Ut+(Q}B4muSW;kHOuAJ;*iO zp}yHK2{DGS9|&K!{bX{iHu629JweFoc&>CMdzDUD=Uw5EiX&1Ko*?p0OTJ$!;!5`IO>; zs9aYxXt9}5AI@ZrjN z-NO!G>KoMEd+qfHIjp(mX6{=C3&JNnE-w4>1@k4UfUA+Ii-Q3&CqwXzqm~dfGgE`` zqlG}SyD;tXmN>Jo^~d!a0ZR?I#{9}?S5Kl?c|q!fY(p{*iiMi@FOu#LjAB!R38^K%$r4&xMsAlnH9g}b zZsKdi`!McIdc!KDnTVhPQV!4aIJh?w?#6oPpF)W_663B9|3S2aIkkDBxOXpxJ{5e^ zf=4&skn{A0wJulX!HPO5=d@VaUckT*hqWo(P-|~tkF4y67sLvO5YIw z+Ru0?g4Z_tjDok?Qr%@b^EYgtD$$sI?qjfc1^CLQ`KOIK))4j~IvMN5wU&o0XkqLC zyxC8}?-0aC%WM;fBRLer<(~gQ@ya*Q6zH=)IM*3;si}E7H$^4ZEp~VQS8qz^8I@S+ z{q;E7=kV1`T6xCZNfj0KoFzyU6x>Av4Whjxm{9qbH|5~+WW2oTBnCDJvf+6F&}e_3i@gmr{pG~rV^9w$0E6DEjER=H}{!sE(gzl=@Y&sG9fRz ztzstI#C*Ie!l@ek!>(T^UOU)T+2-*766e223eW!=$4Ce*;Qe^Hm35aD4l{zlZGW-V z8-fHu*cB-^KOF^sm7ywdE`oc3peR78B2#*LQl8&S3uD9kcEd&zrXrIt0q?pgX3~y@ z*X+o;d7E9>i<`oR@|i1lCHBmW&|59AdAM+E0vv)+z}wx+ftA$Y>2irz zx+*(vEnP|BN5s8goE5t!82X|f_I4BSayv#F1Ck2vyHyBr#ZQvPA(S_Iuh5C8IO>NT zPqvAXDl)8w**WW6rA`go0DWCm;^5lhA_p33U3Ya<>X764(5EnNLRx$2mv+llW%R$x zKFagl*V>&me6A{2)PvgKt!eEFArC=0ST)nMugCmO`-0=?YRC>T!a-{*;DeTs{PoNI z?Q#!=sYr@&h^|i(#_AsA25$Rfo+7!ZwMx}Y#aXf4>SwM7ufIx+W~6j9tf!GA3g0kq zj)#6<_iV2<3aGByTQD;s!PAjCA6{KJ_1oy!=U52~|0hdvn!VBEXh0_3`OL4O_c%|n&5!{$9#U> zJcY@pv2q5QWw(Q*JJAeH8e0lf9PK2aoI5|I)UdIz`)BSf6M>tQmKPU|ScC+30dQ_# zFHBT#o=fWU#u9@I9rtxVE&Z~0t59(xL`s4waC+B#8pt%*YD>yH-&v%@CZB0;=ow!J}$7nvx@*3 zrmJg7KB)8|(BcN6%#w`TePZbO9!g5zP_?2Y2rMKpnIW6M$hr+Tzl5xfeMz0)^%HUa 
zGF-Nqxq&F}zapsG`_t|1Ks3~6-q1SdlLk*;EL?b)jHV?VWhJwsY&f3fZKmZnvXq|(@_1xDeY+tfj0!Z7*J;6CuEu>Kzj9XkzltUsg-PI-$(Cv_1|KCIa zpQEKjlE=^4X?W90`jmzP4i-uqyT>ki#}Oer`xPR59T=JD5b5w})o-W@U)~X)nsW=m z{{Ldv!W)(z>!CpedYwT5!0&MPGN-ZCBUjdMPqCHkb}E0N=BP+KOEwwsUzf;nJKE?S zXyPOR-t|J+JukPz>&%i|uIptWc-f^z;{l40@STe3Q8qZp$b<7iMZ+`5{4G*oq3B-< zAYFaYiQK$ElZ^gMcWC2GQtMo$Q&(0;!MeeO{GXeAB!1=LM?iKn4Gn41>yPR#4moaC zzIm7l=|XxP5x*-RlPB;x+nR1|)xG^N@_RGXN;&X<&)5Ci1pnzjdR$+aE=SM7+PhWq z?_=N`HXbAkd-e5y#`24vgKn-CQtl2{Tbxn^ZW#OG3HOY0iI1*%Hq6MOs#A;Vf*(Il ztMxvgT(?g-&6?$Y2>p+uT=HsXTG|4v@uJb;<0bATfK=MWtTKt8gMM45bnnM!dJqU? zU<55Y<@a7$&39_zxT7AD+%W6IeE(tM_ofY8UVv}DK!V*D7|5@P+6zvT=a=oQy)(1b z@oY|3Hx4avd z7FmMW|GylD^$QfKo)9-_SB86!iHL;VK;zt&uL7`CU7`s8x9Ha_hk?NKX;z=9KKRJu z|EQKl{T96t|NZk?-SHA>MPdN~dLOm@jE|1|NnEWD+^~C{8>7weqGN5zYoRWs&TGCd zb5DUE_t&DAI2araO}IWp&7`nQE$;=iHmXQ1Z0MfPyP)8ZX)Uh}r6NaN9e`224Y~R+ z)c?8H{lDh#Zw7q-HGh9+%KoplyWfWMn?>N{12Oa7w*wGyU?+s>zy=TW;b7oDj{S!J zkd)^zlKqpyKX5eBKWli@aNl>aYQ2TK?||j0N?zbT+v}6l>vds81TFPWiP`?GxRC%_ zDY{%|L^03gL{75=!s4A@yhdf$JhMtXF*UZe&0JS{=BK~ZPo3KJ%^cYUzM`h<7aKgJ zpN&~U!@y{1k;8~cAqekpli1+fpY_g3w`irfZs6-Jz`d`=FVL_ll&`62|DZ4Og`m>a zabWH6i^1j5a)9fyg8mo`f4v_a%YnN&DC6Ev?g|Z&^k>OJ@;pX2HlE~d0%o#LPdf*Q zkuQAU&sR;8KdvfSNnpCXsTUWS0z&qE!k)7>Co)Za6hR}HJ_F%UoE{`QS8x^^!WQMC z7E? z6K%f1_QvM+y6X`F)}%f!!HM+%t(nzm@Mn!>)!|^wYbQT4p4>(!vFm+bIqOWN*Lu%% z>X%j%)($7!?d_66a@8gVR!_Rr4BEmLznqPR00CHHsNlY_D7iElx=J;;*z{qWbr{X6 z$z8H-HUE0cG3T@UhMu@bciT1X<{d>=mQy@l$4-%Kji z0<3NX{LMLCXhMVnKl;dQw?Bc1#>YqP@>ynPkO=1p!n3-BN&%YJBOofI7R14q#~TKW zywgxf=T^a~4i2mEUzcVspI8&=C2=?npb#{&QsYwpe7rCsd@iUa32k7XX(z{KVWULo zGAE9T3R=_xn08qB2%rL+Z$*V>`{k?MZvgt)(4;aq{;VQ-tU>L!zJQt zy_kz8pWfwr+L93H40}uo~gi2bR|-bd+mplJE#grA@jSZ3em5QGXx?9GoezQA8Or?>%mNo&_vgfo+A!U&;q&E6HQX@R)kg=mM}ec+ATFMs zh=bR3b+${yHV<5;uOmIc(0-MdCKI~>4m(9u#^hzn>hj!ZIgIJw+KNx@cwF4M=u~5) zxLZ#ZZH^}J!vJXRt9r!#cDEEl-e4T5wB&4kBT+(uZ8JGilaZ;*6z6;Eyx&EL4laJP{)a-F3u%@5T{+s_E*_@? 
zcnxE}?Gn_$00rMh$IK}-QZUO{*DH}UI6TPpL8W4uGTX7`QK z{DIi`uIcttI4*OfE|((THxrvUli#F%LH+@uYUxQ^*9yRFB2OX|#H8Vg;IJP38WZn&J{t9<$kdH)WVT8k;{4Ac zGmj>i$Of2qHk^o*A`sW1(ZPr2IB0MnHVTfD3B{b*Vn)qqeix1|KYe4d<$CoO)VdF$ z^$rwTE0xBI_p}EAiA)+~tc8jWmviu|tJgVNOSHN!MkRQ>Tt|~4;FSv@zT@=NfO$25 znt$}-JeovNQMGUv8^`p0QeomW!(>#~c8f$#qNWAx)u;2#&T|k=q~3zrsF*l!IPE6C zai$fHV00oz;q!I-DYv zk^Y)2m!7u~hz6+LJDlU5oDz=1tqqymjMJw3en25ex7`Cjk7H-xWQxJT6eYiYszfS1KoeoJ_7x%jm}pz}!8M${%Q9WCixb zxYJL13n`4REv&%jb9WO`Ux?|H|Exx$QiKB>E1D?Tt;(!Po+>nPZ>9Pg2Om4t8z*ve#BM)UOd`0RGr%!w4cKa2WTCqJs zb)8FZ*Nc^kO83auDoTTP1DHOKu`PYXz}7BCGJT3Q_n=X(&=FH6XsC??3`n4$2*E^o zR^Q=*q9TAHPp-Vj83fReT;j#Wp0-uiplA1l;HcUn87v)xJ4Jj8#{Gs9IaVb05vA4z za6&tFFFK9LzJ9PmcUh8Wlz)ftX+n$nNbFzjJ-6#!MOV>!Q-lUbo za{N2wLF!w{;;<<# zJD#(dM-}hkwR^+Lrbh-(>LuK}EW0!6G$lJrY-WCR@b#V2xE$8(SKboG%xpy}yPg`bGzvcnR? zv2;AL9UTp;_<4#G_gfTX=L%>m-b+a<`vU_%#j{FE4I$2`fL&7~ifr&l787sH7kPXP zG^7gD1@ty~vY=6>IPoFD_p*@O59Un2 zb_%1>K=gzTJcDfLwQ3taKA1Zex!nKJ0{rOo(S)J>!j%|YA~oIUv#$C1BjtL!&|jrt zP^JISyXPi@bGlej>tg50Hz-tD^GP`l1Y{ZGJl3Vt-j1jiYnay(?%cbMfTX`D0jthj z+IBG|RtQq!Z-4kqGnX)RgW*?|Ww7-QfNR1EFE6LIMu3)2;b3q#(bLbQZzh?aH`|V0 zUWJ~cU=&OuIt6pQP3|oSo+mWR(hbf_k`2#>r;$hYjU7o*y~o$ zG6ial8ck9-`98jzubLjVj*54DXV)+W0Wk*6FNvqaa;sqWYVF}3p}ya!aK#MYvN3H1 zBIZ+Q+mh|p+MoJZTkh*y(bNt$Ci4eh2N1$PHm8g#?edC&5WDo?@j4hkmG_(T>zZI( zrMnsQ5hgR_aVAqKJr=BHVB^wq`Rup|Cncq{_BU(?h?T_g&qtd_Xt5*GH~ZlhhXG#U zeyFmvh8E(CdNl4Rp{kJ2`v~oT9UlfNkSahyn|()}&q;?XzdgqAUBPs{(=PNnQk&+3 zA8$K`^n2xUaQK^rv8m=?R6OJ5#*Y6E&=TZ`y6Tm*`F7Vkyj2eVRz@L^kz+Qa)~c6{ zZnTzi`Aa5=iBwbwlYc2p6j0|>+g}i@tZ+Hb35TAJ_cM@MazsW3c@)iDK3tUhiK-R& z-dvzO6CC0yI-#tzYL@c6+Ix2+=6IV~L=3OFcD&GlxeP>oMHp`Q`X=Y}WteBy_Gqd? 
zG+}t-kdq>A1-(l$=4Sxm>QjE9KOMWej~qo;(`deBij88Ig+6(ONW(nUgXIfywFUG> zvgN}{Yoe7~9sB2&5Oiemy4*lxfZ5@fQ`X_0nUaR>bECm70~mf3DHseyQr_Y2-f)0G zq0##v%SFzsGn7XiZKI2@+~1@UDEeARx*Q?BpYm?kOQ_PFVCQGyL5DA^ZF@xIL7JRf z(4(W9XShi=K$!wG`9p~P$Yn746#;-EW^k6xhly|!v0Gl6DNr{uc6g@=_$Ip9J>5?z z=R2R|a8qxTVCo5u{_qDDWOeULG*AbwSjHs}Uu*=D-^e65 zWbWUEeWdTVCFAn}g`LADj_H}1Br!2jJ0P}K-@fFdx+9&OY6tJ!Nb5`;YfvX2hY3;F z1!Z3Po7eJOI*+x>+X(Ag=iDia0Y+Ja1R7&bd%B9pEB8iWgI~0VV|=S>jBfg~%RT+? z@h%kC4`HA6MmMCl$GA1W*bIRv!o{4Wj+8{I+(Fys+dEBOuLpfn9d?{3kdUpc-kTYtqD@XEj~_1E>?S{(r-xc2t=>sey3mR3=3`HGQQxl z)*aZ-6itG?{mU<%EpSkT?bh;&6>E9jM6o6DT?>=4@L3H=J_bKO3&dM&fXOThEyBHD zn|PklcsXnkZfs|WD60hrR+0nF{~)T%XePaZ*Ac+d~2zKiAD#O2kWN6YjNie6st z6L+_JvPCXdOdLg=cGqog%vo)*MV}Kcz9y&f#^7>Co=io-ga%eXnl=?LU?jHff@N($LfQv(DVU0Kkt!KC$(C3Rjimmt7$I6SJOy=fCCP4a3-6d zpZ%bC5_iEs@lh)SGSM-+*M#5@R-2WdQ3yUShzyyOQ5m}x2~xo^fcILnFhnXCME7bY z)aq7~4J0q`+y(s8-Gt{I_Sy?22$_=;xYOxTi1m&D8uib$$=nvRsr42kZvrVE?|gL~ zkH3Uktti#1;5``YCB?f}IE`OFvGa-?O8&CqqwngPbc*%c6V8{^($3J<4$Z-PF?vac zAmqirBgj9Y(Oj*obZQJvKCCOtuvxd~c{`iq|K^(0j{htW#LT!iJ!Exg=X0E>4+d%u zO*nB~dmR17PQbi{Me%3wV7l(hK$*%8B0g>svx-xuw_l44?0VOIm*ihQaxv}B-aul6 z@V&nw!PX!wU})wdgSp*hCwDDMu~Ns6jVOF^?nvP7A?}VI!!;50xXI@wt{kZ+Z+j-R z)JJ_{iM0)j%A2(W1aR;oAJRKf? 
zVL(|{j=I3^lCbK%;pz0m`7;y)f6-ley8X_@-HP{X?{*=^vix^}{6c)v0*=RKUN6qa z;G*lL2I0`zmYafZ7j#>eZ2?I9)!MhkyI8ld+1d3%=?7&aPYuQ@EpqG&yVEzPkZQj} z%JKHZI~^`5Zk~LtTLgLwp)uZS;P66ly&p>WNW>hIm!ArgpDE zn>D%RdUEzPaa<0^UMMGB{6*LVsg*_^vj7a`YZ?`e<(E<0q4hzyZ9e4NG)0v`#hr7RzH529^7|m>|Q6Dy2xv!|i?=qwY ziYr6L`qm<=YfEn*aX9StYW}5ZMJ|bBa$(=I?~a1uAqH!m*R*Yq(Kor6I4KcuYAF>= z1)#_yqUcl0yXNBSEqis^hUs-bp~BA8ae^jK7Re>RF((fVs+hdG6e;B{RmzL_AOO-YM&S>1NI+B2?b7b!_ zW^QsP9^y9C7izuJr%>E#b%kP8?f=f4KX92l(X1DOCM(P0iIn!9^-5{;P zfJsW>Jf=+CFa8}q>lUkht)H~0-Uxakv7yy<0Kk6B%>fA1XSjU@SNfguf!@~kbbRe~ z8!}DN7wp?N(<_zGs4Yc2&E>izj70Mu=aZsYL1-kCfoN6F`PM)0uzyA~`(toN5U@YF zx?11w=!c3kQ{`UfCu|zlmM=`xFaQ<3%L@U3-%yzL1PAsbhckq#csB+g(gg?7!k9;t{lzq zv`7or?n#HhV!c?d_l?uaL+!^!C+WiyCapK3VucC3v_S9b^O;YAx2UA~TdNJKmPM(5BTCs~27Z&f(d6O@fjf*JJ;<4are7rZXM?bk27cUz7 zd$)2i8T|>$*LJ~ICERGvnKre1oa*ea=imncI^+YzCHzd12|mBam~O1|q|}?u`%Idz z^f~wTsuPF_3TA2->l@{b{Al?0yBEF0rfZFvtNfGPbv^MA8ZKAqE?fD+N0#SF;UnvO zFfea)`9!wuG`GvUwcsawPKugl>?ifxx+JRYSKD<5#cd}JOM=XbtL2yo&AkYR#V6xS z-@R(*LDEv2o)13?$nLvHzgm@d_)UzZiQBD%g8#mzQZ_aw8vGy*G@zPD7%f(D;=7sf zooh)-1fQUVM9>4{w1x@Qay@Yc{n#awQUJO+2q$*HaZO5!NJAt2JPHC7ozKiHMgrz^ z^RudvHessolger>jK*1^g#{VA`k5N`8B4M7$-@w;3^8+DQ8Dls(bJDRuyG`7Y43mg zaO92(BTm-PKKFWw{~Qb@qmF|?ZubONr;jpI*>(;&1YL+ssA$3>kH0^HmHsTQ^nz*l z@p=sjj{sLE**^#&`ezm%W^18pUl9-Mduv%L?R!d5fm6z=WV#=(DG#0WPbG%$J4bTf z^$(N5Z%E>3#mc@58XOqyG{ryRKT2)o=P8hQP#Qa;t_xGOHi+li&Ny+8d>lk^Ge%i; z-;e(Y#ejt%419;tUhDfJL{M9z8|Crfegc)BLXQI|0;%@bYm!*^KOTBao9!m&IWVse zH)-8ADEJpy47>A9X*uBS4S|ACO@mcJYjoMd?->O|`W15XMVq8UMInwKUabYf4K2jB zJ4i28)<%p!cUJ%()9?M`Eq-2AviF@VhI!lYXV$gR&wJBfn&w}uK3?{P+~#sh8U|*N z+XhD*E=z|a2-nlqrJKHpe3NLv&?u7)+Skkvhs3`%4Hh{~3y67clbze*Oh5N_HrAJh z0tpec|Mq=zY4n47LB!zT|N3oXXuQvl-})unC?ahkqu%YIzKDaapaUs_gYLk*8^+Re zj^qUqkVqJ>zMZm|d>SubaQ|w3z^{3K=P+6kcuR@P!Hj|Q+X)@9O%lw+{-1?m!oL>nzE^g*Z)$14(Yu^;VkIE1Vm)fdf#6Ni`Y7mGfkas z>iZUzNXtm&BgWNb;-tVQU;TZLA{y&#CFezKTP}CvF@qB*FvrFUaYZ{i#ySzDKn(ip zAo4*&1)o-}ZR}Db$y~8Le+-O$WUV(-@c7Ey5l2gLWv95E$c@r3c0KIcuCxSfG$!%8 
zq?htKH%#dO?T&xx9Xzo2tuw$Z7v4e&Z%u+ zpESK_C4LRrgR2t-yn4Csbp%XOCLZ6~80jsiiD{GdWsw#gyln=z;Xz!Bit7i4$U6e( z8-hp^cI>P3joJ~ebJI&ZD(**GO*SD=t$M%FG7*^WGB3Swy@26M>9J_~k;>(dd-{Jh z@V-PJH_Mf(Uk6jLnf1+CHtF02Vv_w=`gu;1MjH8^kW*L1!oq)*q+>3ap3Ky|4B&Kg ze=@oH6d`@{v0Pgod+=l|BL@q;`r2}5kVb6va_m~2Q`DLJn^d~;>tKvRMeD#|Iw?!iU!M4D$hef(ABo>B#VXTg;<`T;_hFx9!0@lXP7}n zSlj%%9T&&?^cRp2S@XDfQd`U|4_og;(qhBLkV30G)U^7N<7ggvL0ZV<%B7&6;pu6v zx5lM>Ivd7QmWIzX0Z6T)4}HFF8~NFC(So&^_x<&meRR}&zE$G0xi$_;M3S%Kuf&Zu z=xjdXC01#o8)9DEdFJwYZ=`NE5zm>OI!^Mzi%A(x*#@3cfK??R*&PV?3^BYO;wq&xdKp z%QDZ%nFvI|nETvVi`b%~qCGd<36@Vy>qf`wWh+Ma+2y%$Ir+U8*{gdGVu!7C!ac9*>xwu!$xxaJ;wb)Wrx0$gl1 zDlM}Yo0%+0pTxD$Q+$2O>eLGU6ugZ~5yYdRftnAowyxxsc->;i=9iKuQ+Kf405PQqh8kzuqYVtn83?NI&LE= zRMl!E%Z-Df$db!+!Jf3l7F}AKU+=BvIjpQVGj_N*Ygbkr(pk7kp2PvG(P#2x8EkND zi=&`A9&?VY&)=kI6ZRu&PWX);%P3AX?R-wRT0KVsSOOUDXFP~`417d8ddxlJ{1eHO zZRXt-tMi?0kx4k5N_4P)aB?skK5{bGWX3uv31t=gdEaxX=rmeBZ9j$3#U2SgBHZvvE&m6fb! zUzm%MlX(~c9{Qj3^bDq3S;N}nB#rr8i=EG8qZjr+wgVLe_DB89^84A-*@r!{>7&}3 zH+Wedv<(Il-a;U_Goa)hQxaHXWr)a`m2*H!_8e?mg8V-3aSj#>b*A@)3hz66xWt}} z4KMPG40x%)X;6Bzk|KA?K^ojCi>K>Me!*kKmx*GMiyD(@j9 zgdJOpX=Rk*b16M-Qo=PAvy;xNH~#9_4l}nGa}!p)an&;JA!ON_+%Rs=n7R6ohn5MX z=4~N!?o07jFPrxc<;*B29#TjEo0GFfr)J`8Q_LsR>{$AvGcCrXWvw}}QZP^b@RHl{br5(yMMBJV}+S#{N={*;-O@E(xt zt-YP>^UCr0lFXjl+zDN&>k|Q#U!Sa`qK8dBHan+z6-<#_-dfJY;-pIHwB=}XYoSDn zB7jrZCgY;47tU2~=AUN4N(@e3Ga@|fO$w{rB$v+mmc zpD`k6*;1>*ExyLLXEXHqkxH(?tb5;M^;yuGLLB^(JUvAbPD;Wz*poH8FQkK^B<)!q(qJB+Y^*=V8?rG7k0(1-AVWMwyv)#*W23?D+;-xT_h()ty$cezjtmIQu=K{ zE4mFCW{x(|R4|*x)J?a_;I_*J>Aj?46y`MU7N^uxvwe2FDz@9T1WlNlyFL z#Rvo|5w2*Bp-flxJULo*69HrGv%bTkdbrs7RX!n{`ecuRI?;_K#GQ>mSy5 z?Ne>qn02|s7a{!$>nJH59Tu|0WT9RGzVL6aC8E!h#yykZiiz1FG(XZ{#Lfb0w4v-C zDmR5~DcBQ&zL#DYM5xw_I^@EQdMqZSn2cAADb| zo$w!>@vk~A7n7V;%A9X6(}xo-L|b`j5Mrc5ZZ}vR3UblT7RC3K5}gx)qOIte7SL6@ zn%tH1Yxt0Bu$#LRY^Hc11joF)D7*faP-8RA%Dnx2_>=vWkwsrs&h!ucm<;}wj?Pfb z6gsfh{i9mfBoQrVots$j-Nb{D&{MZq>;#J1I<^J-5-pOouB?(UOyoVSgebCsEa3Am 
zNIG+FZv>!=X3k74?x+NFt;^F@$sO+*-s`Yz}9-#`CTQttAe9sV4LnTf^=+7Ja-O4aXV>yZQe? z1pLdR`}-kO{{LKu@&C;${BOS9lAY{`x7YK&&|UNEF9F&DW9Lrd zSsu<)=MxD!Z7auM8O}@xTU%Rvmle1frPiyPZRt-Xep8EFE#hX3@^(oqP)(rZlK=rK zk54xUN*5o+t=8W+JyV_!gMjL6R+_e0~A+a#>Zxy2Z zTLs>7mpeJuBb6C-LS({K-CgBF8Kr|t=Kp~>?FXjGn*-;SSuy}HeMEQL>S%IKU*A1y z6&>!y8&?=P;=A>l$&2{&g~6r;q-6Yuk&Hea)8eIY7W#Z7385Nxav=g#wx4cB-yXbp zK$A~)=@xtZj_C=a+PF1|ny|+Gz^!QL>Z6X1lU%1@Mrnm08H)hgeKR}oB*Axy`mHUx zBgV=!s6bgoV70djub>HUq4jw^IhMZ2k^)_9OGpM=nPURCMtt(+otlm3V*<2V?HB3} z+$}U{5mCZ?R;6rufUbXG9KIrTySV*8ie{}p;O{5jo&;j%%11V!C4=Ieg1UOg_CHp} zWIB3e{N%E38GOEWrMb5{x>GXb@+TJ}RZ-2@)!ZlgkJZ%RZ~yPrV6h|$B>}l>ekEVo znTy%+t4_VfVo4&nz-P}V89LA3h`9`%Uy`e}HiR56K3owg6MRg?YtRd*aOfbQ)za2Z z?d$9H#E}Qc5+{19Z}NQTIiZXGPXOWwEN@maR^lCeJd&51l$JpJHM6gTsM}e~BIWaz zj;$VM0&rO$O-P5)0vB&hy98e6iC>OT&26#00lrjhZ65b7e}lW{udH^!J-!t+?yoo# zU>XuywE66AFj6(S7AetyB$I!B+J}w2=w7qv{ssHyR zcHxa{*qz4-e8j=3)Ii8%Te#W^9Yr4sU)oq@q86+kciTCn<=>;(03zl#RqF6pQC4a5X@nfKb)lGX_qQcaghX40K+ z3p=4=ss*>}yyYQ{68G8`K@_%PMJ<@Too5O|1g8Q3$`V?tnl7u)$`;}id+E@Y^#6}U z_#CcDM^cwco_W)5uR}+chHs>E>n#|f*D-&+wEF;V?)N(+!mfQqBZJsOoIEf2+K#EW z;!o@&l+ZtNa+w7^LPmi@rsLsC*2WK7HVWN??WkT|)qYYxj0~M&49e>=Q0z8JdawD` zIHP-Ev3}+JNR08N`R9RgPuHg={Bz5e`KG9&OU}i%{pE5?Z9b^%ENWxCn34dGN=E0Y zVW0WtrT!~${i6X@k`de(iF?|>Jx;}N0$!vdVbK|7@0KAKlMFu9Q?%A$^+_aV+P6`J zEUt$I0pybqFlpB7v+~Cy1h?{?pXE$?*$9^E0h(jQwb&*}La`-2jys5u4E^Mj11SJ^ z`a+?ZmWzH#UXr!v<|XP{f2cFcx=Q9(Y|7>@PVoSnR}bT68?_6|HD+qe*pi(bQ2_TQb~;4QyvK@{j7u3WmtM?k|e-hy)|Jg|NfhDSiEW&;HD&WIwn_7|9>X zL?Rl6Y~%XNvV2R(*?&2YwdH_`1f1$ii~2u<5)`H~t4-?P`Xq?xdtRiL{;adgM9s=K zml|JTPC@GRu*D(SJz_o{F(>ro!eRrkia6Xm_ZoZFvE)@VNEUYJurK%)zA&eX{i z5#q~>n2K_asAOFT_0#gkEms~U?}}w7k5gtz1tbj-$rOAvn9Vv$xD4?9HC_`!i;I}g z1B;-8x5l}+@BD-_t`8m9GC7$r@tlv40a`|0y!gSO2`na9 zkXXDy&_C&&-P8geMgyhq7 zl&loYp%3o^UZUp($BWI2_D!EkL2BPiOB&jreEQd`)%}ec`M5c%%9Td!3;5}b>c}Z@a6DIEuVk}E_NuC* zP_6+olDv3fwH(vznmtkCO+t3v8|)f6331k|=|x`Q7vo$L;Id*wmUX-&ll0e-ynzm& zdSYJ7ZJ;2x%ltU0Q7tV^Ju2m(d>Tv3Ssp 
z=y9#?hP77zkqfy#|FhbslP47{qqv>McH?d9v;Q8FP&|~7a3T$XW<`}e{pB{8V+MSV zvRVCsp8&kDu!89*D1|flJwd zz2uS@k-o-o;o}5(hxh6qm5FJ$uD6a7bF0z&pf`LwV;#rLqPvbOF)>CGWX3eU90mFS zn|FAb)an+hh4G`@;e4P_rdO$7CZUxB*f`fK5R2yil9QJzmC%PLRz4FDUsG-iyCgTn z5A6SpAsr;<>kBR6M?y%3t>VMS1v;(kmpgxu@BjMMKxUsYW&JcG zVf`o>VD+9+L!F(1dFTbMVgz`^`KVC$fgchsTn#$Tr$*;T}{q0-gliE+; zj>LQ|Vy~}?YH*Ws>x1>dm!g+sIx4WSuPQf4VD>AshtH%Ym~(U>Hq4q?-h8b_yT@c` zw6(Y{nFUpWlyB*{~P zY;=&ox_6h8{9^)GEySs2Tyg)!Al(-&`CuQQNnG$ILr}dp+uTFjw=wv(x`?B(N zhiG3yA{VqwcB!2i*RD)5Lkalp(y<~a(B$+oidTB_!swt0g$WnK`=}KxW&s&DWU_x)AB~$?r#~bnBk%*1NX8p|*fC zTu+{IRIqG*Xp$2=32JjwWwN?K&qA3zqQbbf`xfm6rhHW%rkWZ9cj7}}buFXyb3$Ar z(q>m?jvOJAYwecr$&*jqrT!_+9j~h+i43;!I^qd!o|uXdc08K7`sErek>cmRr^S#kEM3|`*yAE`+LACcptIq{qE1JbR#W%v z55>Hrn4%)c@3~4H4itXE&q=tBZ$><27IKMpK*w^C9%_9N!o|eV~qt~l!(kI+V zNP2`UpA=hoxqfNQ&--%IMcP_dpY!F6@RL}|xT441x^j|$=$eDiQnV9L&tkftNC(ow z!8Lcwj>|47bXk$C<3~ifUb|QvG3iw|E{>V!@3O{?l^8!O&3|wNKnK%lv?C!5mP%~U zjwmW+LzpXXsgAMWkD-42->qDNb`*Y?gL2vRUfBG2?TDa#1TQr9ZJAzmL#GM~KiOe$ zy4)CmFcbA_zl}RzldNLHeO9JN>8(TqdJ9s<5>VJd%Yi)0J{0Cg8l-|c(X@G0HZ$4{ z%H>7L1p}W_9+JG+HnqA;n`V%QRjc=(zFsU0NReM`IRxfxuKgh<>HtU(7+K}yYAwqh z9&B9NYlNRtCMiGQF?t*E+*DH4*9-tWx@c75$U)%`fcmH&< zn^+lg*4sE%2f2!ei%wIt)WqYBSszW){gh}Y*>|MO@-MJ9t% zwHTi#{-~2z16sPOKp*S?AJe9v~fwUi|zfXp5#|RvMX3f+MF5k9PQofZat-4K;lXs z$l83WtW$Y#;FovpZ0PJtW<$aHK9quDXkF6&kuxPfvcN6E)2rFbgD=hdWw-t#?zM1M z)P#DjV!5C-P3xlQd$?0w$jNjcV@+j~ESKmx2sN2FI`_%2=ptyi6s|GtfAGKC?sxH>-$oF*1aBjB8Z87aDy6$ThW@CIo!qwHz4`Cf8g8b zw`#wh7#MHTJn=jsI$lt(JBY>GW$-J=5v|iZErD{pV7Oc$2;if4Dy*N33^%;T9T&GF zSAVXfQy6M%&T&$eeZ!D{7jU79{}6UOl167b#N$=*`dj?cLGbe8T69Cb{%^g-tI?3p z9MM_H5e|kj3M!9h-7Y9FE@HQr14EDU=hva}a=sj~^-eRhT3vt~iJF(w=L{rS#!Y&o zL1OQQv+qV#6Py!W4P~7Z^wRXcIqjJ@p8clyGZNaxOS&+_apt65fEypIC^NdHhE(h-%n?n>M^9HFP#J%I=DZdYvI?^MX@o`?>{yc3bwOWY?FR`b{ zAtqjEBl9Y|Q6_8S9f?L)sezO6r1NSAjP1qtzZ@!va8JKknMX^jn_Cm=as_B5&@><@4&v^PR;Zs;HGY= zg9;TSMVFvbHS2p`WAKmrB(~%`D-)XymoyHbOK{eaXCR}&4j(}_eQU4mY08`-o>EAO 
zJP`9WF%@!}*St9y$;sRlJVR39)+rL@yM^LplYZ@y+ZIGa>EmresD#+fD)z4vITSeS z{Rk=JJd3!8i;L@iGdBnLC42pKZ&*x6WL{afxPO%;BJ%Hd?gGx%Ts1OB<}cs8d84cA z5#VB++2) zgYvP*osWva_TbrAB8%Qan<>4D8bLJ@U3Q9D28||p3%^Osml<1F_uuPXrvPUr zzukD#uQr%0o9)}0wV|5Y7hPDmBOu}2y0N&cEO2wF?4Ul^`Ova^z;Q72#c+3ny?J0J zANIWrG(wbTH)teBUuiFW@jW?jEHAKwvb1W;h#Zs&B!)-o^B1T2DM}-ztrcp;?zU?{ zirrM47RHQeQMO9dQbVtYGhF1HmoDgMwD$_;Ck9e*%vyGLrjl)>I!|EP)AmyZ#Cy0g zVczPyFDp-!xuUw9;NxA`^b2GD{leP1heuAfTF=tO%&tt&%wCdB?wEB?)rCDrLyX%w zRg;YE_>EOoNl)u+AK{RJ)(~>$+N)>&O>aabX(R`epSy6^&DxT^KsdZQ2L%*lq( zfaBmGEK1ll*ZVHMF|oudTUVjVo(Z(6K2!GfGrp@NF;dJ7vrgNvH^Q#0KT_$>&x?hq zFw>L3?i}O&j?0p+1sSX`1Bax)7)V*Q$JLdOh-P}Y`CD5W)(qJY-qrk`D1Omjwu{jo zhj778yCShU_p%qfuC*+N+Kw2Q@VfHP9dB>H0`-#yIoK7A+Hj4X;Owopcae=Y#iNX+dD#m%tZWkeS{0_ z?0COA&NpK-1lYCML6IQ4U7td*m{@57^}FT%8d2inG%}1dqS0F6$@UDVbOJx-=7ht2 zA6ckj4mrqjGSO&#tkgGMpGo9HP0?lQgP|IE1Q*moO6F#`;BRl3jEXiPFePK2YTi zneH4DVjJ4rz4~0?>yoAviPas^W}zl=YjBpzD!a;-<-eTR`&1hsB)G0$A7nxZdoE(` zQg)X=9)PIUa6PNrusRU7GabrdyxBTkh-JfVt?+fu&@)T?toKQE0E*y}M&{z(%T3Y= zp?G@!TdNKUb#6Dc(>jtvajc+kTA5P^y1R;;_?sqscf3*$4|~yE^BhU}-^Wng9>ISK z87)b~E9UzL?uR84HeUh_D?L>VEY9=|k>CG5-c+u}tvNJ2TOdDK;#VMwv#Ns6$ zI8tFlsvvR67RHeF*5dbtp9o!-$Hk-{>C>FRsF=LJgrY2yqyZ2vp-4AKu5o`4<4W4aKDby)kJqf1ac5*nH?#9VXPvK7tzBIY&7) s-c@45EuY_)|D`7Yhy71M1@3Ttc0|bVG;)owN)R~eD%#35if*2K1L+s?$!iErNe>Z`i9?vJkO-u! zXKG<>0s#DsGt$>blcf1MVql=JKQc*83Fqjh6dwLZN#C!xXNa)3r(eIXCm~f^d*?gK z4p5~JP@ve`Y=!WL@THD!q(Vz)%iEB@>3M9a2>Kksiz>7XAJV?45R(GF=6BhVz^C`LSkFoY%0 zB!?F7&V`0!Ne^@F4#ffW`byb?orZ&EPxmjEv@st-g!>pv1=FP1Eb%7?xG?3fUS@g? 
zdGX$no{``HFpMLV#hjoXgVCQ&$VAEA!2!K{I6&Z?NU-`G22|*U4FUpMGm0VA-S_Vx zt?%EP@Teqcq=LEqZ*Zg?(rZzJCyl0idOV*#Kc}$~5YEwmeE*Pp`}};``~3W*gG%?y z2-p=M12k&rBZo;}+JD_Fhl#qRsjMvE>#hMX0I)C%0L0e}>}z0ujV~wUg8#1toGKUi zzjH9;|27%Ca*O}~0f3~4po%-#c^0(3_OQbTbZ0zakDNa#T;9;lO2m~tt+(0!fmt8NA@UEsX##v6ubjX(45H-n1GZ= zk;eB5ZBslfa`uS;2IxLq7^mD{NWlS8rBhb z%UB~BBBLS}&4Rnw&PZ;ZfjBrgv`WuOWagHZ`55)3Bh1sWwRw44>CPl#(`u;Bq7hzLbwF=i(lz3;vZ+Vrg%}SZq z@_(6?y1rzae|F0nis|T7?pIdRn%z;mk6I?IZLiFf@>ykdLES`j#LdQGR=zb9Np@5# zchB=L;NN|f&}%J7gV_M2SkROB&es^*x1b9X89Rq zdSfG6#Xcb)iS8mLPbSZHAVEV)ojCA{m^jipz0^h%HBiAF)otlXVIJ7|;X9b5e5IHH8OAwsDme6*ZF?Y+6#oNaPU**}I%q)w+?^y<0rojt8YKCo?wU}f^~3Cu8`Z<8m{(kaS9*ca=Vjac7VFJ+jROM^(z3GkM=(9# z1SBhBcMvgf{1YMXj@PZ6^jrvO3dH3j6B#H7eBf;GeeMUBXfv!o-qh)zux9@rs7*yRP;Osjlj;DgH}<3y%Qzx4z#aaZYeLXY|hU&!X}Wu(BBn&a6z67 zNo`_j9pr)eK*U*E{hKsCxbA*KvM87Rvu>N`qs2zMd+EWcBvB4?ce#5;JPX}3G_1JRwrzP9)@bEf1d21qF&D*}? z*`BvJ)&2ZfEV_GbPJcogTIwbpG&tccktGG!J~3_K@9K<2rL#i8}$^^O*WgC{_4}&o0V;U zWw>7#>M^~H1b6Gv{ov^=g|&-(?LV0hczQaB8VS)8JcDl4@-C z^dsiM`oZWt?t4}LeWj}Yc_sUiW)f(lz>A$4vvi}|uKOFtc@k8qU=Qu+9@-wul&gAo zChqve<#22RIjkcH&_YQ~*Ypnk2wNzsnHfy752^c2+8m!p-TZyASOl!l6!wbO%eBl- zex)Y>2+>7t{WP`NWVDQQ<52x;Nkv1+yKYcAh{Iqmaen_<27diTRzMIPo@ld7nZoP& zP!{}r_`ElapyBowu3WVfIP|BEPKS%zKaOrEGd1&P&6aihovUU^bQbQ)=~j74M#lIy zw>~GQMmlmUb{KR;L(2dZGQK4&I9U&1=>6|EYMTs!ffGg}T3sB#X{fLkgi16%mo@u_ zeS(hDMdMs_7lA)zIcShe!IIH|(H7Xt`Uz3W?Rs9*`LLvgNsTgJ;uZ>Mu$%J2`7QR% zQNX9!Wti=(#JBA!+e^-d4;=1}p5u*oA|SFSfI*=ZM&>;?(iqh+u6D$8T9XIaVi(uiXy}jAsfY2u(S+d& z1p&hzf3P*gVlC!AQ6Z1dfOpq}6{53scI9@^l&X%-nd?|}*L}XETHfuP^;4D38YF&$ z*H!1Vl9JMS>=-ly0xru6?6r%Ps>b$~51#wK_2%Xxv->*P47QbBVR7r(D zX9uH(T5Zm?nD?d`yl2?uSPs=)x9K{}UQI5jC%p)0rL_QKoAstis=wtnZz~RRKm}dZ z;*gh8o0XO=3-$bloo(M?o8;7)T_5i0rbmjkY$>q5C@nUvc!+=MH}cwbXs~CMTFnyc z4H~&7-TS@as3ef)kLns05kP8_IS>ZS&H%-Lmd>xFN{vn_XBMrS2sAD7+!S)eQJdy1 zQzD=3SpN4SIhU*&d*qw;|K;;Ny&BD@}v{#1_KuC02rm`%rZ8#Zf@ z{W3*61b(qxnl1Z=d&2rAe2{vD44Hr8*saoft5q`#5v!$qeFI?#_y8GNc3P94U%-%H 
z9+08^?$&1gk(SMJz`Xtaas8KEAgFcvsOu(uEc;)NlwDL#*Qy3K5Vh*3UqgEec>>~`V70S zU+LWmk3(m7G?bLHFZcg0hV0xdnA@E%_t@Ydz2G;XcCX`vGWcwYC)1od1+lj(HG zcid*kJsHBlarEc;ETTfOop{QUC^*K zI8WM=hX2r9@AOIu-n1=Gxo$grzobG-PpT8Vq@INNW@_oiM? zLYYZj$gssk(kUS`lk|lo%l|6~gxjpO%(l7yv(x}r34%g)*wJ5qw2^~BoV9(_ypTh0 z@Q1$SnJyv$6In0%$4NE$Yn#u9mYI1Tj6>x; ztgOD0seN&z+T`>;ZAmY%TIfP5d5Mg>q&o_mS*(v(s;90XDd`u-OiJO$#1h?8c(AI6 zMJ)?OYcjPgko!t!j!6ci2j{24s#ZJ5>D!!nE)oHsF|}%y^WiZ33E$27HeBvw;x%n@ zacK$*NyHp^;(Ax-tMQb^$+K+TR@#eL-6^o@*+40c8WW{>_G zZ9=G0rDR`GzwKFUT_z1ef zZzjT)PV-^9h;1-XWNO^{Nw_3$tHx_c1wLLp1M5Ir1z0rT}FZXVwKQKWMB&<~(T@E>WPRFa=zfQ$?f4 zf&-5NA>FoneU=peUfXVZs*5-W^RkqTTqG6eZeXiyW%E8Oy<$ns6iUrlsFfYp z1R&$F^%cuxTUK}6=9^cb@Gi1RfWN$)+1Yb*WGceLJ!%lZddUEi4o&7>Xq*LhUO3rX zng+}jqh_O9&0?IN;c%NpZ<93YtRo(#@ywsL{kmm1H?!;p(i^QR^lYnCHW(jo%d0s` zHj9g)o&_qtrH(YMU$zKadIhHzYH_2JM;eKW+m!cAx5k;<#^~6f0_%x38s0UG7%GYx}UmUNv-#??9oUP zi7y7R)K&xDU#%BTW|1V$S38HR;*yJ6S{6Lv^=%b5Dddw^QsYP(siAAKv9vn&k&?Ww zOJmF?z`Sj<(zL z%l7kw&F%6`PB}3tsmOIf!!1CDC76t<)_S8uuDH^){UKS)#1Zf4XfjbU!8%vQ2hRjb zv&%Zoe6Yb}4tH@)4D1*!plg>zY^8B|vQ~}?8d{@A+(k4Bc;VlxJ~_Gdun)rAtB? 
zbEOmedY4h;kz|s9VI!?Z*TS<`fXAsxv`BxyDFrvTvf4o~ft&%7Hvks~n1t;A536Jh zh7;d)p^sJP%%JchzSZP*K@x4R^SrDiJ3fnRD9vMo?vFA+Y5nzz&WD2C*fMquk7B}M zf5^%~=o(b09pswS$f|@xF~*W_b#B`3{Ezt6OZ=Dxn+V`2(sYM^EBk}hGZ8Q%EvL+2 zy`u1}*kN+F>nhzF!xRX5SklZ(tE>l zok%aKlPSd+tENrM^-0(9=wa<&IM=aXIvFW7t)G1MWG1I_Urow-KuYEcP05g3X7?D# z0Xtm|8LX5j_MMn9R$`#xO8=tD_u~tU(ytCAbM|h6Q zwN@P-Y8P^>w{|F`#3Ll*{A1|IWHGStx;nIH;!&AqcD){}4&clvEZ@9}i%Zd44;xG= z{;cU<3oBeIjjvr<1_4K_-Fodf66|-?CDrxl&2?H$p4{$XDJ^E5bUAuK3e!o@oBCu9 z2M)JoVv!{u!*Ony$JO0?zRs-)CFQ(DyC)7|{cc0qeAZ)pT`==vlVwR~M8rdYRdw{W zSt>9k9+>h?!5}{1?O7yhDkr67hYjL}Gp z+icD($79L1?U|ec14T>`Rgo(#krd|x0Xh~%hNoLY$Zj;AjLr03BCQ60&UT>$39AVe z*)R*mpu%{p76TxDO54LE(#1LMVo6eNm%QmXPCIpOuMk+E<{9clb??w`Vt2=Hn6*9` z9WQJmABD-qetQm1obbda`ZQhDaLWzt6x3gXv>TOVXy<)+T#`XjaXJod*_N7xUxU#;b z<=^qpoj^mCMD?aB8K|iCe=ct@=xiqY_r_Y|$+< zdX}Na{ked{IA3paGSho*TG{V@wSJnX(nIxV4M0d={j>i0Y9fVx%-+K1g+rF;h`7N& zt&Km|uZmWyOWU;5_g3)-aZaXpKo8aH$R>dUIY*`i?@V60_MY>PsXLBQCVIHXtL7>2 z!&G;0bU>X#F(;CvQK)94Wuq29QF_6G`OARJF~OpIx)dAoF2+$$(NWK<_@!tufH2@k zFAzgzIB?jwpPYc|8FP^Mb*Z`QFgu6MCY|?ogruYwj_^>N|0NESJF98NVPF{FONP5X zSYc47c3H8J{a;K-aq(Dl{ojOiF2P~StOQEx5`SDP3UR~L+PcX@$G<9|W%Fij>ZFey z!Jq|-+9|r}J!hFfquS`g%LGE&q#TjouFK~``T6_SHLiwujj!VpDg1%{;x;~&PAz!U zc}uc?AFGrY#m9XKe8y;c6nCtj5oFR?Lu2h#>%(KAt24XQLd=e9hox{{KmUy0K_AI{4P zvb9{KgriF}-|`?EeKNCWyOiadeFVkANxU6M2V-_aus%+s;f&HpM&q_cL>i*)=^Gol zY}ZM9?T0tr)=T4zlA0G(t8|GfBi5*BzjZ;f&WYsKmFS&_N8_7qq|?RD9I}`fb{VJL z&S+n(bd3i{E$_gCkh{K97!ylKUi>uBq|{F*Cdl1&l_=41Jw9CB4GOE2a**uI4t@%dpoNb3F$!? 
zH_EirM3mD%y=;5QX)*Dk5z@tNv1r^%HV_*`1+N$UZ6PEj8bv99P{ZO3g3Yop&R@>5 zB551;NKj7fVy3L4uMe2N>?VXLMloe%Q+I zI*5Qna2YcfTy`PI=lk`59SiG%{2u$mwK#qZ+pQzw+Ujn>=ymjb{IFMY9c7T!mW$in z0)L`#D;Dvy%JA7CMcv@vZ^4%T;zgDCk?X#fm9$gS4gaxVZj?~f+S>9SRN%B2Po`^x zO`uShe@?Y8Y!MVHaJxbfhZ8EScx#bU=^$$K`12+XWmUcP^D~i7^SX_*_cuf3(v%Ho zMDaImZ6ChgGqnY>=?Wd53;DKwmhVcJO*lHVVOG0q zQ^kaK^IboUh)mqK@?^@XK`3rj?+M%PXV(m>3j{_Y0GDLlsM7#zyyrc1sWAGyAVq%Q zLyEZH4#hpHnd#u)ByfQYQKv%Qmw)1iGSqiz{>oXw7~IR9WBrtgZJu~Vi3E z(ls`9xceVWKraWtc;TC_m8R zjQ>CAUYZgG*k5d9!_frww!gK4o^LkYwq3-_Zm79xs(a?z3P(xje#(w9X0Z9R^$Zf3 ziS?DhOio7gwl5+x&gI@aAmz?t)0>N#Qi53N*2=^LI&uc5crE{3Uuc-kE>B+ladXMCw?;k4G#=U230fxjLTilUy88wY8=45 z(eIC|HOSfE@?d|P1olPvl)jlXS$j-n@wL5AX+ior2g-Hy(x{RnQU#!|5;`hHaH^B# zc6v@yHAm+|I%nY8fS6uQvvtwsS()BYk9x@Vd&nTEXy|+n&!4J4U#cH3?EGF?7z}nn zuic+s(7Fo8)2WQYk%q;AYVV>6WfSN{KgtXc% zGqaGi^Nk~QPCwXfGYfLeR>?Yv1m)A}O(z|P;kn1ULEy1z=2nK8mrjJ-vr8%@&TBb}e&uzbuDR8Yp~% z+4j0of!f4MRG+}8bM7wliz1yNTI(THI$!>3Q{czb^73v5L>7P}p>h%H5b~YJ5)8c~ z^PITOz|^j_QI%BL#qJOi1sWZ?%{FRPpp$-ctXLR3F+aqgwFO^xVaoM(mhszUG0!DXZ0|9?H9q(z?P@4MsLy+tTC42Pah1UbgVfXSE zlE0_5hr*|hzVERR?(qeFkKOrD)*Vs|ay^W4Y?D}U_=R>Co<`SG0}0=huHZEu4z zkCr^E7;Sg*=9>Rw*TXEieLtGprwhBp3F0&G+4|1}3rE|>zZgIJ;TC0XH`TXH9MNj% zgWZfPjuvDd(1hck5UH#gPhs&wNnsP){s!^b-sUae{_M|wCoWAsEpWe>$*v=8>5EK5 z60w*?*o6IPHOp-GB8gJV1bN}SJb?T{)tDvTdXyZRdt_2;h3>~Uij6nHei1|6S!7gj zMmjOX3VZJu`wuF+Gt#Nn$7~Hk+J_pqo_B+Z(i4Z+t3wbvAl^Ow%Khk|E8J?|5Ins2`yv0v2ZyjQFz)$SYCdR+HHXZ5_iU-?}HzY(Spr~c%b z)~h+#M0TZLV8y&`v|8q(okaYiM>cw(KCJUQp?rFd9EGf!hoNe#&O8UX3pJZHnLMM1ixJ%F;yl z!?tX0c%_FZs3Emz&D@8dWNRlmSV%M}GOn*4BG}5j0xS1)50f)LxvSkgu5wX`%jd{? 
zRPSQw0Aa);5tT%oL*V{#{jM_dwqz8{sa*h`kKr7|=!DCyN$S+=nr0{^pTqQ1F_%8S z_2}@hHGwD#<2m6&K!EUVil!7zvm)&e^yUGzk?F-tqwCuVV+N(disY;k=Z7>3yEliS zk?S+~xCP_zbZ%?fW;cCdt5()8sJ;R_(_$7SoC+Ck=`m~R-|?>G;vW!&T(p<|P{Hsl z1-}5LC;`iEnaSK}18ck_-d3E&TU(t zMKGfFw5-rj50M4UZks+a@SxD^+eXO_)av0c})&_3S@p4Yg4Ck*=3iiUI0qlRP<&)0GEaX3hQgC2jN@^jaev>RM|E~WC zse*f6!c7UH&S1jz5hfej!2m-D{6Kxj6i_7MPxb-t6Nww-Z1MY~X@9NqT0kDvWQIIZ z!8g^89-PkNURHuswQKoDp625XhETnDbXv+1n|ZSSRb}Kf*k(BP#M2}A$)r6lm(ZU{ zNRKnXb0`YGp!T7i8r}cj3;7L9!7ZIpfB>vT9_RRdG{BloWpc{7A0HgFyp}RwCDVR} zVCmXZZAK0fX4-PX!Sebe_>X__2}Mp0e0yvZa&Bx)*J~PL?|j(NBui|kZ)^CNIoniI z5ahJcc!pb^8trZk0uVGtZq5amOMb7-eohoT3ZujmGf2B4VVF?!v(Mz2_)bpHErvTc za#=#|XO$jPa=bTFqg=LB0Yt1^Q$^D}?}@ld?BhRNDqMFhs%!!tg#9cWGCN-`snGx) zRd}7>K%)VoU`TRaq$drd5}XZ}tDVx~1#0V7)!lzX=9|$9Hcto}Z!0X8xd;Dg2CGkp=536v-|6Vc<4_23w`dgp2zug-cwKLa7-|ka8A#LGU zRpHzPD*`q-tqfpI%eSBD@O^BY+J+zu!h+BT^KRF2-UABZpb&z@?1PLNVWe4DR#!Q1 znYb)hE(*EK!Q|jOg_u5jks#1MdNESSKM&VWP{s~c z-&5Ibuh4qE$Gm4wYh}h>rMVltK=?f48Abp#}5dnBul}5*YW&Xp3$^U z*_P?u@(x&18NPdbq<0qNzk`n*%EYVUAZ>^Yjv4!^ac|sgL0CBBIoi8_2NwDcjJ*Kus(5EEpQPEv1k*Lefn+F6oC zGL9Oy^wTVML!dtZ=LEA6?O1y5vxB`q$i(d?e(QFF=6fZn4V!6FTtS^Cky$w^rmDpg zuDicAcNKvO90R=PmV0~dw{jeEAYy&7D^#850An>@MXpw zAO?CJmG|9~Geto`c$rDgl_jFT*pZ;U&ohL)&jkat;4WQ;%D+*VL-*`1Y1z3Pk=)M9 z0mkY5o~5D9=^zvW_+eyD`+^9RXWpOq327zLfkHFmd(b^?(%ek>mZtZxJ}fD9o}(hp zU&w1b(^;X9ejJRbI#Vzxl%u!jU;aYF)Eehgv!R770R2X@29*gG7UT&EF4$L_cD|^) zvj6Mj&CbO1!bLqvU>?~S=707!m8-0DX*F^g35U74;qJ-x~$ zXJ+ya1`CZa1YMHZY$_gj`aO#Mg2e?f*HN7uJCo$q$}OVl7<7{Wt|)w1KEIrFMxA%@ zsr2OE3yea?*>_%p;D<*O2#I|eNFE&#c(|O0YkPxhThuWX)nx{h!tibRTpYAQ6L^Y{ zy~x1le74i1i5zVE`>V9w4B=ScOEfZemIULKSiyLz(%Gj|@fT>bW9kfHe)igxuhlUz zOWFz;+B_jbOho@A5QpN;|HH;34omF&H*#2(F;)3O*`VjWc>}ApvQd{Q>b$ zfB%Yak7e@SbAw%iLD&d|ps`5%0LT*fpKdc2?JCcZXoJ|yzc@lTuLT`?a4u3Rd@c_w0oyU)X<^qS28sh7-;zkDjkx-rg3kP*#z^eHdpu9^ zjtwhLpu_cIkKO^$0Kbn}#@(us-sHM&qU<d}8RfQDHdhOu;Q&mD z#&hrlKL=QXU65gbJ0H~otKAmQ>y%s9J`2a^HS(vu<_bhXeWTR}Jo*KBCa*6&N97AV 
z{Fq=;wEfiNMG2%dA9e}mhQ5ownOX1fv_Ir``&}~E)IhLHinRnfMvsR?!B-&Zkkz3Rzk{qMJFfq z@=XrL9m*7P#vXlRZp0Br8z1-X)6ky-KN<=?3b#2n1Hj%f-vr%~IDY1#O+xuQU(9J3 zsq8C%dc5~S?Ro|n{^cPF4@x0*GfGRi&bZz*i>}q+n74gPcNe}(W1Ou330++14Jp`( zP8V^-=*BQz;gjP5>}xP8=dIq0E=J>q(069%(lO4Tzu(YE@2Isf}-gZTWo z`lQpcjPN;P&T#-7=em9pysix63iKcc_Zau02>}J}&g-YSt_7bnaeJ%xvARJa^R7Gm zxAdR4^l~lwPME$kU?_Rfe##j!~Lfxy){i+pB6lAT^cel02r2CwT7E(W#nZhDa)0j$B6^e)fx zK!R&H@0ShV*OcQ=O0Ph1}n&&2ru)n85l@ubMI7$1K8viECgIZU^)f4qW){&{f<5M5%NAIk8j@{b;n18pO4sl zbUTnQOwH+~hlPsfAl33(R&WH0Tt+xm3?)8QL({1W-upx8Q*1|=K7mQyYbpgF1gRcC z-;1{6&y&KWs7EMqbK0$Rk{6}B9lEya{xmAR?PavMkDQauIg5j%2ItrDVLM$ioTERj z3-h7Sbxb(z=Ds~QwJ<)-^LXp!1IwGOk?NIN{w}-JyJp|DTpH#XR0Q9fqkrhRT*_@f z;fUy~EAgRF%h*vbrGMxXyPWHm)c1$~`5=Lge2gR} z(*t@oBp-gO>1eob0PGFyobHkMv@w$FX7@%;+kLq@Q67^l(7dxO*L3^y<7v}D^RtQZ zBSL&163mzeoqJeMIOG$p+69Jf>3#eQL)72`R=U4bU39r5XNUo~|AG|GFnmUNkd{*Z zIN|O^{FU{C>asfTQ~Kyl!L**xMfhl7>!xF+&@75+?5#Fw*zN|9Qtx7YaWd(|Y`w`Q%+AKbFt8b|qh~hBXaQ0Ife@Q62|J z45_}W@yW%T4c!!UmMH$*5aT&IkD2Zf+QHNR)Wap*81qJY<#EM%#m=JGEso>y1KQtS zE(SFb1={M%`J)sUrU*`NnIyXqw|6fg9lD(XMp?J-&%(N&b*%(69`}O$JjNwg{9->z zV)+yMSwGyf<|E6QvPD8ln`UppiY?R2v*TlV}KKLW6c(SSM0=FZXF=hAvi45EIo^*=kxH3 zy@sa|Tc{^PpqJ~9;M<%uJ?8~{Pbf)GaNBVzQ@d91&q#O^`eo{QSd!@sby({UD;ab~9rWxsc0eYm23tBLJU@W4J?5`iE% zf=zW<{Ws$uZ$=@2I}R~zcXxN2;4|1)r8yDO{v05M+!odX@fg?MlWDZ_%^Ldq0-6mU_3;j6{Vu>&TGos zuwsqbsv`3dhwc{#wnph6hGz>O3jQF*W)gz2!582A#}4WNk%SY7&pc0pb}_z3lD|*2 zz?+IAu`cc6?L*rOum9*z6!ZY(D}cvx_M!S2UIW72vtR6#Mw|mnJiL()ka{gV7d8sy z#z>6;7Z3nC-Hw%q{_T&8?Iv^G5ONk0dESS_^&JNG?`9KjFT5&Q*R@4mg}CMh-Vj|@ zG%Kn5Yhjh$sn8wC!5pXF?ASs0&AaQUaWNfKg-ESSWg=5|Gb8{7-In8 z9d5J>xB*ktCSCK%_FOT-?=%}Uw!tmOJ{&L{c39h(Lv<1)2n;thHLJI!6Ifn z<>_|~{fPzCSG1&L*z!9(Jsh}vUjCJ2=|S(_@aOediKLm-3SQUcf)lEXX!M|B&g{w> z3!OBpPx3;?gZ!!%;YnFYSo3Q zBiK-Ipq2aVeD83c=%|J|hP41te!WUHwMc$|gPhlmV_br=J4Q-&N*qm1sRI*j)B4@YlkwjrbUS^MK4halXHhEuC0b|F3^R?|Ne& z`dzHMpPpqOz+p}XiBqrps<|g9{nT+Bd#s1o`0i(K= z#IhaKcxBnXWE%x%o~%RGyE2ns6OOR7uCF!c?LGmSRPXdCSK|&3VS=U~m4zG_8XLN*E4`Vv<>px`^5W+kpH^6c2#g~Zu 
zE$8E&0$K$?x)1>%Wo%V%nZR!q9w^qad^(@uyTbY$iD06h7ZUnP>>eA`ZAL zMgJh#STO;pT%(BdBKqy(95xPcW7+~p>~`N@mb))qt5R3hPrw8SK0(7lb3p%F-6n}0 zKH~Ga&xS}YRPvb+SEwk=eeerpddC43h^XsezFc~9=15$hnYr)8@-ll_Tm{;v z`f*S_b&f))NUx&LI z-Oq{>xU>t)gu+8|XlSK^`zY2v!Un`MMqCGlHE+6R0I7S2Xs(9(ut*m+f}p=7(evf#L+zVxqQiRcdQscTh^}I$wl-i%fg%S(n zPszdD&nR%`mop3c;QHV<`9dmZFn}?!{b0&qE>x7#?bfOHWV8+g#Ul;#Ja0_xVAv;s zz%`^U80Jjw4AQNacTtr0Q?Om>OD<&WXw`2WijEQ*-9(s?Aoku9!I?T3W~iMX9y|2j z1vF!#jq)BHp8jXr!tQY?+xLi(9ON;{0-^jDTS)Nf!0qmX9QteS2SxghAUSu)m|bMT z;oSf{2ZN{KF?&>h6tB75-U32~!cTwS`ICfg1x>sDuG@W?OfL6G~bClU+_ zU&6|c3yfNIvbbwKDv}33#?q8$L-=EJVuU{S9a<(Xr72iSZO$8caj5*CXL|q~Fd!s* z69-;K%UfBW28c*B9Gw9+;>LnY(?Uc3_PDU1O~DV34{t3Ei;Yq>BRt~OfVslhIu)Ib zhigg(!JUp9d@Ig(i`uMT4-@LyjdFpIoH_#P(_e@5uZc8HRwV=^Jw#B$Y~q;sC&?aPg zPvuhFuP2N>Nf7WnAwhGQb6ZGAOZ(S@@x}WrPXcutknU+&Y&P2-8uTIE1xX11ysXbn z!({7U>E+Y7WBu*UO=6otzUMUp-xB0rgB`+RF`P0)Fu?ck2B3cI@UIc}fW75be|Dp1k9XnC2LA7UxdDvQ zzPE8ZWIQCj84B;BI9gH*5yiLB+?Vz^kRa%mTt0t;QbzbM%+7ma{O1PRrUKHtZ+QvL zW{-1C62}8G+hf1Hh-;Lfg8m22AK;~8lNy~17)Vo!py?w}_vJ9nEZf1(`*ZvAU61`^ zl_l0(uZ9sGx#I?pV`+Vy00d1hJV`G<#E`8%|0B|x<|J&%xG#xmTMJuDZM;(&Upz$g z+Z{x^wd)`P8zv`iyTVm1&i*F1KkOFFZ*uT&oPgZM0ii%C^hO>{C`?i`7h>a?7Y5ie zbPCBg3iXbf>D#Q)(M$MsuE@t}zmL1vNR$LX4z%@0_O+;jGA0G0`^SG=dZP*wTOAjG z1;8SAsTn8~-4C?H)cl#DbLrOos?!`Kz#7vH^H-rt7`O)G4}eFG0t2An0^Sy@eHU+Hvm6;beV^L5IRoI-jl#s*)f2%g&C>y<*C;5!X)yHC zl-WK5wgNoJTcTXL2HezIbs8;j9Gb)qy@+Ft29)2|fj0qAh&!k>jBMZ0Q*qppa>z3! 
z3B0cb&4dDMy;Vkn{0AA6y9WG%LgF6t-g`O(b%&7GxHEb3)}4+*p#eNED4%&>HRDe> zt|dk{1HM z27{OU?jHrIzWAIbANdtMABMQW*!B3;Kid6iwov49fYH0m_e{${Q5r|^(-0Y6pdtoa z-k$K$P+6!qm>B_3x78oUVIH$T!C$}Q z6+Hk@hT@)gAIgudhlx-9-bnh4dH+J_7FtvN?%$REU1aOPcS7Xbzy}=v$VXiMHU-V3 zFDTZ`GF4sd^;OwmK<&sv+LnZ2gu;!4dFsd_UVauHX(H|T85R&hXgsTLU`X_&r`;MO zVCr_5=HKxOwOZmflNIWy@*^BacumMK6z6@{ZA?4>ltag|Y7uQWo`gY*;% zLxIiYcJv?^KbT=B*l|?Y=lYL4NaS`+h&wNcOPDtQGC=vWM!&~P0Ndo2CTC$CjhE;0 zCfmCK-Lo{MNr1ZJ5Wg?(Eb;7OBzO7D?DT$ZS6*w!EnAcuSX~z5`JEt70B(Ao4>y}} zxrT6l8i00k(;(YG(4~mUbN7OVZ2|p!Vs~0VcS7c2lzc|ICa`)P;I(k!H;r?q9mL)c zVs+2nadYAj0{INxmJ8(a7}_u5w*x*4GbT#w-rmqS-|+bKvzj9<>Isf;_>5*~W$x{T zWWorreb=?LnDE?ZwZH7u8sfDHk>C0ru(gEt49qdX^?LvIS8fbxG39#LisI0VrFVYm zX!#2+?+{!JTp`*JV=Js7ofPZPMy*En5)2uC@h2R95bd1XgvSx=KC4Pw{E=5Fo389K zJ*PtJ_X!h)e97Gg6mGFhbS)>xj#y`gRt*rpOj##O;}hrF)?7X9W;=$rQQSTT1|RxY zZrXW`_KM@WsPxmZyrA+u1m)-_LvhXX-7z=qxAaJT@kV-fd~Lb=rpE5K^#k60pO@|S zjqb4H*TbzKvw4CwjlFYkf>XWortq);Dgsv4VhC4-6e^z;0k{iWdf5w~sz$Eqk`4IT znDcPXU;VJ(Sn$sA`OVY6GBtFnKWq*Wosb`RK9o~6XIUDH4O-M$^d=AgJIdq#JIYVg zT*VyiRB$2i97&x=OF1WJGW5E-JOEgF`GbIT2zsA1b1Db;;S@8csWh77XS0jehii@l z-FMe%Wa@HbBzO`cG_Kp1Tc3dFc5~?yDmjG7HbvFctV;)3kkv10jM+RSgp2Vx~ZB zz61(p-lv3Y+bW^Bvagu<*eJ_i)=0vy&PdkgvR*^t0vb(5fvLj-0pSE)FFh>^v!$PE zY7M86;V`-s&Z^YeVopxMhCV32x7dx}|CJz4%321EMX7Kq5}nw^_Pf;MRyh>%3e(&4 zjwYWa5@aHsUdu=Qt#10;N4)shqCWU%){jsyr@SgAn|u)qU0Ns3@!pmt8+K`lT&KF6 zX?Mb+5YB6ddol*7(1?9T6qh!11ZJefZSgYx_q@b_{` z->=us>h;M$Yr1D^m*eYfA35+oy{Ej}R)0v2WDDi6JS*a;yN}}B$xWZ1h>$j}!>t&x zjVh~dOa7V*qlAu z&|ND_m8dkYWcv!}sRuVQwGU$EH8|{h{vW2!I;@SLYx}snySo=C4xzYfi)-=X?(XhZ z+@Zw@#VPI%CAfwLDGtTqOP}|BpX>W`_u5Iali8V_bME_h{0*GP#H&#T^!0N^j&|{v zQ3Q!B-y+w3QmDl^Xx?mWOT60m41519-6;gNN9diTY|tL^y?-BP^ssdPXE@sgsD`$X zRcRKK=-J8@Lj@CSaR`L0Tw46_J3#xc(Irw*DI7;G!|W&i6Gx*|q2IrBz%%mVh5A4b{^75?!p)U}+KhVl}SmJqp0b+8HWvd8eyAlWva? 
zJz3vKN)9nw`D6+mm_GbY=r)$a_%my1hjcW-x_5D(K>fOz<~$lhHGTJ1Ed7KC9o6fj z;2XrmFeSZ!QSx`$^e3;vwBZ=29gLY63MNJtulVdsG=^Sn&AkPNwcX9Hi}Uv{XahWQK6@j9Q8?;UmR6%CwtpPW68 z6Q13vr%#Ah3km~npO^_Ty}u{DXBK)Dj9M(8z<{3zF{Y`BcxD1RhvT9)Y%_FsMqigB-n6m6((A zE+m#yegeVeQhLPtMZNk&jG+bf7rUBG?{pg5`jiNL1-ZETCzRqH6Q!Q9=g|lK6wR8} zExk{R=8X6iP%<*Jj@%*~x4QeWC?#w`rh=oeC|LGB$3dTp({k zzj2~S`dPo;sAj%qb6jDySTPTC=m8v*Pg)a2-)beVFQVsEhwAb&*ZW_d5NNOzR{j!& zC~8_Ri>#-&jmjk}{c-{<*nN(R|MjCxehFz?nWE%QZ(j4H_z;+?`R<4Bq#Scfvzyy3 zhWYNvZPi^m|B?`lcl$*Q{>MB)=O3T^PHr;Bwd*ubjT*e~%CC4QZnl;g>sor6-0Qar zd&a1iy8@b<-2y@sJbx*#YiX6nv=deERyd~hC2Qu1!D5P^D&838uKlo^WI~;$`(85v zBxz;Vlag){b*}0&W0u7^bJ+a#v%+fl$8!(7-dG&D1IA zxb6^*pfZ|JSiBP|i2lqf)i}bzJ^TG5aF-miVa5vAb5U*bz|KB__!)auqKB7`k54&} z+Qd)%H3~3DVVelQyShLYUElZgCx*|jQkiFTolKVS^jnB=D_%DY41xxNsDT}wB$_51 z7RTCmP;JwDd!5yt57G?Y<*q4F5^bjSi~O~QU21Dc`1dSLttZRt1_wQmttSKw5&du? zpWy0&Qd3Qx<31E2>zl)OKXl{o@RI>fZ5w{^nB)=z-v|~0Pov(~Z~3A2;4>!~8msmY z5`Yygw5;SAXwvXRg-&H6fMT}(IAKsruxrU7JrJF`+B=(HP<|aZ5}BK`uJNRv`ZLtQ z2J$!Y5`E0G{PD#$K}+~Y(a#?vt57E<-a6fxaGC!dg%q^}?VY@DvDOdxoM!77{4HC+YX5hgjt4j=k;oY@ zU1WvV4#1?I>oAVGu$CW3A8x&$(zik0Fsq&OAk$;yoOR40b#;97!~!NzJ0%`Meja`{s0geSy#K=tY}+Xp&w)wiw=# zIF1#;$I7b1aie)Pt=ybU)H+%iOJJ6{;2tX(Bg z2>%2_S5U6%;fx6w&KnuAj{G{?1}XA42@#rc>bl07ocLo?b_@4$D$&(-9t|?{R6%k8 z^G7bS7{pG+PfW%iT>Z1VbSig%4na-CzWyOk7`BIPWcodCIzVL2M#;01!7j^aFY4@b zuSGz#HguiBM;?GCnT{PnI)4t}Xz>cWV>racrFR(SzaHn+T2xuu(S5!x%pfyt_6dcN zYEmtdFt*NeGrT{8JIP=E6y{$%P%+!Wg8FwAH0|{uIGHi{B=Y(5o@0s$MKUhxUk~%>_V|PziU47?PC`H7CdCy#<0>93r%1R;}wQ&m(K?ieXj}ucaRj>kQLcQJbz$n`yMFKXwWb)mn?_M1DA}9!IFB^&u@V zE3M_#Xjh?gVB)Xf0idXZohrr6;_UYm4j(^#I(@dm0b{P^Lf6aBJ)mByeZNWYXl=@Sb|m}wN()sOtLrOO5!l5vJMNZ7>TtAxmTFoke2y6T z7Qeo?zoi>BuE#$4rQWs?6g@m)38I1N3_S4?W!qV9Q#YNQn))bnq+;`hE#G@C^tV2r z^=c#TVBH6c6Qd*e0O3D#B2P1)oil8&M`#7-%E1Rmj$k;dGWo=&jXxC1XE|=md>*Mh zv3Qx|*-HDEiTc@G1;)Mj%oVu?KQ|AwK``EJPHPoF+1(KHalq$J0|pwA@d z2{?HCv?tfT*~-dXNaGh>L<>Xly!Ql|*np;UEo$(V41tT{9*2!fZ{Y{C3m*)^Je1RY z;i=O&7x@3FIhfS*zr^A-f5Hp-ZiUt_kFXvP%FoQv+ubZ~g6{TpVC8yw$9v0nG?%#3 
zxo(Yv!NzYa_o7JlB;C$V_1268E!OC0beZzvp4b=JGp?~~X@Z-ZN z8Nj^T3S#-a+sLrXuR?_a21$mvKcPE3U?xGn6@#+Q;5vc+>f~g(t9nfX{Dvh+~ta-{q8eJ==a6mB(-|s$Y8? zd6$!t--mf9f6i=HC23Gvjzzg?&oIYPg4nmzV%ERDEX~wTae1iULzvT+T2`>*2)wbd zw*G-hDPUw~7V+p%$`6&ETey`*9P^GF9*GT6PdSkv7;4f1Dt9NmPd%Xmem?z#o3)wObILhw$~sJcGK zUqWk1!Nf46U(@Fe9N|g=+59T8@xhQx%EHqAb_X~7CG-f%POh|7xbG&WlARZA^pu5M z+${q$HOmdk%(*2xUxj1$W3A}J3t@W4I^JW}3YC=J9&Sy3Ir-)=ecZYRlX0XvNMB&# zwQfi>L;Ab#;6JYFfTw!n3}yxfWfT=RsT8;iRXr$=qRx=mN*Wm}6Ui!yXk1}+F?sEs z9XXCuO+nY%Auj6Qt>%h_sC;1Z=35A#bK{dVW&s_fZ(O_b3#3E7mrA~11Zi}UB{Mu^ zu>762S;@CjEwxJjd~PWRmHdc+%+qYX11wYyAgqO4viI?=rJWMaOd`q`&c_f23S4i8 zZM#L_`qWarfSLpg6jb9Hg3>an5t^WNYV_UyTR!RU_9@x#3}$mHP^dnX zPQFsD;7gO+MMA2E_jyE`q*98ctfa=fAeL8A(_lGW(3tM;3*4H!TRHns@-^b@90%i0 zFR!qk;GahEsf{%(oZikps!B>JE=fywKCm%NcAjzh)~qegoiKx<3*8P)C%+_^!5Q2- zK>%9{oSA4WDjb$?II>P%XX;VsUH95tj-cnZZMTr)v#v8Dg%fTwj{zcX13FdN%0x=h znTJ3#iNR>giHux0nF7bN4G?GWxyj^Vfs>*iz9Z;^bfAS)1cuILqFXg5J9{erMbN=y zwzXaN?c{Xkc#PneqIcqwq?M9*fh!4$btU?6f+l2N1Jzn2ld7vxsu=rg^-Q>2+L_fY zK36`c`K`w8GuMKLtnIM3TwmGt512i}VGK+8wQt~=E0C}#+|+I&j{)+w2w)_ppz`t+?&@5g`lTy|no z@C=nA=Km9$L$~Ls`qb0 zkbi1@E$P=v*Hut(NclP0eu;nEqcl?+@A+ucvk7G=321$Lj*{EPf?72hl|1Qv$`~oa z7}qjVQW#vCG$lw5_LK{3PiZ)1kWaX_!~f|p?bKFiExdMRlp*!T`B^o|Q)Dna5n{!R za#iJN6^N`v?PyWbGI5Ir_eoDkWh364ss^Q92pT(vUm*OWi(z3s3ml0pXX0RJcr6@6 z>K8%3wLhZnQu$T0P4y-RnfTHa8HJ(xHp&zw&}fG zJ~H9;O`Q{Q`Bu?XqxtoR)U9)EK%^Y{4#nK{?3$|JHh3y5#1w|yb0_=VmnjzI71WF|$`Bd?DJu^PATn3DbPUttLUG)pl~tkU>%%1coD9nwO4AYNTnRK>M)r*c1U z>zv064UZ7k>669Wi1f_ohfZF#iykwk^8{yw_aeV>2N?4zidlgIB&90k@0{Rh108yk zbg3ltSc0S6syJh_B@^S7)7XL=DaHNty87{T3=O^;yQI1covzkJt7gUp^@Y4zm#=aw0q{Jm9ck|&H!725TWK!tMRv3(s zLo>}VNPM6=5_g;c)~LM%>%9`^D3&f&GojkG&SWZ+;|h6i1XXE6vf0U~@B3`TTuk#d zkTK@u9aWRgOTV{Fsm`x$e&1VU;(y~^De*qvTFfQPv6!mzvMU=%2n^|5W; zBM?OFVe?Sbo?T>Kp?=jb>*b&J81&SV)G|;hxC5PQ@|!_54P<|ky3W0VH1j+rPK*w_ zo@PXU5tCvFGE#59_~IOkV%2_HJLFh$y$yW2Tcc#FidnSgH59?AEBl@9k!H7^<+yM$ zJ}PH0N@3T;q!_McWb{Q{;vph4qY!9sW6F(#x38A2x~xYb=2OgXi%wBoNRw#Y40K67 
zF8W)pcUv^OYOLPY5cBsVc@PuSIgkKWlvjZE_&%PzBd9`NWu$mlnL(jR# zct&_3?swC8xj(uN0lfJ#m%C_4OQvR@GXPm~b92u`(P^25U6dIuu%auTJe z-uaD(A-Af$+#N<<38hB{0)xB|6w;HONvwR*v^u<8tw{qsw1qWB{j2b(JUiL9lW#8S ztu2Q3OEyr*qqIOWa7+33w@;JVcG*E1o{wt!J1tHAvubT+rzs2bp~y14e}mMRtdv%+ z4Bbqu=6;@26H7)h*_j`1H804|62b?A}Jy%uu1cJe^pXZp7f zM?)YK%6)Mky`;)6@VaJ22V5hdV4r~cg)6^<@G&o<$b-1#yr^>!=Wo(j^LP%!Vu3Vk zCw6WQ5axvt?p-?grxXW4(`X_H}6^|dB96`h}#c%K~Ffv$jIn&*~#=}#(rVzBKE+hc~>qX zbg~sJN-zVJWsvaR%j<{qfp^b$;QNXBM`S7=1&h`)PF54}Arrg&wf`ai$9jGr`D99o z)(_X0%^#}TE==QaRtAoI8u(>A7pTnrIOd~5az0q`9#~n~=(}a9O+EbDTy6-tN{8Do zTM+qP_%~y2`Rl;L0LA@<%g)O}QzFp3^x^gHXTzpU(mE^P?fDw3I^d55b$Z|fl1DQj~z*#$~-p5}J$1KOTlQT2RHK}a`@D@>4*4ArKg)4FP-EK6smlT7nsK&?Y5L|@! zRYEY-g*>C&p_0l^R&f{VFtBR=wWdF}LaE;TL*TrwVOWT;z5i%^^HT%Dlu9uk-nZmG zkAP=3@*)>a9!FXa-nH&O?eM?29zUL#z8$ZH|A0&Rp<1QL0`WSP z(R_0Y`+T<1fpyrQjf&6I?&U{=g43eirF);6n(Eq5%;=-_(PK!bUVoRLcBL1!{dQ~= zP}7cmt9Gi+fn7v8+C~G1axa}DZ%b=a;VJoHJ5~|y@LMe#p~9cyZB4vGPhR@Og#m-w zS1k-ISltjzNrG);71#@s(T{NP!xJ2-%QY=9qpp{(5u&UufhPjLSbt*XCY2uGhmk4} zFIm|j@a|NX?Nxis*(Kl)%l@!|a6t(2v9=j#^k?xA+9!a07HF&Ac<0+abb{vqC`%lu z;{mx+Jdv_T;PiCZ$E0ieQ4Qk-WY|L<++jsgJsM6m>?!sy-<<;{G(XBauzzRc7XXXZ z>{q9to5raV37g9*5?=i(3C16)LJzcN+-(c-!u$!aB#qBGQ|N=vfg1U|&HW8Onhz0$*dg$p zJAr+*%jxPcrT5sB4a~f?ugLd+h90gZ=#x`$2etYM)YFN0hv^9=A57V~9S7VBe$Y zwNqwr-=k~L6gr}dgu-Ie!Xy zuo-gwU^UdmE+aPC*WdALV1K2Y`oQrOhPn^27vm@N0a5$@6Mk|}F#esq4y(-o-wJrpycM1#jIJ_IjVr+;@=ph(x>4GjZ~n5i4zVzfmAKcO2*-$0Qw-d`VPe zaoFno6m}b7W%UKZ{1Yugv*2Ed#2@cw7Z5@-au;Gz6`^cHv2!SBkd!?bjoAu#y~Lhz zNMR{?FX>2Cbutf!7Qba!>z4ip>c{b}q715fRf0YPg4@=@Yj$|{a4vmes$iq+n|yfL zDj=c}VV*jp-5q$tK+A35n+F-nG>!yI1uyQddYI(lCbKeAd9NCZTTeKQS*zLwfd$i7 zfnTRv*<69MUl~Y*kS2~Ylpx#9CIa3(o1+2XI|$KW+Fdp z73dqDIX*O$@4(pvDCCpDp`gPC1I}0;W?+VkgV!F^aS!Qo`-R5ru!4$BBuOiPEP4qGkJRC^n;#OR z713VM<*lD76+fFJuy~746TSZ(8YwUiyUdw_eK__%4M3C=)=Airyf~^tzjIDRlVEnl zKK9O6d5J`|#z=yLcM_eXSTy`y=dD57_ z^#|&(o?`9_nd(%t_sOcGT0-fcbonHJp62+4(b zb*pMkN)vd{YFIGlv>Mj!Kx9`48 
zF)%!+bsNP5+0Rr7Zi{ax#Q1GjmYIxZqq3}+gRnq=d>LtU8lUyLV1J5COq{x`W3S+u1YTk%=CG z=NZ_Og)_U@G^A;dwnLA40jJJwn%WBKzCq1-YqoZs_FsOQ%Chh5N7lGtlC$$6&Esrl^-#36pH*dlBrnaN?MGv9s4Vd zDX;e@6IN(GE5zIcFB;)|U35``lT%kd(M}SsSW2IwuKPPO&4-egiV^j+EL3Wwd=|h% zjS57@uvCwAsCrjiSb^X$nHc3{^?Mh;E9GggKVT24LZ#+6YecXGK#<+Eu&P_#gbogQ z3HNw5xU079Bs5MR7!b%|oT4ZS`S@DONzvd$p4oOj5{-Us$P+gdJ1Vo-maZ-P&G~$z zeo;Uo(zfLly#n#?u#}TCEh5n^u$-M3{}D~?E3?nB?1##*XL(|ZQJDkl#yg!6WtFeS z+F^0zhgB@45Eu~6%eydp-1cJu*=&H>GAE>kocTEUF9S>=cdts^2yK$-9*idYH*Y9&<@2yi{<}h>8WYIwmALSw_;biG!Kvls0q&(2BGT-P8dwLjf21~(y89g|JC$j{53eB> z!M0rHEzgEOKjx69S6B6Spb+1k8~=mWo7lfq-hR$igwp%Cwbiz*eBYBLzd99G-V!)} z-v5lyhj(DKZg@(|+oFJ{?i(lt^@c*L9jptvoGpwt7!jGnAfSdyOC9cM_p{O<+%W?l z6usF{wsD9kuV^>k-R;PSjf8Gq{Bs`7N_Lq+HJp_d2hN4K}wBx zb9jppJ|yKH`n#TJ1&& z=7jg&RB*5UGv^>5@{nJ07*KJ;dccSSAp!XDRYuIuX_zZs?I> z($(~1xHAEF6EF^Dyd@Ms5|a*-)ePkaTP_%GqpE*ybm>IeYOJ+WA7_z+acN*10K ztQHG4+5OF7IF}^po$Ipf6Z|IlLp*!|!*Mj%8d768)|$!_PdMzi;_!d~zkpJTGfg7T zNNDQzzjZ5g>7giljRzh+ybD;f_LQT539%xZ9sv!cK4cp2`dAyq)7|mNV)2wM9`R5L z;V17LfXy}ig>DYu|2qOjCj#b1rZxDRyJ@5-|A6?AeJ1F#?LcqQZn#GGBCN zBfAVT(5N>Ki33GLV^I^mVigHc-0n{sK3xS1s3c<(L_OeB0jA?15emW)6Q`Myh^FQ2&0|Zk-S8I@2>ffAV2;~+(7^&|by5?R8Q}Jb~llwlz0w2#BoUJW|g42|37U1YBh>s4BH9fSy%NRw0jtv&|JtW_| z^@%?l{yqM%G+r82_MUkx=y9<~#I)kNgc!EV_uAV3`Er!>!INb+yfY`nL}0r+Kvn=E%rMzoUI)E(G|HbE!Mf zux*|$6V!fosaXRO;i% zswP**4AimyRHjhLYD4yHkvEk%73C|?@8Bue&&mhr^L%}LfG@&=-Rpujc@o?}F>zsA z5rpT9nl9mtP#H1g`kFQF~9CBTjT@ZZU%sF_tP_z z5`Xtq;Q^@{p_>v;zqBp98rYW^ft6h^STjqP>*iwfdy)UCYKC-yiRop*>(z0o(c$Hq zg6REpU>~w(CoCPv#J|ok8RZs6on+q#0;bh$dAdD;m|Pv%8Fjm!%~Ch09hXu$J6#{$ z8v3t~yI)Lx2c2Lyyj*11gHhiDUQU>(XMOkw()*h2kPo0;4)vN9c06pj&vs}RQhY7u zn1CcdAQm6EAI_D`$lyDi`2sI7Jbc8;YnRhNL|1-}SC6?i;s&hQ=O(#J{nWG9igd|u zEvF@;gdVYP9pz+%B9xf$p$j<xF`DFw&IiBy*5XYCqeBE* z2Q(H`+|4xKY{iG5#jP~jtEh9tnyRRrt1hajzbYo!F*V(h_#>cGe#n4o$rt<^)_44a z+;b1yI_{jz&53j754N6q&!_Tze^2Gx%w%{E#o(%49#(e{pZCa&O`g%pvE{QVoA}7m zFio+SVsvzf{L+phJlLH@zB&jL%>ROBVFNtQjED4DE|e?Q)0^K&i+E%?467~ChG5e( 
z%SoAqxWX_e2#8Xfy6~>Ju);^EzSgFv$gkI!FoK zeq%+uhEjG)3$w6WSqKt(?p-D_bFVaUZWXogIj-?%GZ%z>=MPqCYAVQ zHd_W!8Y8h3MbqB8*LJ*HOf;ulR#Q_*Q51AZlSCacgT#9mpl){7v{52hPM!L z-UZ?vMrkU`gIn?qgy_g@a#COxJaPK0*sWX*!|6 zW)g5~jBn5f&iL=OgweJFvuPUNr?sH%C3BnruL4rw$ zTIY>42|N-Nj!puhkG5{?F?zrp;+C--g$Kgj8x$mYs?a;x2=_)q-+)~b96>;bd~~H) z82c#QcpXlu)*l64Tz;pNfs0|QyTs^NmhMlf&wwK}lG zeGK7ji7$}OQ^{D0Ani8#&{6h@dg13uebJ%0McDCq+NeB0n||uvf&WZkDggbR&&&sd^9Rc-(?Ku3G7B@ zK|IGRHRoY7>&=%_mgjK;D}dUoSr*KJJYIkFY>eJHbeBZ~FLfBMq;Rn1rW7!;sTh#y zle9FNNSx)E`3V+c1G#Z8HL*;X#tt*1(DnQIWIAKv+zz0Ou%tw_j2qN|(ipI+MIX7; zz8ONY4Wp)T%%m-n?t6MAg4B6jN-|331NfGA79&De(fQ@03a8!IGwx-Y1ke8fNd^4& zPPTd^@8x)s#jaxfcj?MVjuB?zc(s_z$;ngjv5{*FrJrs2^Xt(CJiaCrANjCSH=D_8PPH3;EIR1}K}rdoZ`e*3FU=uI+*NlMnla*} z%%ci<0MwaHsTm5RG5araU7*mrXu}XqoP^7;g^Gb3RnRF}g&*m28!4Y~61yg9(gkg@ z{<*Ku_NLJP%#~inWk?#m$*g?Z+XG7-ecKMy3xIJ}Am%OhHo3Ev9+Jhj6_s#2A6NKj zx)a_nM};DONoqD<^l@ntCTlEmo+0p2uG01*>ftEb@`ZM`coGK}g}_G(YeLe@7|S!k z8v$N{d{-jWFDV*35G|Zq?(~n$hbo#FGoyL8g;}%1{Z{Tr#AVn$#gc9La}t6%@6@(z z5Gv*E?D!4fh8ah4jrcT=w``wcK`}6d*nWbB#D|e)5o1FMJq7I#Ngz7+H(9wgJj=M6 zFJc^*a3b;Y$w6r%O(Q{cXW;!DEhI{-7FZe(n%logGwcP!GtduRx+0z zQ6*IvMuRx?$ZkyNX2|BH=~vr!dOI@mA+8mg^G$QOWwjgd$_w&Ty%uxr^7~R$B1+3_ zeIu#SxbMNNxndcifbH=`v%VN4X?i-!A{&R?!+cbaoH3c8q3efKu3HQVejVuj89_v? 
zil3Lzg%(NOIQ1oo3PTkZX~SSwG`CaaD+{`VXo!)DGzCjaeetq+AKWh2qb;u_sc3(D zlZLe|1tQ6hu~E_-eIA;3DKZikn0=eSh&Bb?!t z!qV3G;)zSG1U+D7(Yl6xB01hPbYwbtinqFIAQ7$qiYI|PLc%A zV|u)omm#Jjrljs59j9}?xjnoo`$P7mU>$#WC+0OCh1bJoZ62ASmE2 z)V^Gul1909UOfW)z#>FyTeW9_%V!z+CM+o2!&iW6}5~Xv8Z`Ic_Fw7A=LkdGbN@uBV8au3>2Hk`b>5B>@Enrb{gPFUqBB;h0T;Ukb>Z|+l--tQ3kL5gKog(dX zdB>+!#F9-|hFx!dV`ro?Ja=K9G^NhFhG?51tWJ+`2FrV<>>N`=;_TBQ7mc&&l1@0< z;wN{eLOn-oHdSMLHKbrcanBHp!D^R2n=kCX@rYz8>qHRkhT}5>P)6RjlIm}talVps zdZUC_DGSRl2>?%yt*S3FutlHK+?S_IN@Nfj-NUnkOU>s>7E0*ZXRpRvp66NGN0^&G z;L(`Xj`vYc#KcAR^M54!&>ey?#3(Mp36~gdk8rgwYV|wW|5emY+TbY0E<|N`e%HOo zqijDNsTq%&ej*TD`EEZh+VUOmXHJY+2DBk~`oa$aZ{!aTGbkv6{PgT7R3c&D^XG93 ztYxodyl&uDU%Wl~A=K(r6EL{)NXs@n3nQ)ZkksZd>YQ+5t5UH5Ncy8v`D_OKIhnRb zd9*00bI~XvhHW?wsYi(B*E=7EfnTO~x0J@FMw6-U)R(r7a#)e|LR5hg+E|UF{$aIC zsD&aE>ZTtkTyE|)gO1bs#3#U)wAjaekYVOKUx^>nk0Nn(+?KajJY-dBhsrrhIj`BTX~4exSJEz-BK5b~8=U)4;n@@&hjn^mkMtnSKYcMj zhC&+n)52??Q!8f&PXyW<|G*^kb;=ruAzP0PT=#DIE`yLS+1b=l=c+E1GYiVXaAA#z zUd>==5kDe1;KRY=!m5Z^u@TY7^X?E9m-vmS%kJ!1eTBPYml@zQ6LH1>L@{U<;|4>G zyPk0hRnczyycVj6ln6+nzG7@t)@ZK9VMjt5{OTDRXlIKquGD2K?KeJDL`mj0mSb!I z6G}!#vtUIC{9;8u2Zq(n&=O*t?AMIw4llXO*a<1&oIJ}31nDrt`x()5k2vK*5`>9} z?wzTpWLq=v_($T!5AAXKqC+k1Z?0W?d$RMcgA2Cf$l;Obyz_7(JP~GdB@Z6DPf_I*Y^CjWM?lxeR*Od3*=#{JS4D?(0)P9X(qVLLCa^+3a?_S|!gOg*f^*aJj6`zSj&`mIS%; zxFUjLa65fBu$IEPVztztRSD_EMVPxYmZ&|WxA@R(;xI92@<<3LB6^1xZ5m~ea~4O& zwB#&z$+U|P#XJY;Og(U3to<@YZVQKORUXu;VlZNlBgj( z;b{aU5s;C4V%~3anFJ#Y4qH((DT5{Y_OitrzC*r)p{LlB{J#Skc%5?V<^z}%h@{rbLOp;n{8&%CmZ!8+R zE#1wu{u26E)1dfVX8YfwONG>W^WYi^RHp+(@`tZnBSfWrIexahcV^0T)>Q z!uLu2O?7)1$oUxhXGN{B0_&o8{%=3#YD!)&F*IL9S9djruRgNqdJ4oR6yYXFkCR`e zA!g|m>`TEvTSU=>5h0bR(cAl6Y|BF8!WI<*SC-H69&Hdhv-w`;a-?7*;cLP57n{#F z6$?RUGy*i~z0F{-^4bvN1;(9S^_VPkXzkFx2~cHBDW*sMftB4=Q*A;~+H9TtfO*r> ziWZQ-B9wwf$>^KH<$D#|O(F|sP%UIM7SVKph=Tqt1EFqLc_QXuCH)w6DzS8y6s!k& zKmw!?}=a%1TIa; zK=n;60FH-?(5jx5`3Iq~h)l!`ZT(|eA>NSwzJnbO#S9C&8B^nkE+YI;O3NBKTuPxf!a*HOO|Rv3dS9U&dt1U 
zesTfMZcn@tQ&VmM&ba}yQPsP58=a9WRs%>51;OsoBH!K51a4PdisDKPF{>8B;!8ZJ z8m%GZ>>gNrBZD7X!#m4lWcXq#fqk6DdkL9L0J%WM|xDlSf$m94ib7StGO6xPNDC>qHPPt`duz z!NP=gI#~|$M{^;1)uS=_A_I#NqajE-%Nm4M;+wvOanDN1Ex;$Mx}uib4xf9on?oC( zJ*&&gq0ji-(Rt^%w<48|g@yXDmt!V%2U0ar&Cp0* z-pbtp4cE~u0dahG5383-H$9pOk7#Of3!ImjismcSqYNmoUc3^~#p1Uu8eB3l{XGsG zoDCh^&HEH*xuyh<+2xS7(4TWUv&&@lU)>6HN~Lh^8cAp+P}_~z*X?)kxb^`}S97Tm z8sa!1Vp;&7^=||FI^r5AS_lYmaIm57ofrH|mDhaphnH4P393pQL;u(^V7 zIr=Qpe=hsac^5T$oR|q=JHb%C5_KP~_VCJiO zlvKiaiVIz?A&JWUdErt~2X-!Ehuz)({h25z`M4+_Sv5m*%Wan0m{iyG%c2rF7>%79 z>vF%je@Zj2%FZ}A(I_bm@V|nRIdZmshR8ET{I@Xz!i)sMx2)=zl=3B&roY0J6pBY1 zO)Z$9$t_;qJ?iwJp`?|w0{=g&Z(;{qpC6X+u{-gtW_!P&GNS$Wz9eA~z72m~yP4uY z*(}Re&AG`+QZ2H2FKgE9qp!ULE$|R^VE^q9Fe`2)>WL8?jn%OWTqqv=Z}&md@zn;& z6Phl*klEsXX^u~$M?ash(9e!9b|Ipr1K47$T{m81(NunV&R(09Aj9pmEjay5`EzPQ z@qqv7?Em>^&~U4XWFoIBs%%!~N(`J!@4C9Ljwc^E77XR&_2qP;D*eS2SHD0BN_9?a zHqiRJnQ-yjzC)tXZ1x|x?5>m?O=gDIgV;|PEv-_2L8ec7d^`5W(}cI$2HBx5>reUz z)fsWhDJVJN;zA#p{#wd^{TJ439~p$J9m{*KZG&clYF6u&9oWNL%{ogC{BHb#jRwcB z4N985GXXYdY~I0kYjcW1o2uETJtQf^EpuY$kZh0`a*g?a3 z9{$nbbN?~Y>tlQkwW*89o%x{rN@jL)D`+ISB{qG#xz5l3EZj%yhrEJPmzS`PcEKkr z1cJ1->mX>T$-9ARbnK&?WWVpPhkufTJU$z>4&o;|5LCvR6tAnJKmIh%Tm<}w0CPII z8_z&SEEM&w|0f6gakN;a`*bTgHc6iS^IxV8B5~KDl%0!pDoy>E0xe*6jlf|$w^*s# z@{fu5ciK5<5n7oKO!hjyW4gIlYS`tiI|EZ9W(B|g&zE(yJBSVVGiK(yS9CP%7BL7_ ztx5augd!)v#$ex|`rn<2!(wkEZ+L=?KWUko^iDjB5uV1w4yQ3H|CwNK@UR@7L=T!3 zyG{t*{-+>@mazW*({nj6tbd_G`Cp-nyucH~?Phr@-*TBEyj^!*nxPg)E5!x_TtbAX z+I;3_w+F`T)yaRv(z`0aN|q}5pY)}_ZTZ6gyf<>t6d74B2P{9=LkmL0oJ ztqT(F>SaO(6Zo?sRj$@|eU-?phsA z?%X}U*5(|%ffDcUVGX$N$F8+otQ4@GBC($@l^sXnH1V5%U``*k7~_+;Z%&U=uM%Sr zEtE*d0=*P9v>l2utK^sgUXIiq-(FvsmuN3jL%r|}O?xqRrg~Ov_Qvj}z|E$b+ zdY?O8LuLV7fp11P%(G?WuY5l?ADbTq5{+KTF1t-=?SVn5$aZd=e>;E58!a60kwx1+ zJbM?}@Gl$1&lGJxCz8FNh{BkcXr-h4KNi4^&+$8Eq06(bbBQM?U}t~tf};NpaAUXh ziDOIR&8LWp&yuF+aQoG_-o420y5F>HDj>Puz3}qLWfKtp9Kg+qBdGaw~O1~e`lg*nq+xD#Gag;e&z4*`F601 zl9TyzY_g4W?~&IxcP#mYD*W)|(()4IlhhCDx!XDs8_wu3Y;$4^B)=N^f84!gS6soi 
zE*u~P0t6545Zv9}-GfVj5ZoPt2MF4@J55M%cXxMp3DRien!DKh-20y2@P1^B#TdQ1 ztLChlkM#_U?6QS3q20C#CWeZTZZC2B0~>*@Oyy?Qsyzh+vTu91s-kmHsbb1#utqu% zw^?s3x-8U8EVNyKT}?6mzNB5IXEBz}Rns3$xB`!Buo>l_K&@m8_>tYrYN+t3va}qt zLuN}7I5;`4h7xE5Cd4zi?T+&!nVtK?QQ< zco2Puw!PKLsKKUe+FT;YLWM^*$+jtx(+0Q0`l_D_6+^ik_CMg{o>OMnq8%A6Kx9cP zmlA{Q(3#AnUux9mJzpS$7AEGpkIV2`TRYh&8MFc{s;%(JO4FW9SghFoz*f8}>1YDJ zpnjUIq|Zidh~u4saAjJ5v~TjmX(X@yW;oP8Ttf!FYC%_c$cF#jtT+fjrz(s^Xb^sm ze?t(gw_Euu9mQG6Y}m|uoOYT|(uyLiMz9M6U2G8uxh*!k+4umXG{ZJSfHZRIk#EJ0 zg1?6c#KthNXoS4zo|5k?u(a)--_9r{;%+C>CQ_qGBAoRBVKXPeXu_2k%O8L=Zip*P zR*y>v^Avdi1Hbc(5)U6`60M4(QsG2z9?EL5Jr0Y}@sacSX4g0``(EAGF@j*v8Lx$M z!<9Lh2eD?)H?J<7!9+J_8yomq{Crn2kOQjzWcP4X_tVuSkQIN6=Lxcz*kk^&iKy@_ zlh-HY_HrYi6RzS76Dh%Pfs<@+N|lEoi;?7}df$saTzx>xnT4vsy^P^ppizszJslX# z)nPSqaB(nIKx`#d|Ff0_>IGzv8EnkRJQKT(a?o~J zI_yd#w60HeT#UAQT^>K*Zg@a7l)-G6@RgLr59rG`Kz-Z?XLe$?_2*tHw%)|7*0Stc z#BsYFX)Z96ouKo;neqX;%XL8iphPpP%LN)i0!bCulwy*~<{ z``aX0%Xv4lq5Eq*4~fGOJAW?a0n&+}H=n5Pxf~}Kf-o@e0p!|=SZn!f!5F+0$nFvR z{g1JzxnZ?4g!YYHhYtDNO2Fv1IWQ&=qcPX!tu4_r4wSk(OgN|cosT)$OTJX(F?58* z!;b~JsV05a8O*-9C7o?v&jczpsBkCwh%?SUc6neHYn`H!7aiweF<*-RyOZa5vde6g zQ8zFDOpV;NI_G}WlBf~Sl>oz@S=ti7Ma0btH3}(Tx^de#`QP!^zVcRX{Quqhmt%G6 zn4+*QYGVEj+lPY&LjhP*=my}H^tgo>y<4rPg(Jos#w)vf!T&epH~qz2ndTv5PB#G078E%ucH6dAc@a0+XCOGaXR&P1e10RYkwYiTaFYSWNQD0Ox_f1X|^alEV~gxXi#$+gfsdPgw}y1A69) zk8)Q)^vVLvM0iu6K%CDp1k8i~0-M_7?MGo*b(yBb8ZK*?MWJl*+AdklyHccWtuwSvO5mMOai~b8OEBO8`7KP-X;TPIQ!>8^rNMSV3 zna{>!b~LHTdJ)X{!5$DucVtURqWaMF`c;)5FBbjJlGi5rD5G9)SBLQO?7)<}w#8q9 z!=yI?nOa=;yyU{q(KOSHOZxaPGszPkUX=jl<2i7uT5v0D4>s346^&yH>H5Aeu;G-{ zE1QIhQ~~GW9)}?vK#NtHqFm~*(LU~Z$r$~nKHSzdfK{D-#UoX79jF~=(l}MeLV8~b z-cfD8ssvmF@)I?=`_n<_(XnS$)#9+{)1pEBF~ksIHE+^yh=bwR&I6zKP~L+TM7-zXeF>ax14RM0A8o}`vZDtEX4MI>QJBCW0c?&{ZP=|*&NLA?WN zm@lGflSAe(XmOctMEy1g^D8IJG) zHyn+GQF?4QPzJ#;e{B<#i`sTNMJlTK3t13RFnHW)4Z{8m;}*1^*xV!FbyBxLZF^}l zfK6xv`Bqcd!}UO1E*6hf=OKvW$v}&o8bicaZ61wxNJhyYC&#rz{-C^}& z#&5xc{T{ApzTY^|NnPLh;rX(RlZ}7h2SJG{ 
zYf!5ZyNxbuFnCc4H`ZCbm9AR66ET_V4N;PuMBXL5DnB9$2{FwrOzQZDz1*HHENxVY zcaAs)Vg_*-ow^8OQW_X--q+4q?~%m%zElnc_lxCj<6Cifcf#xGiQ+(Mgtqe+IbE?6 z&{edd;@sHF zy?SAy80CEXAz_>gr@|A8L8{v+!i}EetY*0xSK0%0N&w1o;>T86%6ke1UgxaFL9wO# zabsOrDlU+)#ydxgVHCrQ4J(9A`F+Wz!ANg0W>-x7RQX*2Z+KKewDR_>0OW;3UOEJ? z3&m{dpAoxxg0AD9^S_M4k@~~bbgf|Uhp>WUPXj*Q$I*m@V9>I2xJmd}9tAuO4%Q@M zjnYwiX-HpTei`AwLx*lI0}QZ}>^*O<6=LXxKA|bGg4=Z-)1R2{u{ue8eMOC2f!QQ< zY9NU#j`^;?o@LoLzxgepBN>TTfyXQZdoJ7{vM|XGbPyAbG>Y|}3fTm=!Y>f(naw<` zi?}~W4EDNDX!yipucbm^*jO(^nHtT?+qd&a$HC@hRQul!4V)u*XZU(yG8PMy+|f7h zC?*2l1}WrZ4c4obo0H$KnX@rmkl0zS+>G5UQpOG?)vb`x5xWQo#EZ{aT6|54`zXjD z6^3kk-q~dl|K`m%yvFD>$k#Hss8~MkzYE`ZX_<+c{9VI2xyz@fOjpBuex?1C$a8GS z+l<4ce;X_wCjH=ASn0_xjGYn_B^?9FmhKK`)F%IeEH228D_z3J$ER#cE}ux5U5fq@ z`|Uj)VFw)ol6Wwc@gPnXSr%P~8U;VaHWjk#ZeiG;>^cEFw#wi*6JFU!5Jw@xl1}U! z=FJ$k+|@WfX*f^w13|pDf`bd#3p}Ond8hk$v;w&(vI3Q(fOqo5_xw98G&ZXR-#baY z?v!_O#ndeomB<;acC_df!ruRIB`B22ei68qHT$FjMPP{KJM<350Mcp{sL&2`X-Xpk zb@D{;*js~46a=FPHLT-sMdqZvrg}VKvcyxc4mob=5GkvWI=;P<~m4^lQd z!2W^0J?z&S`SgwweE7uw-Zh?64h=;v>;Wi@xA;ozOyc+vVUcNPg$azFZKCeWp$oX? 
z;LOpI*Q1cfF5z^S%V4-NRB!?#4qp(_|LRUZL0y!;lMaIV z2En;h)@MdEmy*Kd2D?gpzgTiQcv|C(uZkG$-aY~RpW*ml%w6G7%oO)-@6uNY_Oq0@f0!x`ZIzP%MJOoiM|bg_)<2B*Cl*|y08hl%*!LQ;9So78QbOPCJM zj!`Xxx$GagkTK1{#|e#&S-w4R_V$*9A7-qj;16Mvjx_n;Z!@F;lp24o8nQ?GYV6`f zTq)FKn`FqR#x(reR7F)Xd6go~ZJJ{|7fub?p+GN*$&ysR8qcAHnO2{fYY`l|DGUZ68ZntXr()gi=Dm~| zc*ka18xGSi`*c6y?@slmfvBGdDzQVI^{2df7@ru0_c%U)TzU%&zQUw&=-?MQHZ4^& z$qs>v_Ha<<>-pF)OnE;vug5L+F&fHhJeJ86O{g9^1xAU$ek)a|*x2fH$k7)JNqz_W zwK3viaP!n+?5&0ko+?;}bz3n)Ec`scbd*+PhQD#qDulCsQFMuIl=d=M4@->5ydMsB zDQ~AsX=s(xEx>n{PCC;W6Cs6HI#a~4SWFz+NhI_Jd05d>5srar(!>i#1*I{F&JCHG zkz|*D(K!~Hhc&2x9}hna0GhiYxoqj7m3eT9SopFjyn_=&Y!Ix}<=iLNWb{nQ<~}!r zwTml0nGie{{1C)y36cZSkYK^*ZO0>6p*uizwKq?-IWCRH%5NY;aujflG|Q1i!tAm9 z(SKKIKP+=FLzd(!)cq|VjQ)IPF0QNRtUxg0hH^feG|x~NmsTy-GCb9re5KAA2^xj; zDCi{oQJ@t@>-Pf`erc~sVppW+Z5~Rg>AYhDN@0FcKHW;)^_x(^SxRs5^iuz>I5zpO z#CRLzx1}ng%tt@E-bs)8z(z#AkR#in_q^#U$eRAl#=fr$Uj3!;4mWPdbCDb^(C6Td zS5;ith=L>Yw(*DGIT4gtksZZWvz7&-<{=o!ym&r~O2#AVnnz3I z-$>nmfy9>`7s_St$~;eoO*<4|T@hc2pesMP4)V;9)>>5tyNXRu61u_?(V;cwC758K zk+Qh7s<{3X({H`$!~R4S+8#q^u#A3!A=K|9*G9qP#i0r&u_B%2a~tW56{eY&Rx>&x zGs?}H?v~8=VAHg`hT5bkL$Po)Mg+%!)CWS?%&~=?k7y2xP%*Jz0W7b zGV+v*q$2+U8CV8q{9=Di70hZYHTh|!NnD#TRvPb&7f66KRXZ!hidR*nstZz@veqC4m8AYYgLY1| z=w*0ulDr?TkT!_xw?n|LMw1w34eON%wL_fMiqTgktd<@d-hK~G-_;v zz;hIStVPdvFC?SBhCu=wP5m{qe^{_OKy>v=mx|S*dcBl(N3pJ*z`i0l9Oyw%q*ZA3 z6pWSmuzE=I3&F8?;x^D17`<6YIV6Mvum`sJ@#3GpW?VVxd7Vmq%4ssp_e?~Eg6Rk! 
zEBokKBU=|kRrSRDyx+)$X3}kG4*Jkfm!5h~FR@Mckgwu&!77RfUc$xB5leXp!#=2?)K z=~N5+w8+(mt);cCy0%+V`oQ}!HTx&0N`32)xx71zTKkk!eg}=W@Qt_Bb}zWgLHa~- zpM5Mq=52+VMG-Me2gk{BN&miWcQb$$xQpoV{GKfcB5T~H7grBg5F@ACBd700-ddG> zlS0XuElP82IHMIR?Uqpd+Y#|zoAm8BQbn&j_imF)!FNRuiMzA1nCE<4jmgQ@NmxnK zZ&7al7p%qWf$?j)B*z8!rlw|BChMc6<(7EKD+(4M$|bdlqUWwl4x|~BpxctGGs4Ct zwmmM9WBJfLq5corqY?Uss>pt6VnRUcOXjp|kNT+w(pgiICmG$owqMxvf0448K+*GG znW%{qOM<+6iDvVWwPzwqk)Jin8f7a!O*Wx3n(ygVhk7$sGc#^ut7j3$f#KfN`3b4f zsy9nX75>=)yP=|WEJ3sL`{wDKcB;OyI=}T(8rtS5Z4s*=>2w|D5CQxrG<0$ZhCSPZjyN!FyADKkX_$I=x5B`ZPULGb0vwElvmjc zXu_&Ohy@aKKmW!1zN^Ma6ho`T%J&1tL;Sy(-JVRYyb749qmf+NO`5D@)ANwm%dWr_V~h%M|!j<+MYs z{dho^Y%ddp@@up#vb#&d=S|dl{+4z20#8^C1O3&^;f|~H{A*o5Z1!y`C45pg@iyF`}V=*koOLa&YU*O zy`bprZSg%^KHJ~Fbi&+E*37~p%~xk6tC5g14Z>U-x5S7`B_D{W``_1P+I5+RNzq?w z5;we@`Tv`#p5`dkf^f^HnAJtrt@%d_XeISvWN_LanTwPPlBtcPu(*T?&P|gmnp8V% zd{45h8#&ucF{&(Esi%KVK6NaYA@%D8A4UI;u9d3OcDD=TaUvp#&+>5DD;usk)?0dq z8x_||p#3w45;phd zrf}2&UsTG<=b^ZYk)H+gnv~F4v3EW(SHtmNRQ@`s9Lv5gA4?ONV4h>!7xF#|_rbv7 zX+*&Ou$RGYW2#F*aMsG9K~-GEHO00Tb<6`9+(*f}ebd$H_=U`ibaL4ZQmU|EPeJ{` zt=ck1Df24MZly}gp|eejh8ZltBJ+C?&NOG8?s^{Lpp%j5KCea-bmZu_7*0GaLR(!u zZ*6(W-hw!_=_RY?e$q@Kyu|M;j8em7<3tB|1k@#gnvJ^cC-D}sfv(%bnTh$sfC?E>ufb+_2=jEF*NjZuqFY2j?NdYs7MOj8eeUU;k|Cc$1UE9?= zr+I%-H5V{_^qS{ zszCi&SfOZRj-zLzPb3r>NM1O=R3cpoP;<$elsmy|A< ztcA7GxLw=h8EFrB#~f-D>jnyb#Ha7!m=LBDpVHiwWjPQqSOKhM>8o=a^!_}ZvFLmg z^10P~;5t4-ds^!&D`|?yF5_}CmwO=^)YA4&=CYqvCCUY#U}|I0D8*Gdl%&K&*A|Tk zSNaQ|v$Q2$!7{`bAF)Oa*ChTr+Cp#g_69A>Tm_A3BgOBhH zv1PUlnV_5z=0t+2EYl$SXhLpP94^l5ncn(HQHzE0z4eyf2b_8EffIbvupeJ~Lg*~?h?_Wm7DE5lVMsm3WwUjpDS zw~DkPdZQ^2g+BQ9_IOrP##3Uz!0|DafpjyFd)$`YD5|xT;d~lu z%*dYv67i*~zQg_-(6j-z$^*KJs*MtNqbK1Wot0`u9;LBMh(z9e0~NBpSm`i`JU*V5 zS8wYjqLgWg)beIaRdv)J|YO#PFNCJMaoTBp-{;+Qu- zis281>si+^ZMUZ_?f@rK_wwsl7K}MP{1+3h>9Rbq z)8FwdhS;F+sHjsU4CFLTLcsHYlJ3~o2_oWdl~>Y=3!w7u(j=78Y|(G4-aH$tI;xtR zM}s4l^_Vc;8G($9A^JG9&+bi4+<)U0jt^;a)Z0OxjLv-uJ^IE=Ff~dfc{otSxs>9d 
z(BhSA_sP*xlroN`FJYWn)rRY6tsh&s?6_X87wn$Rnb3;V*D4heE|(5YnL-5tGYFc2Lw-I_P#$P{Mff&-bbv1kyKe&j3V z7r=>WY&P?gW5M?gM;D0DYp^dqOhNHE@&AWqif^4IWt7Z#->J?qO(dqEFvi&`e6Q!& z8fmA2NujS{X4Y>&-NvZex?tNhA61zU-d7^DQtN8|SErrkLTQg~ngFYnY^A}jVL#ii zKRqI|Qa^G{3do|Lsz2w{&kt?$47OhQYKNpBKlin^uT}Qs>&13o`f|Dh+#^~oi5_wS zn7xJzpV7yfpiCvFJw4urENvQ}~EifY|o#d~19#G5H;Ya4?pW7LM}Vtl(bOHhhq}Y0e*-kTdwk zVg&#q&6uP|>E#IGp_e)T3@ER;ZX%7J#3Q9MQdR{tIz$D3As=DEI!j7h|LcNFF1Ly; z)uVu~{(>HC*y6F=6DxQORsg*33M?rp?OAP?Yt!70SNr;7{gp_5FmCy(+DMeUKt9Q{ z&b!zaS&&0^hY}>o)Jx?qziP|qTS2}tTt?x~lo~xhxOau(E=v`#xC@w2tk`B1^mXZ% za#3Yocc&vW&C>>ES`p5BnD;K-4j|#wseXcRRA`vPagZ>d7wZ zt17iJl|PcR^Z4o=k;MzNo;o`{Sw&?}4>I98_Bv%5lJ}(o58W|^e3$DQX_t0u4|B`; znH7g+Xd#wAnT(qacE#VyL6cbjV3v&B!XG*6Ks-95ITtF7_v<}8&ZgGVtr_n7#|XLB zYW3H%v)sGRF_KG~6{aZuN=HMi1l;}X+>Dsp3_rC(8Vw~reDjFUvEr(aGj zShB80r~5CVP!;B_GJ~4wZoMYt{j1qSlQL=nO3A@WLxta|OU?{xDEIi}jPA}>=j4`m zENUFwi?3`7+ZJEBQYveg(Za#~t88jL0y7vK=9fO$DVyT?w*7V%@ea_6)%gQxVRA}} zr9>F2+rk>$`8q4Vtd#94 zGa?fl1{L2-PP&oiE{0p@C=8j(st^ZOZ@;bt!Jn;c5ro<0*SA+o7Xbd=OJ!3& zRp6iMA(1?mi8kk*fqIW@8`@>#Gqp0fWp3a0%ti9ASrkEqgO_Rz_I~>yXXwvHa~tcz z?0OnYNK^s=d*`7Z;Z&{|rA&y|572X-L}XA4vq%Tl(@QkwwR$~Ek>cR6_FzGY6SfFIXoz zZ*8v?(^kU};C)@%i-ri&EmoO@jknxY2=M*^I5yn!$W(;G;pljNxATdx_laC4qY;-t zV)`%!3(C`qLr0miZxqK%9E^FIuwOHSnZ12EB~oIn$m(4Jjf#6D=SqWvu{a(X=D!)v zF1;*=z$n+3MEuqA#%-i9m2aTZH;IqVK{FH0qg4NTd`-hCSr2kiKh z8kDu>G*LAe*HcYHKo$VEuikP)*k(32k4e9N^yOjy(ROn)ke~={zKK4XlLP>(M&_XGIRnC*_d~(s9h~1S zAOzA}sr2i|37fWutK!Mm*30F5Oet}QkTnf{zavi{v-LFewzrkZzwh&b=x%`zS<=`Bf(={s9qu*iU>u&hZ zDN5DowfKnf4kGmisDOR&E^6H0P!7+_%A!c8W)c6!a$Tor3w3Tm$n^f`^sx*_&S!}1iH=n_p~1m`e&5I=?xfw zHo0M=$94RBJ|kFia5WEl0^Nq<8qt#Tq5rvEPhc>#`UfP-Rs#_g%K-oGFltGOzY&5U zau-O+7;$d3QeS59AaL^PRN{A4KsrY;W^h99HLD~Ccm<4Nd=%7qO;bI7^>|bugyOYi zgG(1TKXtuvX&?Hn#bB2&OS2w;Ecx3S@}_}{U-d);@W~eZ&_pa?WvN>3hHEM-)NJP& z3npA-M)|?Bc1ON1=XDwFbh_C9FdB7{wFe+iN#*W`7AbW0mQw^bd4tDDJmFG-t%Q9K zS5!X^>*zGrr*Jlk+>5UAYM|XNlNJ9&J;DtF$K6V{RtYl;y0PQxH;!C_ar(*)suX*_ z<<2cM6A04TDtv7@-OQ69!Uva=S-!mrvxs|B}h2yAE2Mv 
z@^#I6zs-hPWn^8*M@wV{!`Ppq5wajJlan~LVd>_INJP@X++Cr6YyUf8*?#Lp{3QR& zE#~f@-h#j8G7md&&bxbM&JP{-W6kjyi5H#iaP}B>$>1C^`Z`XFIrZ#6u61LhcKS?G zCmc$arM1|HmM17K+QK5jOP~6#mTs~Zyy76Gw>y|;Nx*kjO*3m%z)CrCxDIRqQQ=9odx6MsI_C! z+4&EMk4(rrItYicQ6MAtr2G!hu=&{&K^VG-@AL3jO#&`$S>*>Z-gE^piYC(&;9Ymz zEZf-sjyNO+$FP>i6@NDNoAYA)4lD>h_8+&16akFrtkR>Npg8z6`}X^j?7mI_)iwAf>Ezz zYa|GwS@ddEE%O)~wE1mqYo#Ge1YHso_OqNw$ZEz5$)+)Fq};Y4E6s{aI>l*VkIhO? zz#2H@RomWfZHnmtAw@o!*(f%_*;d5Y_a@6l3OpD~ zl=E#epR+*In%Bmqm&Vgl)9F7wjcDW7R_DfmF)8>k_avFQdcIsqfY5D<)~27I7}PjA zx-3shLG0y$s$oe~H?hQi&8gJ8Z7wL+=Uk(jgDxx^hBnwgJA*QZpkK|G^TX^+w^Uyy zyT;y-|F_*imN3O9U|{ymZ|I~=y+l6DSw|@_s`yB!TG&el5%TS~&2}Lj#e&9a;%aB0 zM4fEop_6l>B;x-pDL(XHnEBg#1#o>X&{uN+FXF$07K)Zj?S4Ky>BTdB8RxG?E@}oM zc6m?dVIs$Z%;=aMvPqn&+g)&-v=!y`pzS#mszbS{SI^4ms7C?HQHoBWf_Jw4d?#I9 zs%GlF|4L(1_;E1b7d+;J=#L}DFX|L7QkAAQ zXQX+@1CmKG?|ZHrvOUwUMOcqzmx2Do+1EPe+4)1%IjX+^5M zi^2ZWrKuEyd>OW0nV~Su@;oaFu)KOAnhpElADLJqem|W1a{{^_;g}EJNtMS`u;Hu1 zz)uJQQS#X`WBwZiX|{Y}te)3=UT}e$fBhQ%>w9ls*{67^?v$wZ*)6EStQ0ipvn z!0V=hy#Y@12Nl$HbVM8*xzn~=qc;7&SoU^fGb$n{^QC0CMreX$iT?zNW5546!5L<26aQQ`Wa555=WO*QPGArTkHhb3 zlw+dNlNRIR0=0_aJv(Am52aRi0J#}eY(s8d)F%=I>AzKh44G@772(g40S(8}^)Jq_ zOuJ-Ls2KCrh4U6NXwsKD&3Mo5{9m#eE})TP8~=VxR+`abKGAgKcaXOET~XdJq7RSc zPlKn;z^z{{V|SfO)5tv}m`*?4Q8<>h3);N!+^X+B%i&ZPVj1q5s{i2SH~965MNV13 z&VM1)>_GCg2t2QHg(}4*fXpAbwaLb zFK&53oG^2zp0@v-KH^XUciT*>Q$IF_fxi9@B4%#=aP!Kim4113y7-apMq)c<;SEQL zlx9M|4ctguIm#9F1@AV4m#g{Ly$Hx~@U&%NDOC_ZWJkpM?o!5th^jYEmDV;MJvRSrlLnnK%NeQ8dB!*_d6*dZC>9N@z!2|;<(}+P#rW+A z|54OI$yFuZmH6V6_#Wn*bDF~!yno+C`DQ5m#=tSFj@0ubYkt(ry?Nq~yn!7UH8Ntb z)H%1PIwE#xtyjz;=C*TABc3rzQ^${i?qZenI1)7Y!@l8<`smvU)D|Yg5h_q=?%^KN zy#REO2o>c6$vs96Z(?w(9GsVsaPJO_9!@M;_9s@8Xk?50)}iw*0XV*0KB&d;;J~Ft zf#P6f=_)cg037`n-P4Nhw4$L;(lS92kZu*sqi`Bd_%4+#q{aS8foG|$IysP&>`PqH zFiN2Ut-~5-G5fF-{}lNkR&;;F2AnX>7fioToB!*NB2HgC^qS)>TQo~XHNF)FaB~V; zd^e1HtLr!qH46t-k5A$g*S{elK^f1KZFoTuKnzW7v}q*h9D<@~cn?1}_4M6 zyE5$;;&C)h19Y2M=Mf9``4yozA74M`AK08tHNGm6?xg?DB>(5li$4ZdO8%?uCT2(e 
z|MY&jf{2;Ve;kK{r9JDX(RrJx>!+uY-^2fpO7hEF%u#Qe7{_;O?Ad{eQnc60GS_vAT)iEoK*n$N8q103cy2!dVx%v(_{@ z4R8Dyky83bEZh7b72X$CcnCxGJI}$y&=!z1Z&gxa*?}+esE}%a*1d*A;dn4f-m;(V z^D1BZ)IUrEubT@aX8r~4R$X1a^w%%io8(7B9i4AnHuJ6%y*>dx7lRnY>w$0xr9g$D ziAy3=2~mCPJ9?=7pGErPg@%%{$G+p)osivJw)=(Mh}4_f2-!w+yjSnfJ9Y^aGKRjs zKBuBUqPr>eRRrZODJbQJC$Ndg8DMfemob`fu4ve}SKr@hiujIyjFl&&A97<^{qVmQ zB@zZ=q}3nvr#QF&_r<}&0#`E|&G;Rx(_}1|vG$PvyP=CQ0^f417pnznWN4Am7vpTW z(7XL-ky_+v^f7ywJ}+lULZpVXOu}FPBeMokRQ+gPxFwP_e0a&Qqqxm=Kz0~|fp&I0 zyGMqRNk}WwfU+hk3=LQR`}e>i>wOh#y=I;0dY*++(i-N>ZeN~!(JP_|F1PfsX^zKO~qb99NxSrR&5Dz|)* zt?D#nYz0!5moG$_>J0%5-|gloc6;V4)hHVTj=l{gnLTmtgvVd0gMb_bO~+bBdWY%a zp0WSljN33|r*LL`em8`RHFLa&aYDz1A|u&7+5D?na`~!XLp{bQv693#jf~4&>HiM` zvr_3)P-i_iJYQ;PmQOTpVm8TRtK$HfFR$TugYYk){Mh*S5h5TEjl<}#S|k5Kp&R?>S(6ldDj=4wdy*HCZ^E=)zHG$>j%gU z^bd=!saRm0{d%`GSPDh35K%qI7BXJ}dWvM!ztGT_k>7P*6l0 zt^!l>mD9Br$$0kTxak~zt7@T(U9))#*k^sCJyt`FvCrrCGb;w25X+WeT%AeddX(ex$U;cy_Xt1a_0URZ{V;Png>I&Ea^(P_S#Hie%j5$*L$Dn z=_fSwKim+S^jY6?MIx1Ls^&2nF+0z+)^UAXoSd&%F_{p0jN1F_UWrO9z#IRzyAYO~ zo}RfH2)|oy^>VRWYvyuS*y*3w@&9uPwH;0zny=I6N%}N~icO>%g}p*QF+7^au{TCn z$p;|&z*W5E=E$bhPT8l(kPS0%!1A&Fr2BjD2V(wNNjWP0D!b)cwL5d&WY)9}llHla z&z~(0Y!Bm7Qyq0gv1p`>DhXuT#MxpKA2^mHHJ5wh#Kz~}|F^gHN&v;%^H+#XQjlox zOn?lTS*JR`zu|0-wv2Px#Z#%52V9^i0mi(=EpIYbzwo4l6}yAN4qjX3C~DiYHxeF;lJUfQk_UL zBe-MaKjwb2XhkU#Q%yaVt}}6JcpQWQsM>>X%@3z7#%DmFH2aJO`>J`nm4o$&xB0d5 zrg|rF26_S>4!AHw=JR9yZ8yKKkLDJDAV6CMk-N_3+hhAkTxR#eBP`O#MmHeq*!<_u ztFEv6F~m2gEj6Wpk!JEtdoXwlI0Y$%ST&apnj%KH(wHBqk!)1kWz3=L+O3wBd>@Ea z`3?_d9m^0|c(^9s1Vb8e{@ZTCrGa1M8)x^eyx5x_Lt61}%a=e{r9Q>ELBYPi6WL}= zoFBUqCji~Y9)21*i^S)aLvRL@mSnz0am)<}P#_Y_JD9Mv-UVHTpx~-?S>)|~(oXcB zglbn*133Rm<)t+5m###W=2ptS>O`*VCLl5p9}>(gf?_28}6JEpIpVXTbXpV1VtegdS40?2SNaHRu0m zXCy>6A=3<= zEeb8ZWhsN)PL|iW?N%8pM!5elplKX)V!0(ZP)`85coQE^;<;ueLWQYBE!*6vmU@R8 z53affKw#bQ7wl27F3-}p+%Qph4RExkEk=%;Ar8(|Y~1bt3~A36R{31YK2Hb-{_<4q zwgh^aBN0HNAVQG2bE-x(VO=QGx4Gn!UvJf&5k|OKx*=XU0R~#HgJKA!^CZ|#f2G|o 
zjLEoNTtmQ0FDL#JoTautNqlyj_tGhGi|xis+xTO(YQOv3?~j;~bsrfunP%Ib>peE? z%hZN`hBNG>R@5<^4H4Fu?W5{9u}?f)9a{A9$BS8N(g+euD4zv{(*{r3@#$^e{zD%6%>ODgm=NQWW;??ZFDl?B?>km^YZhuk6mD^ zD4ky8O2vlg+SE{EI(-SB)0bV{kH4eCrwl#(hLnJslIWmOtW=c-o#u3yUuDL3iKzRw zG>mk4`H^J|qw(s>H7ryim+0T7ll4q~rr7Pz{Cty*rGC9{)7~&cwP7|fY@~0R zcF>?I^Lsx76C|+A0*N+6Bc~o5oe{p9#e4rjccsQ7MJY{q?MDueB2{_0e(>c3p#zW% zpO--ZS#B#XGiqC&!)@Gj3Ov7;nlm5vPRI88F8nkVOE3UvyuiVzBcDPnBPZ@I$a~~< zEOL!>coKWPu(EvbNM271VgSLy`JL8$8X5h&8n-|7^SNRY`ehix(oV-opZN;vyhf;* z%*W~8- zUZxDQ#jzSPTP+rh&~i;i%4qo83O`EwL&K>BFq}*u_gWYi`3I{Md|eRA2{`r5faBSj9imhAQ+wv?ujv9Tc z-SD>Gp^f?twjTrWj|An~%g<9;KUG`oNJYIxBjmefI1&CG?J)4I)k&Gm?E;Ouy+LS zF)feo4>NH~_^2vRm4_Ar1zw!6Ank`*mk46L=$VY=y_8r`v26yW=<(5T3Xc_lr|(%T zZ^;fJf_iZ%R2AXHB>#@(nAu2vWv{jWdX=icW%%(hrg4CC*&Acs7PJ7c2OIapJ`nx| zqf8OXqRxasxJzB!G>YwoOhsgygHjG8(|TSnJe#9Zx$+T6r)J`%kN2aY9I~jejNg@% zlsw+OFNmiuh~1)(7uzU=uP-e!!9{6t{Q{?frVlu1c7SE7YunA&SMQmbXkj0^_2TxR zm_YuoI^;96sw)#t60S@}Af+5Jv7wn_9o2j~m&4y(P)?7Y^>B$2WS)Y&L|)8lm6&(odPaESA0C7(=!n0(=ON98eAmy@SRT| zsflNXbz_f|*LCR0{~{Lt$^)2nu*+BFBUCYZe0-t)f z(oxg*;C{fxLc*i0^1RY+I{)x=&1do4lu}X=?HkXmXUoe2;jWOJ%ii%!OOD&g>RNxt z%kS!+SMY`K?}+(btKVbM(4IQBd^y(v_4xW|LG0{J=;banxW*RBWY8dM#W|P1j~A=P zzD}gu^+-vl9sTFt&Fh2yD;_7BZCk1Qk@I8cU^C!hFn&Ff;h>_`Ie0Qj(e-1w4I_N2 zM4T_4xkWlPKQEh)C8Q2jzm9r49EH{e6?9D~Q%vmo4QVCvIT68arrgh`<*AyXOezvg zfoo^c^V6=w^^=g$$c@di<>NR$1sfaG$$3akP4+TF{T};unR#_oObS#46 zp{4n}HvX;Eu(Y%umDT?E3HM{0$H~u;@&NTD!5k1ElHm8K>Hk2uq>n}2WT?_+k-I;V zQ7Hv+O>GC30%f@T?r}=|ue}%8d+~vp-7Zsa+mq&N3Oc1)JRSZD_GG4SA7b2@%XFI5 zA)d8w^o(*m)gsYwn>Y(J+$Mf+qX;W{@5OC#(bJHXjvld~Xwj53)*z43$^>p6Lh{OFib`_<|tP_iu2*2e1&u@&0{ zti_@h*qN#yFJk?VrTZE6R-0UkqwzWVPum~u?WRWK^CnGAoe5`_iip>Q4=dP~w_~Le z8^hTg1^4}TzS1yXtTKlba7iHmLTmrkY+tC{pAmdwBs{6b+nMnsUFQ*pSm%=l{dhTSr9|_4~tgh=d{_4I-Ta(%?{1 z(%mRXmvnbG(v5TtIdlq0cMLIfcXz&r=ehU2zkgYawPw!jefD>Mk~#R*)g1fcJ{URO z-RThTd!`NZPnBlVKlC5N5tn%o^P8c?m7<}S*jn_f6Fvbzb0&!2urAb6Ms55Z=23#T zJ9t>rB-~L=CiX(UzoDa%Nd}Vb80dsoYc?s(Z#ECx2l6*wGm9|-f{*&1=z8!Or>^a) 
zq^n3OZ>yk-LAwz>bV_28eIL()^J`?2q_T9R)kDLtI^#zoVs@1ZpCmGgWVn4n$W<4H zGYLa1InJPXIKiBErt4S!YLr>P56b9=(<~>Bx}9&_l63FCSTzMV*P8?6}@-NkoPdWR45MI0`2c>%^bDuNL-=0lS8Gr?1&Q)W(+I)!ZU7j5ZbdY{@~*)5K&3A012^ zEfOZ1$tm9K#HVGEF9!W8k^fTDKIG zvfjTytO*9_iy);y*eqtTRx;nPaVs|roJ+`K8YB3;U>^lpv>RmB%ASvB8hGb&=x*Js zW5HJ1=0p#!=RF>&qlrxA7i7hO-)3ul)av>jQmRmJ#d~<@r%0ehd*~KcIO<}Ff<VKwkXzOEpDC`Xx*V$ z6z6X-fcVvmsbRBm=O$fw-J2ks@Q*}S5Z0vAv9VqCl!H|qkj7q4q!y1}dZO?$$Qo0; z$UeS$25t>gzH^bMAPqS7x^5mB6xqIbQD+7F3_E@_oCc=yWzXxmq_zuci4PiSY`XlI zD8*D?iej%{LU07wz_mvd`uwynhX41=5)tpezl4uNg%6;OMBJvOn;-9C%TEPwp1uNR zw6^WFO15JR{Tn~xOW_zjmsR8>E_R2*c~uOOW&1N>m6HM;ARV&&{3UCLbN+ahPu*x6 zt{O=CShAAw1UQil_)ee9Qg|DQL44-J62@MweJ3k$Y=P|TA-mPyN5a0=I=wvWzRQ@l zN4_Gf^uw{Etns8Mr=WBWxl~4Lhiq#Xx4q%))sscfBlN@g4{#KTD00djWS8r z)*qw)YB!jM{^qeH2rn}3Hzd#|r#}-o>1{%lpcB)0^{xUm8S`EgI?1*OQ4R>Omvpk& z9-Cz>w+lD^C-^Su@;&B&rN>PAf2zpbQNKr9(%reQ^s;J)$wRb2Y z^Dm<2Nvrcs!toEp+nW7Debrj2IS$Gsbu1)frfkR^8I?sD$b;oI8IK~B4<@s+vn znLGQ5ED?8=B9%mA*C7CD8PEN8ye&zXkDOMTv~JCih{g3L=AM|x3;y9C9UE9RC0pHb z_uHCJu)(l!#QGYS7J24(@9Wz)*8*1BWIU33Y_hqrYSBln3xuCGWMHNG6|s?x@s|Q+ z+~dt@kXZ?mnMFhOF^a>?J${31{*>fx`+3Jo{-cJP4d-)X=7X@(a(t<|@3oNi@M1dK zaIan^_S^pucPm4}6s|TZ{hJ-O-xV}ie-|pg3kVz`L+on8kt6wc0=PVu-oQgcR~B95 ztZn&zc%3u(j$lx2Anw{mPI6L`Y;<+X05vS6a-Rg(B+EQX4&K+6A!Oc* z4V(jSDDFdbdyh-8q_C-$l`Rx`R=}Qe{MYFfRC+|S`4$8gG=97WE1=!=P}FSkOlr!E zx&ba@S9(6jgN>;5==iBZ{9DV?;t$Y}DZIaZBR%`dT^TKdw+zxECi##mi}J=EX10;( zrbJ8TD{;9NF`-Ikx92BME+wSo!^QVwIeZieqzo( z6V?{dVHJ}Z?ErCwcnO+F0Tez~!9ir3`khdcm5bZ^a-I=Q?JpT>Q-WXLHy(u*~vAgeIKLbr5Ykc;v zvG*lT5?3Bga~r+TMu0>zlKvGzdO$hn5P&R?+U?JS^VUn$>xOFV^jD-K0}}MFZDcJw z$wJ_DkV(pb?+!=@&7=MC3U>IS+~M$&|MKY2FL)#x^tAkZ@6Donbi2!Rlo7tlW0=r0 z674jS$wB?j6WfyKhu2B=AM0yLqd!JJy3H*Z)`9exMotZbekTo)M>kBJy9W_xNnENe z#O1Wz_s%VhRY55k)Rz3l%;>bU(t@}sU!TB`f}r(B-KXAa9u_%i^Sbk!%Fc_TkOn&G z%)brWkm2#}czk_wYF29QXfG-MOwn~CRw5l0 zam}~%?uC}70?Ke2qcj#g&WA_NJL&G)?Q->9Hxsmuvomnd`YH=HUnt|p%L;GDyr4^Z 
z7hwvrGLFW>I+%{s>6XW#g}%KWX$T6ccNN98FDjYs=rW)+iXss*VTAhoofTbfmLROX3wsY5#k~26iqxIRevf9Y(Ih$A#QaTrWe+G(b;>OX})aqD}U4PG*chIiH zk@#*n`#+_V2)!;Mw9GpEBQ2vb_cgzo5u*arW^dD~-Noi4S;aYpk=S2sw+esfMIdcq z?_si@g-k_+vs5(OAz0ZRhtIll=!fQJqj|uw5H1lF<6C1{R5Q!>>P#%;X9ukvn(qG) zaKR}kPQ{TEw1s2o(aMI0({;Osfz?3c}^l)uZmay!^Y9>X*az6&+qX+W@2HA zvWGGnyf@wdrO^MwP_Bp}l{iEWgsufJZVH1qNGbm(zqNxm!pC@S^bUyDCDi)&;yTuJ zR&YeVc|OQMPLH3Ne#Sni9?S`cAv8DeQz8Y!Q65=ao!3cC)*lwi9)cexRem>VPjE%b z!06CVLAK6Gx(lAMpRb8HWjJu zmQXwC{gIWmz0a5QdQ=5%v>AOC&nxJ^pSf%c6jqlkBov&W_# zG5;0fmxm~D(Mwfw9d^a!XGB>CQ@x*zXUu>3njR}-gz{t-pyirY&t9jli{F~(iFSoZ zSSHtdU;tGozSs{@9wpL9o%mWxAmgw+GVxF5aJ-jnnz`j)Q(AH0@rh=gWy$~x#AC6) ziy6QOx0VPyI^oC9hic?u8X4~*3f(YC8|uBE;fZr$q558%U(}=|W@o^??i_Dd~|ULSr1ovq?8?-*{uYhd6H)lZi?CdGqPK=)pY z3l9@A&(phIlaC@pB@AESKEY#mjm98SqKaUp%a9o8K_iK??r_Xigs5dB62GrLAanil zLvsHWI$f>`6P5;|S*ZJztwzfixvp}t9q>Scx*apxa5>c&o)DJ(85>UPQ?O2B3F+^z zhOnr^x*yYa7J2URO2|564SBmFjjtvwvmfD3-X*-dOMo|<=tNRSjUfL5wNu~HoHW*~ zbJ%6Kn69o}bn3Ng+Nny+5^2%8=2V^7bPp8KsA#)NwVF&oMx;a%bI+Z#3Rv*Y@zy+j@w$nUn9RU;^SyG{s^>?hWnB$(SCGI3lN&4PJ1+m_<)ZN^|*B% zZtwRXOD$jRe#2FOo1&a~;5%5YeY4Z#Re;)Ch9#X54 z8>d85iZ(^5!e(yh-vcBZ`{tMqnfD@C0sq|=2iqVtm60bdhBy&Ux@6rtNkgA2gX3~R zHSkwF3usHRPlp_LAjD$kFQ0>@4e5Mc{a#bdul6Ahn>OB_I_o7Pt4j}PnPwRGD3{H= z0j$pEnmE{Ay}UZ3N!_f;x>zrzdBD2)kqbV?5zBNQV!b_|;CX-cQ%EbCo+}1AES~1+VMxx^dL}*E9~i39`8fPjRoC24wAx5g&Vx*h z^$FC9I!g_waH!8`1VY=FL8IXgrn;{-SmRjQ-$TT{zr5HeHb8y8HA#q+3cT*H0-g9j zjcPEJyEqo^QN3(55~K|ZK)AkI{Ii)(B?sA?{kq& zlCE7bz$bMkGHEiM%Ll2JYW@2f(a^m&Z9^y9$DYP-lK8+!qYA?q-8asyb#=Y4z;kxiik8|T%I$IvbL6Jn?Pjb)%Qg@BFbs@?wN)x)mc-ex%|WZ@Hvc1z8l&;I@QE2vPMH;pZ5IA) z?NmK*m2m z@4Q_5y(@UIWm;^c>5o&sP6l(tyZ=5bjt*a2URRW4uafZY6v(ppvw~Z<1(uY&3bf&CEX`1l- zomLv#e|OEx3uMsuTsqfJVX@XCNp&VFdWV>{5slRrO>pK&&R|=bnRn#tjwuZ zuTlz`ZB)x^kH`MX(ih`F@-;e1Rg@H5#?H>vv{#=Hvh}Qhi$Cejg^=c zD&%1@^lyv(EMcKBS&=>yE!LqH)ygnWOqlLYlv?+5%2NR4N>dLBk0pJ8i`(i5U>-go zJ3E|ygSH7OEt$Y3=IeIG$nO)LtryE`UFXPSdf7lRh}(jwa$325tzYy@jX)q*glvQQ 
z2Qs+V9w^J}ixA=rauG8i{<}_{U&sTm;%wGWMkv&}GeU0Ed1BdRJ(5dRh+s^11=sgB zp~Bt zQqM>J=9GL@srs@4O*PT^*O|Cur7m}nfwg@m7(MNB#6=1F-}qTBU=h9}ji)y&_qoM@*(EsMez{R4;9LS*(6c=L6 zp&UEOKK@?H{z$`TP`73$Wle_sfaD<2i~j$dOkKXXKsz~-Pp4Ax-Z4NFz*ta+5YX$4 z;6a~U&y1Qk0Ml_N2J6MBV(!=6P4iTBRx^WG&g?B3D#ea$IBUQIk_kd{sEB==e?d()+dDJGe*&nQor zc4ZM-(uS}0Ci25a-WTI0IY2<+_{_g;f<1^wL@M8s*l~G1@XUNTSZbCy`O{rpQUs{>EJsR)@msr2?9r3=^>gxS!z0BDt#T{d>7nKNFKV6fA`$%W9yM94T7Dz7`8v;F z%=c6J1sNsmthEsLquZ9xHi@L;QSZ>pb_A08%ow(ev(1!g=Cp0vsUzil;YNim0GGcL zV!xCIr+VVokELeHDKUI7Kzjfk76oP}r5`Zc! z^!dX(iosjv|L15;L!>>(^`psY^>_nEG+4`zDBJxK*-aE?q>56u;tv%S%?YuInS_3lAp_#~{h?a$?{PEM3y{i$j=tVf@HBJG^8?%URgHIh??MOg(T! zd=5p0wAh?N=}AFxL<gP0?vIRry`ieIH!EaLeK1UGPN)sWXvP z-#mBO;{_(g61j~r1$a}iMHtJ0{hwjLUm(GN&OKg>4$ZCb`P=kB#~FdgnfgPC;8Q z1ZWEb0ZC%dpk|e2WBiv`Tq`q1PLccbYuL>{a!qBcW$W0*oFO6i0vHd|m;ZTZbD+w- zFEZFZF1IT81}K4p7aK8v2JyJSb;RS1*}^GhdR}R2|MBmz?KZj`mUe!)Iko=!ij!bt z2H*uPO7J)-3gokdu&nELBO>^ZTPB5(e!+j@iCMF$w%e3OhnknG5&{-2L@EeI{jyhc z>-h-3o}4Bp$}v1ErUM0$rZ*z1RvFi^C7*lpIkHnt7DBodieDJ~zNc4816iH;ZRJQ&+$zAPV74 za@d$({+^~STbe(3|YLU;m+Ib0PS`pzj zE$Ns@2n4@6U z?eJk0r)kK-T$+*KbdEP|{&ODne}4>0)DI*)-?K&RIy#ctAAS;Zn#RZxlOFnH3q&A$ z0P#NKFLdJUOChKo+O=0zDL<}=uGl>t7eUBS^v5@t`S4_1vWi-Q%WuC~se~b~{PJ0sI-a^Nmiw)ArK%;PGlPnEI zS)}paZ-tofrlZ*#2qZ5MRj6nL&#O;;*WS%5kphsUS%PesDc!h!-jk^%oHU&Lz$?+0 z+2lW$;Eb22+qscRE%7ygM+5=<$}gwPr(FW_cmXxY*+dj=gK-PD<0}r2Yh9C1=z|t%$jPXpHBm5?9e%$e{6{;U zyuMH_nF3NIcjQg-kh6G|Hsp4{Ss#Cuefk=V(@%%pJ=u$Ji8t_7k;!PXL~-q)=#JRm znldRCJd>O2h_g{6fpu#YCG&@p3h}vyyY*zY?c-Ie2NOZ0GWs_k|955m;Ki}wijIss z;qF9@5uwADg!frNy9WsAIazU2={`gspJlWaM1?oRa0CbVYsuHZwY7;pq}hzjZHF>L zq)D@Wt4g9hleB=aT-YNvqpk({Zp+%y=1uy%M zbCM@J@)!n^O7fVTrE&T~0~+E9<7M0iFgO`}Vtw$&kOh5Hf;l$^ZNAC>-@){NnlVQ4 zhE{Kdlqi1|4l}uHOyl2~KwdD%Pm!lFlg=mHs0!OBUcqw?HGlV`#$M-6n$&i~?L+&^ z>q5C)yK*iuO$VwQIL7g)o^dlc)G9cdyT{!dIoB!*YC76HS_CzbCnUkO+0n{plEJtg zT1xMn#$N~5F<-VYrp|?P-y$R!-h2PwQg|7Wcw~5RtZR5=h(3Iz`rn8BpT8#}X7Q0H z7?!>lLVv;hzWoA0|NiIxW1Gr*Lcj3E?p?1)w%t4aPV)b3DF!S!rK_VZ^=*~S_ZiqJ 
zUUyE{;xlXyWOmsCByf2jzbKaqG^Q{90{+FRUS3kS&(}T`v5I0dCQ4=tVwxVoOPCV= zDVlpltWknHpm5KtP-Rn5su0J%>6c~5f*l?_M=(~x&G6cRNarnEY@EF%+3NB7DyTU9 zk!sse{AQ4OlX_mU$buKE{=D}vW^KmZ6vO9eYa&YQGF)2m(HAHU2=qEuFFAV0VeNFi z&-vj{b09_I1K?mzp{qRgxx$)HT@24F$4Lk>32ZV)`kvL}NThh|=TR&wB~^9yc6iBo zc2;rAXv*tm~+;Xa4BwjQ~FZc6a&3&LpYWe=*}l7us`TxeK&y4+Z=lO_n} znh?*^F(;(c!J$LLBA`P{rlOC2>O871%<>ul+ERW3xpVa&<%_X?laawkU&H^lb7x2x}hl)KWD)d^TH9 zsye}FKW0)w$5Q**;_Tqd^KCSXZHAfS-t_eF$>+x{r@If{J{fa4z_t9@;vSm>(i6GI z=v_mg?*Y|wzs1u(KHV46qaZHoQ$4*5eu`C|U_^tO~ z72UVnCnPT!t+`cVN~O9~~ZUliE(bZLkSM#HOq?oLP52#X%N z{56T5Y*(VX9&=)h(~nno`?+ApQvAerAooM7c7O{TW|S1wq+|BV@@|Omtz{(G)v2yR z10w;O@rQN0HIxYkz<;J31*6p&1NT6!SLM(UUwMwrcy*xX&C@+_1^s9dJ2{}<`x$~D zFC%+B?eK<%ZtC`}U70jGH9Id{CYv~1L<7ZnoWQqmyaynHK{fbWsdByE6mkanWKw5- zCOX0et$P;59KCCV@#{F*VXYLeW?rkz4>cs&4p(J!b++Rox;pc)FxB1=O%^3fF=C}0 zF3H-vCdsaJ{>krbRdahzpegl-1tMtM!6jp% zTwzTLV+Hp$+qC{1%s&KLwE}fLFFz+HDvEQD+tj8uZPaE$Xvt;Y7d}x&j`UzUDeQGp zV{8vTDF4%{wa(=u(E97^$xiu`Y*)T8pz1pfK0-nz?hDLhtHtZ1Ffb$Sc2{Or=Q945 zF;TuF(F$hiXq}nn3Yh-B-p}3??mJ73Q)*ZJAS^+}`vV)#cl6o5 zam`beOOlQ#^hp(c1Iiusdko}qoj~&KdY4A4FewR#ANS&%u)2fLm)P^`zH^WB_|2fX z4+S42!mRBp@AoV!?#vb+kyQF1Dy{?sB$X}GjnD#F;^rb7Z!=YfW>+@m)-YY~M@NRX zDRY^)YVT*E6i+=!%jePqKRwT~=j^|Unz|ICJZ!2Cd3$a38T@r~S62K?%|B)K=fS~N zI`vQn^XEC>W86yfkB6gQ_ZLeGf11?)He(P(qA&bo7zo#08Bm z5eyV6H91S=ihzTh(GDCACg#F6W*feDq3p9LC{=ig!dg7zn3L-j{3>TlcEf@f-JR#w z0%dfxs|{zch-qc9o*3d3_ahsLrZ;yT?e5o@ zu0AS;mSN{N$$eBGMyGuxZo?2b+^OJgW(Xzq*m+bZ-ZZoVr zIC`J?hsEZL#_^s=a@wXD&A~)4i2ztD!Q^~vM2(bd&gAGR!biF_c|PciJTj`#QGVfv zdba5QUK_es;W`kjn(eo-oIzzd3A8lX2Yi>cFhpN`+Ba+Q+jl0TOCD)LKf{=HtHe^H zfwDW!-Y_IFr{g8VL44LK-gD#TuNxCGtTjQG#M-Ttmx7|UUGOP!Y84|jM5-m4$4dmJ z;!8bFlIQH+ZRyO^$v|6Fjz+NM2NAn&BAmk*cYTB?_0e)@rv4QdArkVAUDS%Lx|-1 z&jf)st66j^_n_J|RRsOSNHX>pm(fIK%{`F6uH5cCCWq~c0dKr9Uy*G*77rM0K8{Rl>WoD*E-#Ypz@=5 zt64il@4oh#RR4GQ-DQGVQNcAqhk?|aty~27RiZOpsj7nEG>iz9>J*4Sh(W8gK3{Qg zsEnQsqFa!u+IAo8rJ$m*!tZlpuSLJT+&ZzKTC63$)gx^^?{}p(v8Mvo8s@UrV=Y`% 
znK^>XUb|kmV`GW<9gk>2Qp8s2kChxhp?vbY8Zb=dlOjylrt-9^7M878lc{~xSt1}7 z>Tl{zBt7Q7s0>?+K6aTUZ!HN=+Pe@6-=Y!_n7rtQ;@>r-$4W8DmR2g0nuZI`3n~Ip zN)79IOiw3IF1<6MdGw`A8;ugBunYt=X6@R$Ypq%om}!{ zG>cz-KBbDwVb*qkBHv$-vTzAz-Q1tZqDjr%VMi?VBVIP0Q?C=;$LD-xATNaX?5Vg4 zKP)k=v(NeCCco~-rV{V_W4z>;xsA$=TyIQ5_HSBok6h+cczZTxYcko2pN_rzffwAu zRZm(jShP^?=pwYo@M=htOZAEWk%egG{Cg=B{O?JLH&O04v>*kOSV&1b09zw;=fo)_ zn)gTA>pVYQ2`kFs*L~D-6pZQm$};{}CE0v=&HecrLKJBm2H`;_^_+x0kXfHO({N6^ zdn~fbh&@%Sqjc92#C2>f8PDL+Fg(U?4l~I+?2D>UecVV4+hWa{f}haxO;E9l;X(G< z?o@I3yfRVT<^JIrBdaq3#0uae>kiF`U#Tj6lGCPv4MTt=Dsm$=WEM~~=xjgjA@<~kbI2Njle`70NvvN~ zw~qkEBCus_m<2*k4RNjuK)@WB(Q_YuK`-Z?AD5pSmDh{S({RC553T^ia44gH_uFb* znoZM3Z4X2~+i1W2NkBgFN^D7SiAFXNTj0dU=GTTtj56VS@&n5TK=VOT);-8i=q+dD zfoygokU+U}jQln^^%8LI>A=Txnte^(HCZzYu~(O-uAKc5X8$u4$mBzQS2$R5#+%G% zRbAnNdU0&yxX*^H-zh7<@%IFp81ye#jp zC8DkHdga&B$MOkK0Evrer3ai@B zR{zctw35XJI(5XCG}$9Bj=2?R5=+=8r>kBcrlWUgGs#LkLS5kAC7~6)Gj;#A8Bv0O zy_R)G-S9rTgaGs!7t<<$H?rne-3RRCw7||Cg0`-3F1*n6`nM_3A2vBh`>Pa_bd1#p zA|^|n$!}vEssKyAR&P|sG>W6;{fHF;)Be9_gnawA59A<;Iw$W%_hSNZ(!70*6Un8T z+c%TjoIhTVcWTtFB`!L1EG?DM^UXms;oTdeUc^7mbz0Ttv%uPAVyQ`N>3UcnGbsn? 
zlCZj@?TV)YUosafG;rQ-Vmgc@FsjeLfPJWZt%q`e1-Of*bI8$%5LUBn$xRtM9FU@{LJEZwa zWILr%6j(n=W4fpu;;?j5DA*Y6>CuLr47a42k#=$2_G zStF1koBJc?@Mp_;5%b;uI9lK>A3s=Zt{=kGe4CjNAH~IFdoJa+$@%4r;eAww^{ZD( z=Tcc`5Ah~JPCjB-4`C~WZH7d`HKL$%zi;{FH?BAz5QJ{;DOza(w&s(;LGs66OGyK< zcm^Q%nwnQa;}UKc0`rQ=yw!D1_1UBWPF@mwIP}QOz3};wg_|9>oTcIjOagxh1 zC8N$-ec{ybXWRV#lQ@uWpbCpOEjK?d=teM0wc89nix`xb!`O_Mt6jm`z}NI-|M51B?Yke)89eBX zO~eUg<{$1Yr<7?yOes~$!7&U}P7}dF-@Ah5be$J}Ch55qqc}KCK=ZMHfXiC$Wsga> z%e_rTS0n>(@W(gC-{{}~JC-re7|6%#VqD^=?|N?33kpLbivUbo`=0;`2?$B>r`aJA zB9xCiyb7hw94mp8%lBADiwR1D48Q8xZw*xyq=p1xk?xgZEeT^g#FFnhj+A%6ya09l zTO51&5Hc6!3VK|9KN9%Cv*f)U33Xk^1jOG}O8w`-_$W&VCGbI~ck*Krz_T<4N=SW^ zFvtEJBcpy(tRK&nXhzgS1aGb48ej>{#I2K$q;JT#w*ma`2I}}c3~7uc+F3X&IQSI7 z)1s-)UTaMuIt@@MULxi;Kkzh-b*iwOs$1jkY59K0nF&X)U1zE4#Pg%NGdkqh?-BWIjxI)d zgT?)B&cuUTDzO74Dv7_&>e?l}t8?{78W*@8u%6${5tc2zDF}xod}g(&#f+JwD-@Th z^~MDrJ@|U=?P-JCXEXDO6@R(1gyJ7@rsmnaG^*=Kx}b9;yC!Lk0k+lDjKyY%yX1DO zkDe2{?RO!iALOD3=BwNuYt5+N-|bL5iyi+#q*MNIh|pQD&~E1K@k_e)6USO*DH;Q< zEJdj#?1wnn#}Jl(9k0c{OD&nGfQ?l*BT}gqhNBoDuTDtj$B6#gv?;SVxXtCy-@e@d zzc&?3WYV^j7kpwRrj+gJ@O=}fn78w4eocGXy73m@{kqi|EZlXcw>^Bex-)q#(hM1t zWK5*D44g;`lVcc~P3Ch9@Jq!D&gCbeuTB$G^UXh(XN8e~#7pWA!@4AozVaUt9Ac=M8*#sr(kX+Dy{N=UX;ie!8kB9a;cX z%GKLZb}Jy0pW=~h!bKkz;Bos0kdP=Imfep935324B~!d4)efLDPEJ<)Nw^^9wcY$< zy=(5_`xOUDiGRa-f(6%&-vaEQ(X+U@f^Gr z_W^Ws(tzvuA6ZrTBVUWEv%$gyho!!5rdLpuO z;-MA4^f0#p0iv*#zy6*|UYFDAx)$9ZC6-QJ<2dD9D9pS}T#hmDJAX3F)H}3X5js}z zNq3M!{0OcW?4u`Bik>|#OZ6|WUpVG^Dj(wW8RzzU-D5_y>*w37=UkQuXdEcjy!?GTBsC2<>=YvvrbySiE&2JKtN%o&%|d6^eV z)cB8ZbK*9{?2SP$ezz#xvp(L|z-f4>E)=@n8~ zz|WML$Xn@F451CA*|g26 z8=`Tz*baZ5x#^ZC*$a&6=^>0yi@?q69qNSg1?WhuSt#Hu(PH+? 
znB4(113u;s2ywO0OdUhddj#wf2Li$}9eYS{*Kt`7VvHl=)-=A}Cuyo9z?OAHbsH0H z?z92D&-UdxA?*nM3O{yzw_o9&d4eX3zbOn09rH&Iw1XEj-5uKlt;=#RI^->`od%mg zu!X2hr{TUK-GH9+fWRSa0n$%D0xW!X(xwoHyMV~x9X~=hxK2~$F*bM*!m6BhuJ7%L zbK5$n!BH-&s5|=-^*&WgF0sHBWpC+wq^0TAAVMsQGg}{QT56*;Cn)tPhUs9R^qSykoQ5Kr`dz`3y--bG|y5FZEw6KkHh;B3szjPNIFqbELQk+`CJ1 zXNk?O2R;lA4%WWWI)RwaLE+!GbkVL2b34YjanfGaYUjnJ%lNygOeeRg7h_M7Ini1k85F@IwlyA|N@k5RR`_KH0ii*O5p z7^%l4^g7e|q%N4mxp{X8e+P-sG-VyD5Z)6a3RAOUZq|%~o8c;@j}g92u14!$67^p1 z(R6xr9AaO!P4+2rt9-bYQY)ZxF`ZKw zlvc%{9yMI5Hdi#rlCV`4%EfY^K>Ydo7vLDyHd^}&->A|7@6+8`;a;&M=j@C1n8xRr z6YxaRtPo!8<#~^uz4uXLQ(H0nG9l^N(B62cx}5KOUBRA?6lX8W9Rp{}ALM~d!DbJP zU!->xXvpttEoPS7EYh8hrWEIM-@1DbG<$U(04?r@veni9j+HWsU{D(Q^cA7+`Df80 z9Kq|8e{z(x8w@i26D}?|;TL(A+U_Bj8M^AoT1N7*D3GvnJHSd-?-G{C@VYPzvA6E( z*T#@hR5W@Vsrjkt=U>&9$!y#!#(@3qG~99(E(@G?>dDndHmr+GLRtjP2= z^rBZTqPp-OPGUVm{Ap1B1}W2aCYdA)u5d$eLIyv6)AchEF!2-AbHho+rIm|+)!ulG zeKX>`hmp~b070ZV`FZ=W{pDETUxZ|HHoDZ5H=FLbwdv8APB!)5_>js1?C)29u*a^Z zP}{uE)0QuUgqz0;$09zF?GSi$+TY1&w#cZF-$-bV`#y~G(5z!yz2k4|^S@L6!YOfq z2*)zdg^vnfL{0YS-{#MYQArp<%>)w$u& z0tz{Z`{h|vWpLJy6?Jkl-e)xTe-Zbhc$!8T`{kUuX${-hUkD+-(X~`1 z+L$lIB3f;Dc+okqIz(brnDI9i?ZB$i^>JL*SM3xZoFd^zm4I`!V5L891PbGVSoY|> zPwM5%9i_yJ1)yu5sr7=A5@1IEIbcN6+>5-ip=6vbsI*SUw zsYT1`SqHJ%t+mSBMST5k*R>_TUTOaG1Eajb56~>-2_n4gI)Xwnh2qS*h0i?krQkds z4GE3c0~t*}Z!I7Me8L(=DV3X`n!+h4!VYT}F`SOAZr3 zH=54gyNc(P*Y|)X)Lp;fY=DteM}ce}cODHB(Evvo-6MO%)Sb-{L4hu~<<_6zB*Vlf zRICN#4wO?zgS{t{0n{s-?bIY6kHt{0$+w(}^Nw8rh|WI(d{a+T;%~gBWn)<5@@P~3 z&dK<>u9!P_!!};)S}j>m7llj5{+yX?BVW+)&WirOs@^)Pt#<4BPH>7taS2Xwin|xL zVl56g#l5%(X@OF#xNC9O;ts`w7AfxTZtv!tbC2iy_$Om9uIyYR*?T8ztvP=)8&52G zI`>?0^=EKGh^F~tw}kI^wW@!<24@U0Zv>Rw)5z!0#<#7ZE0RtS;4_h3F`ty?u6ag& zD-kGG%?zD^8nB^^%!(5;@m!t8ERVbLpPk@6!C}i0{Ai7$Rn8DKTmt1aAKiV256oTW zs+c|rn&qe=LuGm+6;g%>7E4BBKiC&bmZkO~Xe-Gp!LCjb8ghbSWI*H#KC?&XhqMee zT~wrSlLTR=R)d?_pbeBbj#DrToWw}a&2O5hUrl5T+Vqye2wUVJ^z>90 z6bx|*pypo`gRkrgM$M8q`N!EJr#JibC3&DTEEA4dT6;`sv@~*nu)km0jM_!5nI_3m 
z{T*|_{G+P>U2JNuBazwu+bu#UTp%igv=TZ{&FDnX%z{6om^@&BR1o%L{ZQ7c>3vkQ z{8z~7*@n5G@Je__H(iAKP>m01(KvFJRxlZ=kJ$F5!K*yGmV5|rU8pZWthTa99#nZ8 z6`k(GictKAuZ4tAa9j0Yzh*EhNLC4f{NWZTee8iOfX39Y!iBJj*!1*6yPF6HpYB}= zP^VA_nyA1O11~Jml2CJOig=k}FZ>wg@aq5?2VTW9=*9!9u#*=93&eYK2=W10*3vzG zeR?O>!y+X#(FP?D?|sRx_tQ%A2s_5QpkE`erG0gRnW66A!Da^Y-`T$K4d(O}#_=a& zKJ6pS3U3Pyi@cr+auPQPe3B*e`0XLXESpAo5?LkgpB)vCKQ&Sd7Zr(5*OLQDy!WoY zXkk8fxpGP$G;kq0zVijU`3BRKxW5dE~wn-TE z^<=JS1Ii7RHy_r}QCBilI^aJrkrY>k6#h!iOGZh?_Uy|WV(5ZD3B>NzAqeRFG`J^{ z^r=W8ixtctoq#VIn7pl2`#^Qz*>^9lfmfllCXQ@{J?leVnB0Ox0_*V}MnF-|zzgAI zq^VcRh{3v*WDa~if+Go-uFpRORR70pjA@IV0mD9l*20jKHXhtpwgfIH5<>$wt%@ z$RR6SI4jw$ZzHDgIhx`L?o}bg7t^qlW)DFG7FZu?ASxV$oF}y)0W|_w1d8H5L~fGr z#(sgiorOGQi3GK25%{k~u%E%{cR?)kBR7yM0zuDyKqcAefZKeRBSE#hD|LbIfuP#` zKf}@FNB|d-W>V<;?)95*St_(pI3$konr1}VQ6Lhf8EP>-hTxYEkLRI7Ns}N`%fsGw zlT(p4XMhV0ob~8*2%i4OZub*@gt3HoZ~_V>b{&I3^7u5tN%%B&JoaGmH17Smwg;${x z!Lk4HAmDz7^*OqiNeC0~^4i06Mc>CwCg2si)J{U;fRbcqFmk0p^7PUi>|=mJdrPg_ zbw{mKO7$@E=E${A*tONZ*rrv(I+8C1fbq2I(cN zgcFZNE6`HN|HOtJjH^(V{LDdYxOi2wny>MKg!~}6x#p-vxNW#Ps)+wc=|%v-9GS2E z_?*(=={TSVSzrG^B(%}NxGBypx7zmlsG;+{a9eCqc_I6RfIw4FFxsEUh}`RW`Tk8S zGQu$tj{ZJ@pBo{`UjPvaAl$2UrbJ*WNbfZPQy~v-fmZ&xIv^JO?ht-?Fg?jWzkZ?Q zw&Li*|6FAgcDUQ@1`LrZTK)nA&5?H9ckywrZIJahDkUPP{k0fS_^eXQ$YPBY-P7a9 zn5v`(+to=xZ(J0an|Zp3{-{E?7U}=WCGJO@u(s5=jQFH#v%2Q1DBI-h;}^?4H_KZb z9W1rVx~FPRr;9rSQy?=z@51(FbJAmes1zI*-NaPh-a|}{23oF=?|U&P!eP%@Ma-h3 zzel8SBR3Jx@@M^oSk*OY8BT%ZH$DferwcqDByjOiyj6?e0bC;iQcYLliRZ36u=jlT z^@NUd#v1u5xLPB&zbj3&SJe;~r)y?-m{<{&VAaC_j&q6Zv5D1gZjhazR4sdIzQOKT zGF?%CkX6?OnxMc|IUb$y_C_}Au7tduMA*^<;q8?we(tV*<` ze18w@Np+^rsVBSBYv~G4aa#VP2)cFFKaKX^13%F0hg}M($ zR`Dg;?#spc2Ih?bgGVUq>8o~DwTj9itSWYMqp($l=A(et1EcCAcrWem-<)@mNA8oG z)n9Yrn7QcvNNph7%Xg-SGmZz->atCI=k%KNeV@$0({la^`THo=>}pdO}ww%$|3 z88q0#FK+!ECJzgLwZXvx?i`qk-|%>FdQ(TuwyBxUD5f=EbY|`QyX1Yhj33=Q^2Kju zHX>zue=>l~IUC0Kjktp^v}0m9@5N06*`qxTvc^cp%X7SezZbrN`$|@)Uxebtz|Lk-X1~Azdd1PEouZMdmVbaOi;&4|AKD(t2%HU0r#rFhKf*?O|PE`IFaS)K)J( 
zwIm!}Xo%4x0qtt9soNS_i#Kg!?v+-iS3g^86}#!J@h0M3(>B1DLROx|nFOWe-JTWb z)LO>aTa=qz?oVG2G>3G=!xO$+_~leHqOB8>XH~4%o^b{-wkkJ)9sYrHiOqAJ%m0iS z);7bcr`JFs;_9I0WvjLJBy&RPSY zS*6^GF(~*y(zm|8KMA_Fo%`_kj;(zF6tUX1FjTs-eTEb zHcBaYlO-pljlHu;ya`r^Qyy%4^gdj4Uus|p7~v^#E|Y_6vNt!_7jw)C_LG*1D|~Fe zJTTMawX3z7ELcD!)HlX3Ytm0|PyqP`cuY>ryYFby`|sp3eDF^;b33lb>h~>t(qqB1 zVvp#zMV{a=_b0v#f;77wRywWsF&=@D^~r@?HI0nc=3&6-OK`c#b>F0-<>ALs`+oki zEc=^C?henJ&F}E|B++4Ku_!U4)k7bY$KelL;*@ndMHrNz`fUXNs)^in9wc}MuRdt= z>4&;Dc@jnSoryA+ux*ZPEA zk%%7P#;7s%FT0nT4e{=w#h0c(SQD^){8VGE+yLmEsaEwS0pf*e?y{HER=_?v%O?eZ zjP6zSx!CxNu7E%H75vbsl|7?YvGy`&MQcyH8*7;|I03l<*#`DE6|sF!R_Yh1EpRb)NQ-Lan?_p?&F>N$btN z+&idaLB-!9@9bKz+2(Swt4%LU9l%JGx0x9JiUDENSOf#R70nte&tvVzO60hf9Pfh#N z7RCH0ALE+`0hGb|BcskepXKZJL^0)9GMH=rEgNj5& z0G$*GyrNx&k30;9lHyr6R16WiTJl*}ZWbp*;T^EHUf6WFY+{?U`%_=WmT#AjC`p@H z%$aLPn8i3lG-GE!@X^b#HJrsvC1+5{@b>mLGAoLYRFDYM4<%j$X9vCcix(bZd8NhB z)>M&(?>|bfk2Qsn^B#acUc>!y`?B9nH-y3&&z4dy>{|Xy>?&4CA&`YR@8EY%dcVcj zzRL{%VclQ72Vx;mdpW|lYzqvuMCt65z3rCU9^)>jKTVtPbCmB5=o}}cIwVH*$d#x! 
ze&fcQHsdU00nUSYe$9`@LO@JGUY=%PS(pvniCHqAO{8{)XUbH~B5_?-RtLkyY;_U9 z3DL8ya(j(`u*{I{WJ@_30Dr5UrS$~Fb6pf1&&Rv0P?{Q(<$;qULCh)pOqqtU+dzaok*t_Va-tSQtnJl@^+PYy04V(&p6*n1JW>**HPF!n0w&xS zjEC;bpm?6GwjwwaLM(D~Ev*j@_T%rfCBrUXTiRsV?Xvjj{+^OdZoWRi9l0MY^#A(ZOQgS4V6$&oQz(m;G__WNv*uJNB~?aQh}g&or6Rz^&&+l%hYhq3&4CZ6z=WoReaU<-W5bVg>uF@BetIp16I(MCZ|O z5$&JDnT@OlUK|hj>`8Ht8PRRmu(ShzQgKgW#%bJVNyCi3>3?^iUe>Zs}isTA0#SXqdj>4A}Q=Z z{_(@!X6VU8i@ zZ?2i)8-2O4B9ka-Rlu3s@krvlf1-BUNU587et&;Vm3M_9@7fQ=yzF1P^hl_q*3%bk zyvS+p;RnFG1Q>O^1_6n{ksoVS)yhJP=egO0T%G6H3-GvJb^d&N6mN{?5RO&Fw1YGr zF_DA$bumVNbGNCdE8dAzTNxszCfx*HG#*f;x z!Q7+euRk!6VE~e^ash|Y>dFO1E{aT?x!_#}XD#?`s86&&M<$V5};=OC7oi4jl2J+!X5Trw*C&#%MoV=7|V)R zr+jR%PoJ72yiHIlZ(90U!!RQ^J$$aEj?Bpj01RN`%;bP(XZj@_%=;-uzNcAnf-`$_ z`T40~J7^2pZp2Z5ygt4uQXZpIz@!VIh|A*kSRx2%ptG{Xf7=)E+}D1SyXN!}mwAgw z^3`f+_g`6%nT3UHYs53Yt0}b?5emv7YxP*}E#(m`TZ&vQ4c&ibu^NaO0LDhwcSwJ} zO=1F+w=2L9eLJ|7VpHzaG2pQF*QuZUiMWFPnpOwSU z0EIxv(yUieWS7rja0>JW!2L$VVpje<+|eWDwLRfahJyzJgyR1AAJRGdVL=Jx3Mv%YgRElImI+(obHU7Xhj= z@=ir|v%mVPFPb$mL-==P_ShXYiVwT*MikO6h?y#ieH^P1W;nvtkhNP;EbrdwfHPwG zgiQ}*e6(uG^|tcQHpk$yXU{>u#tnb|gfmR?2<+05R}$xkFF|RQeeK)wc-s*GcRAwT zdOVFY%NT8L)_AAcuVO$3o$TMTbO)jYcw9YMgBW7L8NCrHiicKz-knN*_~s?WqeE zU9PX^7?k06Ef;uF_Q8}Fc4zW^5KG~?JK4L;;pIJssqTf3<-73+zkW zW#_U1;L30(WI}1~hXBlQ|0hpE0_WP1&!yUQ(RT^OW=W!vk^c-E-@%?<*nDiw2b}DDg<|GaKuoV#xq$=} zIiMQMY#9(bnJe3a32BMCmM4C}KTK8+;I=*mYo5YcY)8F_LMMK~2 zbfQ=WpHGU3tghfG;pWByhB3PNRd7XB^ZLrHys6V#@M--|w5 zyzOez2n67;d#~E=Hd1)@6>d4?C&5s3ciOEKd~ z7!-rkUg6rVQ6fxQ$o87d)GpW0ewyu)Pi4*+PNvCX6eAejTZwLKwcbBCAowO^B{WYMNq~;Jh0*ps}bqBaX&-XX1MqFMVmDuyxWQ{h+Ek2D27>>nj^1!usZ#JdW6* zfxtQ5`SKAe{Y0p=qaMV?mX zd4ASL56O;|z5eC}mh#%|#k%}_Bi8n@JpHUw{{%J`DJG7VBT9Auj)ZLCY<-04E$|)= zAy($z*QShpGPu`768K>goGwmD9D4=m=a`$FAjlLb*4n9Nh{2GOCqQBsel;!&Y3vJr zz~(?{c>$jA-~~|9j>f8r?v@w~dg#ao*QA*JjKccOJnxNJZ4n>IaR*3Kw~WCiIAvuf zoJ{~dgB;y;e`bVXp?Z+kf%~oh-DIW@$J#X%Dg_S;ETBMP4CbR)Op+~x$3u4g2j6c} znNPs3v_2P}BuJLRvxkBA3;~@E?PQ&?XnTDeUQN0r011v2W7D~>510#V2jXK)o}Mv) 
z(qLiH9b$+b@E`=2x0QXbu-Ag%jo%^+zOq>orXF6KX0WsoN9`zSCPM-Ls2!2Q z68YqNf92W<-pP$pV;?llku^;yi~>m(G5F*}Web2oVEbe_hsI3;EA*5FI?fP|Q_nm&&Z9`HB;lM7lre?e)OI&rA;|F+GR{2W zA-t0OV=m+2x1M&GLs%qG)y!hw(sTbfK@ASVQhP@~ickk%FuLS=ZSnnFI)TmT6M#Rl znb>DanXtW_*E(`+)HXI7I})ikl8kCHebscgXH$0kg;1wBxPM%Y7Uv5@%mP2`D)6xS zf|dTA$FR}2j4B(m;hhh58$UlZSq0*AnN^-KumwyXgUgssGmAPs*(SUYoYdATnONi6 zF|IrtrkOGAD8AFp$1m50yk^q&Q7f^!wafcyeed{+Bu>r?Q-+z5X~wE84H<9l3oZ;h zdt=tW)z%7>^?Mur?9Wl8+*5H7R7K~u8*Q{T%n}7QSojnGgkVyG^DGP84*r_+Y^DwP zDjj`P^n?p8oqub>ZyJZxG!Rrpuv#}PwMGSPY3d-JFY4&0S~1GGOYHTRQ<7O;W!ix^ zZs<1jzBFszoDt&TrJ0BU1V~{jccd4l)d#+YwdO8mZcC%;SDSbfIRHZ%&AKI%p4%n^onm?X=+ zXHI1^!G(%r>7(7CBq<;uV83M2WmynUU7`m_X|QQgT!XpbiKseXL1MA-bWSoN-vLq@ zWJlCs2hYJ^`oTQ1!CaOOBodJLE4U-z3KLno4kUo026GPKWuY2}bX<%YMM{V5P>Il^ zfnL|Dt8$|wWYqi+HGD$YfMKGT&3_ZVp;1FxK*V_h;dcOGIz~tNoO=l@YFP7-bD|Dp zU;vq)yd+2-e;pStrUdd^^{*C%!wPhh`Q6+hpoDOo73Bqb6V_h?0G22AzgS^8W7UE< zfu`&sgV^l=P6i^kB$q#|3$htas7q991FrZnEwUsO?CW4956IYa@Kcnu2KjiibSqd_ zd){A{c43k;Rrg`kw6r6yWWRG`E?cX?50Tdr*vZUU3(Bbr+{-_u&!Bfmo>ix5pCl>> z>9@uX;tVS*9uuog+VJ0%@4~^=pI#gjB zKem)oLan+)yGbm3GCR%u{A0ZTUd}kO%qrdNt5&$pflNoC1J{SgHADx}fa)EP$oL^4 z3{vqJ3NiqZ`z zPMquWwAP14JBm!O5}}eK%GX9g3lJWd1ZRm#9NR5VR@`CAI+it((t%qP1v1#x()A5; zg6ku@LeCs4oE`I7z8|uj90z7fLU@t%cfGDepgcj0UrF0Z zN`tR9Bn+gHa`&94cs^v??DuuJ^gC#ZC3+5>))FTxzz#DWIfh@Cf+`TNT8m}v+FS2pm&dx z@iRtpNFXVhA?hzdVbUucr|^LMBVv*zAA6Xz6o#Y8uS)$9n*;sTMAc`+*DF(ZFB0(1 zS1y#Hu>7nw*~)tR#d&2^6p~;@NhEOai3FEqTM`&IKCJi_Ag?)9zBTPbH;OTp>OsGl zHH~UKhPU<`2BXMx!zC4-7-X?+L^Yyf!$-q&R}LYsNN4$NgjAy4B!z|q7^_N;i9MZS zU8~;i8$IvkLp%_pd(dSJ;3a%p9y&dVoUt~$#KaK~ztE$CUZ14jt&x#UvA!1x+`a%S zy}5~o&FFW95XW4->%IUc>Rz5nRhIV-KI^2IuZH$$B%V%~Iq*-=)f6ah4q=3EARNIo zT-$Cii-e(&Ah%Z?d~I?)G_a7H$Hp<=QUC7A3Lt9w2&6=I$LSj>JVctr9i=^;AnVcF z7xrkdG@)wN^4d=5kf}!9!mKzQBgI1ipBFvT4YTLZlh3@3Z_Z_RqnCKQz3wl!_XId| zo*rIdgJ*ftpEX4@%PRo(+UIK94jg=0T_xTdp2}i6$`+l@+^`j5$#1H)#3^=6T5#)AY15k%R6=W-^S+V$Z^skx^f-;CaxeeDd>OCBpr+U9&R z^J!{9a0Sa3EIMI+0lAYniM*M@$L*6lqF)l`w3Z%?dDAw)9_3 
z5g)!Lq4jYFg&cebdn8QLgTUO1mVF#`E8DpIl$q#QAhe`t$I#wH#LUY+~{s zbvo7Vy$VBvmJIYs`ZCz+JV_Z86;{HgrG5AwsLOTiBU0#DNHQ?c7F_U$6T z-s<1#)R+<3NmlNu_NZtN+@NQLnZyHu0-HLm41>o?V#tNyrdhZMnN@u>B@RpMSskfV z5W>lyY*91R_uTaSwbk#iFkV^gt~kF&qgM%LfOV;`+~Lvl)MqoC8wkX3s_vAKAg?eRvCtJZJzAAJFpg_4wUBA~LE0K>^*$Jc!pTOg(5_?dR-LZ1^=FP?>D z2S*u-9~d@)v@550pV)v!JIH?LNx3lu*FJ6~x_bT>QVqj%-eV7Ghi7=1c8=so?(s{+@q-0sfAW2$$Ob3kU;FnM8g- zqTyUrC#m~_OZhJyxWlvW1siRFL7~m}f2hrlD*F{{FT2Li!ChPl|pE@5~;f7>+%`IEz&Rs|9 z!-LQ9pK-fR=R2Xyi|ZZHF|Iv%-^R&rLkL57qwk*L3JkXu$mS!ey79x zlZW%M1)%LrSYpsz&jZBa4u=yFbN?NDcxh#zv9}nf@;O{B6oFY<^jbWs7J+aDw{8qR zGDKtCab<7iOwmO@pxT)Ss(S0o{XY$*rKP4j{5ZOJ@yR;^wLp4Rzu&_;L`ZCHJm$?| zvd`$v&C_+egQutGOr6b#%-~&TK(1PM?6WO&bvDHA{k%?ay7F13uXoJ<$=Q1=*}`^f zIC-Ys|C!SvK=i}yxqQROF3Ismv)uykJ+Q=dOyoK(Ds`#V8-geVzWLUFA~AP6 zb*Fl0GE+zVEqfjC9pzvmmdc)v7^;%OJ+M9Z;&TF9)tHa?7z)RnQjC@JnJvNHWOqunfD^c!M;8T3Li9W*hDnK51zb#FfqS6_HRci9#H8$Ze4d=MVEj@gAh*>1 zaG^mAj_EgEhjTx?s(A3!BkPK?^R8vD>%H8E^{aPyY?PI?T41D@1qQNLKhu1 z(Bqb%go6XysCutU>oLNozOR&a7tJd=;_%$acDPdvf+{V%VIMHfqAjq+c(MG>WyQ7y zuf)+1DDzoG0gJLwblw=Ury3@6EdU}Iv2Qt*$;8gx$5oU)X>NI-s(@C&2*_97!3=xC z^TRwOd`&$GF3gtB(}i=+RkY1rZadv4CPd>@b*fCGJcP04IZ{R`S&wfhU}Qb=yh}l6 zc)m!tG42-+-OgLx1sPt0^eSnV@JQ&D0QNH;RXtD;p;qP@(-LJ*N(69U=c<5}C6?(` zqOq(3e^uGxZs0!>=dH)qo(^spL^HVH6>FDS1Bs0%jj0wAjS@>{mlkKAlwJlT39+%x zpGIFz#2Rt9Bl}W&5FT@7v4@f__y>9+xkx&*_=k4aWGy3Xo}|4)3dbU_r;+(yDHA~= z@EA9=d>ZL~hG`l3!CdX?_Rg~_AChznSikj#5?IOeDg(vMkXJ&rO=P(ezKW;9{Uej~ zbYLM?6VBH38?fo_bj4l+S8WZz0sh4cSKk%;$QW?SImVEE;3vbK1L9#Kp;( z6E3hNgf|GxtbO2s?#CB1b8}t5QEO1svC-!D=(PZ-2yCc{SkRlkVvu_E=MDn}+5&)@ zYSq9b6yaMnOV3wCp zZQx>%7B^ik{*p3~WaZi8KDF{e-|0&O@ukvhI4>A?r6k}n zO~>-Nk!uikik7f_c-;Ae_cylfcef zXd@0u5u$hK!*-=*-PCtqbj@(j;e3ln`|5)E%&ZjVK-6B#wH_eInLkd(NeDCi{&t>Z zlEfE!8+r>*LWBjpTr~iE7T8hYUBVlOe=hn1IEH;FvY$JJc7iB8kXb9*pPI%jHiG)1 zsuJI*g=m#%du9eb|Iv4jNJU@r+GnjmMtil^b7j#-_4r+IqP_4s7!@xKf})Hd;>$Vy zF!{@A{TJmOto4}!bR7$nWQm%#Wh)vmW{9`X6;&{T!J&^f0 zukm(z)rZ`XP&?h%$3g5VKFDtQ;oZmU>+tH 
zZ0-HRR_rxTU8@GGNqUq1uq{X|M%UNQ0a^sdm8enqbuP8>77hX5T}7ia8LW)+RMgO4 zMvr?R4uOxY6+UU_!L*&r=>@!T(Fr|C7IH5yIq=9xP2K;UB=`VC;?*Kkcyz3vMGhox zEB#fHfzmEoGXZD0jF%+y*mA>3nXaBTa@$fZrpmI=x5w92BGyl{8 zvnDppiqpr)7xU`NFX(2q8zAz>4xo3Js}faRetS~(Jh1_IxufhLIJmGg4_TDjXgd&zIT@4m=>;ZRBG7Sw)(;#dN|8h#1n3K%4FS-pRE@hgQo&iLk`=_U#EH zLl!ZG$lXrvRS%vjKNh(E1f|WC;100vuEmm4o+aqnR{`?FZU%7#f{wrkR3*ui(1D~U zq)_P+?FM^;`^95Fix&pTIXzC!Q(eTLUi0jqAHlpkv>glxsKskp2b5zTJ{U{m zhE>8?OLX^2J&-%r$K8hZ$Fd-!JEUSa4-AK7pK%sW`ojL8>Q`b{v!;d(x98jU)`U+u zA1z@PJ44?eU*3F*;}?;>Gu@@{)tp7EGHyKaQ237E`0)<&ck(`hLzf2bhE9)=f){IT z)Z5_RD8CDuUOC=FlRW2)QTsMKF7j79 z7EgjJWejnWYdO1cy*mPw4#%cAO;*TId{6qlu_*{N+$!mK_ydS;@TbyEqUDpkG@j2o z`T8c12z2`PYTl;869S%^toR7xfJibtJ`6Bqx;RRC<~=?iF0>Zo%VQ%uUMr*ffFwBD zszOteQP@H)b0~8H!|3^Vu@O2eI*{jP^*R0y|Cv4CUu@#7SrmbMxbP_bx-Vd1-qgHj zv^-7I$ZR|}pDyJ74ZRCf=I$V4dsa}4STx-5tfIfa|BI~X(LgK%A`RzmM9fo%O)wVm zQj=@R>^GvHFxc)Y&4D~GuGc`)5%FSNkKSbE6Nz{KKEd#VzFb$B(1B z{iimO-H2^eccsN~RT5PYE~;b207pghCpCux3qMLgkkm#rT_9Y5jWc{blkqPnue9b3 ztWhew+TuVubQ8^0eXk2gUZ&_7s6~|aJ{l%^ss@8IcpreNGhEcpxL~xNP&2DYsF&n4 zxgQ%gZns%~gU8F`*I;$mieOUxu_78GAqGeTH9Xq}DjDFZ=+3r~3G? 
zD0VPkH)v@B9Pr17oU2xW;`uKfi{CzloD5y~K};n7XDYqgm}YzE@kKDY$z;tn0l8B| zdo;^FYz1p^Ni3#xMemdjsxWW=_t*O-)~nJ-{ZjnQ9TThzbt?P-{SG%k;>GH_g=}Ks zFOQ)c85#MFWwyrkCMV&!=urZOlex7T6a}rIM#K7{A=oA3La|%vYTWrmQ^WzeI$myP zUsU?MDn{!*bBMv8w&-fPLQnOI^ptV#o8%X}xuN&mp0Q>Q!@F2U<6X#Df{nP;v-`NG zcs~5?yW|QLOgps-DjClfSdN_T;jOU^{Pi=>7an3g`*!SA(HetYS+#KFQP?wov-AUN zC<%k0h&FJ!wQ;}a$?{Z_h2Y~VXM?0TLnaIxKb!5KADCdiTjxit1J2Ea1nzAmfrHLj zMn1&adP*z*PA%N6qyYz?oXL*xIy&3=o$uUVc^>-05xOY8t{98#JD+WF$*IT!w<4|J zgmUPvK&0uXxGD>nsl&jJcCTMz%7VtTxgk zLdc-B!?<{C=Pe<{*VaJy_SHXUVn-2QJ!j}_eyxA;ZS}dWK6p2JnbDSN+7W!19AcP6 zgsTKzeq5^j+PY@}=lrLIP29d*K7Tx5bg9zNm~R^57rt13l3j9;q>TK05Ky`+=yyxz z&(UAd?lnv3tBBkPf1L*v$%*X5nCXQo0_cXKUl{~S$8=NW3_(cD7Dc<0_O=hCoEFcC0|GkVf?BK}om6m1Bd2vLiz$?sKAf&O z9fsy7-6_We@m_SG8?M4&s~!mHqKhG>Q4|uxlr&@Os0nTB?>MJ(r)!w-z|w!{;;flr5Q7(#7q_9rlo%cQb zn8I*fR+y}`z}C0Acw@LMqt@J|jB3U8uBI*^m;TZq+1d$LkB_nXJ0yYwXbBnF`$wx{ zmeh*QUxXSjW%z3dL`^Di<+P{_HfqNy&VoI%wO3@P0b-mDQNbBLzj56zRQ{-`X%=>9W5-a=rIwml`0d!t z2Do}(v6<{35=k?E>H&d}G3BKtG^=|`BtyzOhuoW}O=ucUtTs?Jz~7%nG0xnky2P>< zpJ$yh{6&20+l}w8%Y$et*W#D5zqEDG`bZr5Z*j;nHwr{f$ZsWT-Mf&n$HeW&c~%TQ zW#?XzbG!-(a?%gnYy3|Fu(q{HbjH&pFN2{%%xP zRC)bL6@|fM#H2Ev&7zv%Z_K|I{hw?jDs|k#u&z&j^(~o>#XJXuQ|tW6Tvq=*9-k6e=kTc`_WU9&qpjC8Xv#8?N0o} zMUlmnsMK8b+B#svSsm7FU4jpTu8P1f;*F`j3QNqAF%iJT{N5dZ`x$9Rt#^NdL*}bg zkx`4s`;2L(Kh@^HBjFH)`?1l-&gVnnn7(XT3U|LRvLyc1(SNHhZu~}q$E_6IB4^Pof=qz{xmc(OsXt(HC#QsWqKlK>Ivmy*65@_t z_g8jR3Rrm81i6-62@v5htXU|9sfVzBly4e_gs*?aqIZCU!qb zoV6d>cmrR*Llo*8lf4#_E`LUQ5-B5NGvl)!HHPD3Lg5$MaubsvT7zdm2j(gEaH;;_ zH{b-(%e;f!Et0wfVy=Yvb`FkH5DT$w#ICnWgdsfU)kK!lZf^u2ffsy5mDO!qGep;? 
z^-%*vb}7*3SRW@gH~4PsY}P`IG&U|hh>H9i8_K76{b!wEUH4;WgK4H zl%LY+9V`loJdFQ-UNImGtdT~1cDy&}!&21*DaJ5z_bZI^V?{g za)W|S8DTxHsd{K)hWS#=1))ogCaMzO*QFa;%P{;Oa%=h`g<<%!joVnPH#HT@7dYup zf6qh4VlX>MFr05H;rA!3YNIU<3C4$LRu zo4Wav@^r0x2rXeCBqbt7bOEpLhl5XSqlEoUu6NxLOO;5HsP|PVEgdepFcRdZn&R&c zC^@UMZHD*HiP8>A(+b7SuMd;K+NQ<(b;FVcbV5JCNqU4Lt#1p&oS%sOHqiBa=40DP zSF!YbZ0p?jzZzaXU}eDY+^j~AIsFuJ&O|zX-k1#A%%rCBx%-b8DrS%$@-;PlI%FNQ zU39iz`uhlE=xx=`BpcS0k56W@-p@Gi9Z$BNUvnbR@(6O{cH}O;&pwJcn&swCC)l4H zHR@BoXY}5u+;QCPLOl{3awhM;vMy+6IssD0MCH|76kN(&_^(V#W2V&~=aAd-xl31_ z1+Ufo9sFcBE#N26IGtTaMpB!_8lL9wb01}{XO3zvZp*w!f&VS%I1B;#G?O;Y`=g7Q z<=gHe1y4&}ycHufSzfN_YqJtOB|q#}=bL!j?;03Xgq{}^7TR{v`PH3hOS#^=t2LDU z@+DwdIk+)kkWG|kur}%=(P5p=^j$5>H^s=jY>|3;8gkRb)Oj9Z|@2Z!@-cWIRCU44MTB?5+$sx?6e9D>V zqpP5QIn;lS*g-)V9X3dTLM8vdBfUKE!X+2hIt`TkyGWe65*c`AHL|1}?*F}NRs`?@ lcKT=5|L!dsZ&+GX3`W!t{>`}%yR&+R^a`_CI=tg%Ms z8e`?moH1ua#`8v)f}A)k6c!W!0DzU05cvrJ0H*=~K<5xYzRz$3C!u_QLE1@ZI0680 z1OI%0lBnUZzfU4MiK;s(*_t@H>N^+%lx)oH^c|gybM6e1ob8&26e`)w9Y9n?^d6!LHLSWkrxGPG1B9h`>AUsmE4t}TLIfcJ!GGIL(eKao9}53zjbP<* zMY;cB&qR4x8c_dX;q*xVzx`zXCWeL{5hRqSKr=*Y&9|)9t7fSHNrwi#E^`(x-f=Xc z?MjMno3WE5=AZERd{hlJ*dv#*AOC~!6<^4v**k8q-5Qz}H0acPT}<_zX+<{RH)EO% z+0YE$rzv6*_E;WMz^ydYcpw#ORTLth=J|xdFB!0k@py7;g0^d8x0UQJLUKdUa{LS$ zTJECr2@G$rfV@MFE55qRoB%34WIYwSrqW8G?~COhSiLc+@w@jDQg)(q87rVOM`Pg@ zF>*9-UwA)UK}ZVDrY^D7yB+r0>s(x16s~Y{aCO|@U@|-I1?`6i7aZ&OMo39{+z{Q2 zn&7i4%0{V^GWiacj^y@%_dH#2xSgHRAIIYsJdLf7PRqSdBOyYq8RQjew7$5x!(q>a z&4L9a2xTidiCx8XaP$Km+=0g0)nh5^vG+Inwq(+~@xUhMrm;4?(ga>txIP=|$bCSncR8<3< zS`6&VrW$-G)m^-8&KAq#^J-g)K{L!;8L!;t%+e@=->H!-ir0m7wv!evFN3sEC!<)D^3XD8RyJW_hhV~(CAIw5pMM1dcgc@Ob4m||j z%Dc$s`j_cQTMlnYhR>kZMbG!ZODc)}nO)uQ*{#rv=vBl-_KsOLpNbkk+x%>5@P-OE zF}Eg$PF4L7({QHaW_^<|Ue@P-HxmX1_@yy-r_CAeXFrsfz^5AvIuLYne4cLwpkI}A z)&!;(%K?jhFZB#brJy-q|J8d!V!j=J2`x59-!79rd|Mw;mD%a&2rk==(FG0Olux2KY@ypEt-sGME{Anm?!S}gV%->tkxyb21iVtMluF+k56CDX|n5Mq> 
zkpD337cqTkH@tt7&v=7%K6f9U(6YitqnG~1V5U1B>?TXPYkPHmEnN7LsoTyP{c6h|OsrqrZ3@dMc~h19Xy$}p;m&)TCug!j)D4?Tw}esIlhg&0m1nZKh5y;<{n(qs})}!EAn|j zL*eCW>3Mu9Jq@MUz{gLc!B|#O**H)hD}nkl6S*lCdFAl-8J=!4FZX`^>->sbu1H>E z|Jqz@zP>B+PlzY@qxLkSK<*)n$!s7UDtvU{MGs1b;#4DcyWJiVJ~9e`320>((WpMk z3?^3~a)~Rr@Iy1C)5xMnyIj1w{#IByh%7~1x*;|OcGoyWc}D@wfN@{YM==f?$L%D; zw1&UaH;w#BpEXCgrWr1zeeY#P@hDv_73 znfMk~?+0K>Uts3ES56CDdFGm}Zm%9s zjtj#G0KlJD+tG3rJo+$=)os72dJK+}C)xGpk2<@~4#eOzIJl@iAqux{-_x1uR|)`x zY=3S5S@29FXF5f(wN+&lVdW>Gxq&(+qS4@xg0ic^%s}-T1f%#}I zNPGDFQ)D9}8_VBXGYB1>;oM|6T$#<}E#z3702C0Ly-f|ancmo-a;=rrdJeX(POyji zB@sp3t1NTaIuZ$f)*|eXDYmnngR}XX1R$mPJb72|371QLqXPm&Yy|qYHa3P|&$W5p zG>f&><2hmJkw0II&J>`@=5rfDYPSDW4`Fe+xCFDLw++Wo(-}pKmJy*l$RYCT&BD&t zo|}!#HC^3Jvh&iiC@lP1$+Jq63{@sHgkw_HNwXR|?YLX*# z@#tLv__8ZS?zHh41k{va4Nvt?4=*MLIdpStKoO<^5?w*ktcV$*;|xdJWKl=ti(N)Z ze~uxAl*W^hhh=Jv41w*`3m4lh%o@~@1qNa`5Vn*efQAsbq(wAiCH$U602#!90SoB; z#k88NMC87&qG$t3x&%W1%Iy}J0g0Q3-4UJY@;3-Vi05l;%zi72gJQGLb>{a#GOozY zV8O0f^XU?Ov(uqpl|~NEiWe1hcJ*;@p;k0^k`BJ%dm;lqZ+fxm@mX*I-ytd+3Cz`_ z47QBv#>*?seir_4Z+n%SE<1=WTF2i)u4&}#F-^P#s^|n^Nd5I6IIEgkE5<+- zMM0C)hlzDoSYJ|odI-Y(EZi!|`NLx^uMXXGW!9IIRbvj4gxWwtDWdg}M9`OL(gU_d zfmoS~SZSj~QYb~HB+*rEqU<0U(%^xIitOb2s^3fO^-!U+NP!VhCx8EDy+%hpXlLY) zaEjtmsS8Td#JFwkAhxq~#fjgY4Iz+LpN!7k@F%Wgtt)wPmgdG?;52^q4Mw0(;jTB` z`gXxE7H=j>2jv9IHk3{DFpsBKvoWwVD4Hy2XIO~!zuH(lizr~=*V%s5zciRmYP34c zFA<=BJ9QTy(~DY3s)1Ml`<91_f{L27h%xDb^Ow8}8mF>O<}U$5__a7%YQAjOLvZSk zJn%bv2>sDIfOY!iQ;8IoH~jSGL6sbHI?8B5vi#Ju9bTZq^|ZFugu#$3Utp{!kKf=m z)c9Z?i;_870$mD=49V2S!MGqR7(RuWQOBWWftSuac60~ZB8ublg}!41MlS}GQ@z8c zYKMV&6p!L|9<%{EmqD>o%?)87g6bz~sr(hgr&rlT={M*fOfq5j>O;^NCq zyLwDgeaYQ!<;-sjS*+D$6+-GtoDu=^lw5AUP9NC4;rBJfvu8`0c}Y!8O;kqlag?lQ zIkmI(kET3wx5gPn=5)I}U1z38e}%OXEVmFcHuKj|?meO$_+qCn|N;VX&w47 zHj{@Jr9K(o!3~4={NGq(cY9@=jEjK700c2F?)^o=E4dJRe-x<^ikIHA5xgzdH&)yEP7T-KpPg zCv{FY%^?L8xRivK2bx2k2DSryaesxzXNvN<>3gVhT3l?c%c#;W7;TK6+yBJgH|eet zv-Z57vAGvrs1d(i$66VWQzKhvD;YeB&Z*w7U)XaNEM1k`b?o7#PIY0#kKBxDJv+~R)zc5%5P 
zwNy+G>WHB9NJ<0&0Crahz`m~8TEcCNO{o(oSPa*r*N%sWz?&<>p#)dfa4Pl1(2(OO zZX3l9CV~74|K6@@p_Y!GP=2O-zXv5N&xhwni=u$ZFbSd7q5}XknIAn@b4HGI_I*ep z6hv?K4Xa}9G>s942SRjdYg{Q3MC2lpYUD*S)#|WTc}NMBde%|p1F;RIp-t;r6#_VxOZC$zOj18i2E zC`sXaun;jC7h4iA|6iJDS>Gd5-d5-K^sPR76-P&VNDOvcoZSJNH$FowxVw)S^?|R} z@xA5h@ux4R9QSd1Iez#15mhKHSWJS9G617FtdbQ7QviVoQg^GiHa+q%g#O z^G1i&(nSQH|ITH%Gnz$ZC4xggOgPakPpz9~WG>l8i7!n#LHeOlRU@cese&*wNCdgm z|FsB8nLIdCRCKU+mZn@okQ5~S`!NW}4$-Hz8w;!~d5`K&X+4o?%C+QB+Bsec#mISvxa4ehyeN@L-c+!Qmd?M|(K(Y_#OJ4iQ?{Nz$X*_pd-v0PcD8%9 zi*Jgn01s)(xZSg8pcsvjFhV1i_6pQo-ltN7Pt!lW)bUZ06v=HT zQKpK5A3s6Mk%g!7g~XN7acJ|~Q4~ONA1!kikEr6y zH}(TluG$ck(Tn*aR%cL^vQXTE4 z;pvGwMr7O9js0|>_$0u?ZED4tyaVgoefq7Z(ghmsvygo^kP^nh0TQjYbTv4*#qFf!jx1XEHzkD#>rjvjp zf?sMpDi3;0RuNzg0+5c=Rca7SmRF-SocK?D!ax9$qGA(qDhVI2Q81*8U}^YG60Ne@ z=~)`ez=OtZHFvE|ha(d-<-d35G8?_{0Dc$jN6WesM;T&K&mV3R5o%@fL@nU3S*VI2 zd9@_md%!u7+YpAp00o`vQ+FXRFR%Bzb0)@pUkT~Zn3$OFIVjIvYA78noQXx$yutoB zpTO18&Z4Q}T|PQaAIStDl?x%c%K`Q6PD~K-p`@5d52K)%n)sp!DW0se|CEGO`A$~d zA*8o9R$R%VXA2^9O~mSQFZ<8oaXs4@66CHrQ&ovbjB6@WAR)szQISSyCE~_X)Ae+o zTz3QiNkUe|@@Zlnt)y+F6TLsX3ZGRMHv7WEEp)!VU{!J>K#k(dbR5FeHTn=p6|_V)ZYDqzeZtHG!&^Rj4I4=i~7&b=ZQ7ylh>sM%b-?FSWTVPw00cO!&X> zPY62`k$di$^@*ICvlYRu$zXdqnm>kVxlpu7vdGE3JlL(VesYJ1iEoRV($}08MFRr$ z`LD78Lh2qXIYA67RxpALvfhTo(O6MK_MbNj@e^RS`5F=gV{P?bwNb(MZTJx7HcPI% zLp_d0F8#}eS$Yg3Qn*DGXIPjH3kvDUAGJxdZA1`+Ux)O`O~C>fIw`z6q*ab;!Uz*S zpZq_3CUCT11Q)<d1hV=IdPWyLN>%v#4%iG|45$+sAAg zj|Oe;S`-TEoM0YSF#H+rx!mQ~aiy*3ciV^K$tt4nau6&%Wgsz3`f5^V|c0m)G^IR+2UwCvUuPy_!V4 zr??xKgq%;(5M_oo7j3;|T~rUGiOQ!74;os${Ahy5BnTqN)TBg*NGVMua?l}*n1Bqq zx=XXTZxCR&8kb|;^duNhu{7qMy@2tZ>?+y`P`K6B&X;hnBLps5Zc2;VAgfq_l9oy0 z`oG=-rCzRXx5@j!PawpokVpXj3sG@MNL?QK_gRnztvQU~HeY{wX)X&O_B9KGB%`#^*Oa34WvuY)SYDC~K%7GcO zBf^}fCn9(*)K5)PLxeV21m^Ks(34}$g}9Omxz>qg z_OvtP%?GpRIC$~(=n0MUr){`7!zAEq<#AkXEh+AIy&5seA`I@hO+t!XUb}j@%Rx^=DT1ZHX^!6O zTN}*pzzmi3Y;I_ zY^h*5aB)M1dU(-kBJ1@9_OVuVleX(ikiwJIf7Wz0>)!aQ83jcT|0_#0NF7;3cDmD0 
zoWifRKvs?4FW1WOf?inzDk!fR?LX_&UU3ShG${WzJrZT&x9virhlj`Qn$P#MtS&~> zK3}a=leoV~jB+1Xxd^T5G$>F(26aUA#nxIR-$(C}`+3jxaes4^oRk`w_o=i83pO21 zsypJ>yXj$ypCc99;|@|#lD)ID=6Zm7+GF?UtJmYp-(0<3MbCXEs67+fovv>{TTt9x zTsjn7$h33nlJlA^w?2Hgwe`SuQ+jQ%U?NA2m>7c}ulXa^^qbJtRIc}BiM<+E(gy#e z!l^^YT5f+RYMu_4H!QEKJjzI`P|cM32V1HZ|#lU6yR@Tc|Wdwxxnn? ztbG-~>m`*^aus~+(LFi|40-`MD<ZQ+B|vd zzdCTLkKCfRCFke9;g=Snc!uGIFX!?O9&rqxI{vUnH|5CnDyQ)ZimkR|fo8|2pVkNF zigUB{WG*eH3C*#-KE2)iRLjxde8W<_Edz}mKL{I_9=8LOl8*yM4#EcxSih=S1LgIy zWtr1%)WmsF0J%Fyh7>DGiF9B#Ig7wl$28K?56$ifufAx`!dm3Qfbs-Z#hF7>vJGEV z@G4n{ZPz84tv0$bw&p9VqV;yrqRG7SdDGDUs2Z2mUpEQW_s!A(yKX~RJLOZ44 z52xR2a~NCZq!#J$&;NKv%}c)&n>uOuyV+&v86(CoNBuW%%|s3lNRS`A-Bp3aFYsy$ z56Tk_S5K2>0EX7-UuE4YP*YxTwoWpMpEx<4Pk$@kD3g+6%}oCaO!t9$wtyZfV(qSQLp!nqgaur&=52-0KLd#6AN4#^6y;j5 z*If6IuoKpP+v&cqJ_n#X{@jAjEl!^Io9dZxxU62k#|?C-B$SEpaJjB&3By!5@DiDC z47gH`dHHO24#M_nO{qQdXaoFUB`*<0hM~sefnWdPVhfmVY|v|+Es$57YrYfZffPVc z-Yyv4$m5b&eKX{q+qxzMn1uk*HGn&s3B;qneP%^H2rw&aC;Gm4iS=bkbqcloNO5lB zVZ1(PUxz>PNi2QJS$e}}xtc3FUnULhE;y*&K}%-3GTtrf;&fwp&^0jkX~URZINv>M zH|p+^Eg*%Uce^$?%3H28Uu*secz`t{}$^FMZ|uy z(0n(_11W+avu$|0k+&qVx@X8uw0%tq_=O7wGKptZBrvyA^F+IyvP5V%{vj3DW)r_ez#H}x{V5O>KKhEsiIc4gW0|eN6_;gfnu8ebc@nZT z!hqA@@9OM2JpVJr>=nQDCHwKbbjoEXo!|rR8$9+U;UsBNThxC`6_yW(^F&q9d8r(r zP`W#sc%7b#OoK`_a`2NYQx_WSH7KI_JD&ayzENIL=M|UD`_1-DV6E$@c?9nFcL94= zA3+c(IAS%J38jRCi}yY1g-=qLuEFZG+K0tJDoP=C$Qm0!Y;W?SxKMEM6{wfmnCYMW zg4-@kqvPn%G_h6^+#z6uCUn59XdE5w^R)L^ZzHJ&A%d7US=ku%cILxP>#TN2Hvefj zgG2Updy$;eOvBH4GiRi9YEs~vrrN!xn%Qky7*&@C6e22oe98sZV1-ewfTgp-{aPRcot+w56xbgxxMLQl1KqxJkn?r*KM5sN<@ z?R#B?mk6^FIwiCiy<(TXwA%O`-g}dOvNIS}SDp*-&kjmGZfAx$F!HKe>^OJNK1vFF z7=EY%BW(CeQwAc0OUuziz+dzK3J3M9_HF8Qt-vx33E8$+n|G!-Iv@QBK^Vvy)DaxQ zREzNR2m7pL&_?@RZ5pnZX1ynRsar@}yWZ3&wQk1*&JtEguf(u+_0O+{-@S)$#o)dvk$7zw=j=%~xaW z^{>_8tP^V$9QB_l=HiId3agKn0MC zn{@L1!*c$SYrDR$x(kf{$>s6gubD_6wt6rXLB zj*A%Vy-i^TtFyl|Jl~7do_C*r)nm-RZ7KIIr`t!o9qTL4u%$n?d1zyAHyqlyP=s&{ 
zcq_{o7#BvPv^^+_GQx0v+){zwQWjL`%uM_0h&2`-jqN8RaI2Q?mvTtXdKFG*oJGGuvn;4_9HgzS9Z;QVq&P61!=ce1kNE1jGg zh>?USsA>fx>I@DldtM&3l;ul!+FanmSK(joh@IUtoFaO};r7+&CTw=RmbR5FGhy<& z*zQGAUm8B35T+zDt>)CvLc@(!?{x9zbtfwi4U58x;X?%Jj>!%*Q_dgEAG!Pt?Bg&sVCe+aXNnR~oAPx*cLs+bQ(l z(8k7K@LHMSOSVv7KA7f%mHweK%1R5hQt@7c&y^dQ-75*Dq884U+Z08Ef=aR#+3sM! ziQql){x%aj@-L%StLB|LT*Uq9U>?tzb;5FIq3;b5S}zZ-Im6}~n`y#e%hyH~YHKkQZ$~j3aCY^|aG@=>Yk8%9eBv;l zhb0RyeixcB#Et*JL@9OIS+G^##B`8YXLO_&E@qm`!CD8Hr2s_Uv!Sj^KX_+l8e1>d zUVMcS{O`Vog5~)}_G7I~b$V3w#2j7K;P2sNKUoD{Vs*t$-<_QkN_DO$G|he~xPO47 zlUwaZdb%5nl@S(r&r?`t!Z~GFEs%VNqx;1F##O;%LKu2u5qVf8q1iRZ!FifpDO+W%9xt2j zS&U*mlg`G1*G{7FWSb5flxZG0NQVXD^Y<`Gw9uUm1+3<4INlMq%hH=_2|3w1Vn5Np zSA-DE){8w(<$Czh106-g=DuYiYo(91lH?9SW)MO>WmV>=u~}VQRl)y&M`FnmYD|cw zv0YNp(K8|pPyD)Z-OWO?Y>n-4RYAVi<&=ll>-~pxj)^$XGsP!9zU76pC2p5=w$ACZ zg%S?bP+zCUN;F2-LlbJC{=qX;Wo2)^vsoJJCl=yD%-EpblT8xLns(c=qzHZG5$G^x zzA$UE6nWR3h4__hu)ON4*NF7yjfYskSK?u^pnIm1^|}RY*NP?ucBncT?McD7Yl+e0 zKK`8Awo6z|5KwnSj~@+IpNkrfHkBLq6eFamJDBTkMF`orUD+mfG%SiM8-RTA2p7Z1 zuJJt!(Vd{L+Mpe`4U&J5+nenU0;&Yx7>435+L}^ELBCf)ab3*C$oJ}`9OZWG zmHJxf^*RapSERrm4q3*QM!7q4soYyk)l?i^(}r6d{=qimG|dY6hkWbQGLoz-`I`!M z;6Y8rY==Iy5_`%@Wj>hYV!enZEB_CV)g+c*&|5$opmQ3KDbcm$k{mbA{xXY09y)I3 zsa`6^@AqXe;h<~XF2p4T_~IW93Sj7Rr^_h5sO9o?^*`U$FRKrn5aL-pL{&<~XOCbR zk;kcJg_gB*^!g3qH4=B&?{M4&Bz?{Vyl`-SKY-q9j7ftz<1}p~)63g~T&w_ilm(w+ zq!vi1J>thIJvQIRN=|4sn>DO`e@31bc4aiAo1#JtDu?jnmvEcZ%jfG?yiEK%OYzFR z8fW93DH_Z6wLddM#(9Tp%FI|xu1X&LCI9kMu&unw0~SUR8>KLe_X;H$#)4jK+b*io za=qy>sj;(9J=UNAGc+NIl12xzl)S|#8cv-9@m!%tq%QRXf5LQP`NV{uk@=Hm9aXry z8xt*yrPeWcYf*e+u%1rr;!+Jv;gY<3p`4}DP`(}$bI4hd>}*e-GZg!u1O>I1|qV(?r}P}Oaq!b!oHcqh3Vh5>P{j=aa^kI84=V> z+u;gYdt^Xn8=E+ZCP{2?f6%mJ9JddlPEtAmP4k&3ql1RlCT-Av7e z8c|rY3Azc%_nII8h2QHWS41WcF6XQ!t972{?)cCRNcr&MMd80i=346!22vahO6Mpc zv@(?RJ`m6%!X!@dsK;cdM}|S4(AStxn9QEP(!OVREPQBH$no*3J1vupItZtwOa$g% zAaT(k*~67-$!LZlxj8SY;%gU~zR)rw916UbCXnJ#{XnLgX6&^58p8?Nkr?InZW~oW zUhKtHad1+1vS%M{_E!b4x~k&K=;ZI(`b=oWn!zYpXYH)d6@q$ID8Z$ROe&Au{Ca;L?J!nJF^fECe(z 
zdi70rOV1+Xmo4OG;}U+F8s@#?Nn{;f0lU>Q=N~egA_1fbMJAF0@XCmcx#}Anj-p67 zfJjMUzvH1b%Qr{aC4Q`i(vn6WRa{;}hOTN2F$P#?k$5egaB4(7s8yWM2wn68eJ&j< zby#p>m}V|N<99M33W}p2iGu{Fek`oKxak~S94ZhSr#7;KjuG=?xpsyu<#Bao-6{L1zw5o5o zWg&;z`~8v$zPFN)Y*G0y6mqf1=^qFgu8MXrV1{1;XvazjK8A&hQr~FZB8Dh7o-Ip| z%9$VqSF`tcL% zw!InWH`FS$A(J8qgqDYPX6~`G1D?s|px9G`hslgDg7HPgkP$%F*qGU-Py^=NHY89i zh+k)QJV58EtZ@IzaT&S}ugBv!9xKQ5MQWSjg1-hX^-}RkG3h`(A|fUmEzG|$)o#WdohH4`qlnVR}*SVk2eC*9}}TRwlJICF57t_L{C#u3?4+c zkY}6E>5vu&Hdd{4Pe9jeHv^1?SS1znc7#R@r1{}TIZ?fppuKineB`uWZ% zVIHY@1~5Tq$Nu$oEWEZzmF;!ldQXNFHbeU3pU-xQCmK(H7?%9Y#`iqyenY?9zUW#VPSW5IS&E!y!WH6Fud$0w`=}vf<;bk z8+L7Q!U~j%GB5!@di{LZW94Z!A8i@ZeMg z#)4s{f}8hntoLr*+b59{=fCh&qd~uaL(_3m{nL|cjt#qezu=NbiaoafgtttTxOW7g zYMu2I#A0t8pk}RYu6g1mUBVsHWK_^&MAOzZDxtkyJXN--gE?v`zo8pr+2|$91$soEi{e0Qh$czZ!x5H*Co2&6U6~=E9HhKUIAlYC~QH##@xh>ec z|7)Hl!8--2ik?`q*~M=^x2King##c!EYy{W*qgiHI-C8LGB~I1-UPUxT;`5^#Iy+k z0I?|KU8@b(nH8fZS6RjYfJaHTWyx8JsEDU8z9R|O(at72IhzjV!rpPD&g$yo_gmrs zs|NiYG7>cviJz82CPEHAe$E$9Q+c~CRQhKksm>(xu7B|mKivQjKwv^TJX(vxq9?5P zCQIW%jn3R9%Q;PyQ}L;S^!vu745J5R9^V{|%)Y5C>r4x?UySQl*N)X=K!>fIP4w-h z^Nt&xUlG1x_0d0|mZV4n9|;M{k&0m7K?&)0Z|@E{8~uoVgbk&Xgu|!PlX%r@E?!mppH;=ZrpYWg z*-`15<5F+c!`bs_z_-m}F>p0(sM{&!jnu|#c3c@s1QA5Kii!VRF6kj;8Ji`d>rA8{ zEXTYg=Smaty!!T*B%Xi8--+#PFn=$|%VT=puB62waVE95nR&(Tb1~Y3KlJH%@F!pY zvjWfZrAC8Iq(GmEbR!sxV+HeXjO+BK-Q!e1$1FMf*ZC&(wHsYvQH??Ma2IlHT`AN* zrG>!3?>qFOpqrGdpoUIc#Sc=1Yl=a}?XjtZ5|+`8NOpAS?-pLPFj3om z1AnTA-2)D~`RWGTtU}Q+TxpI7Dx3y0}lH z8LXY++-#gZOY zh>0SE5UxphYTT@Ib`4ng?;-Afj;f-^GYT}MNkaf4iMo<+1fnTzRCDjka3D#VCxRO# zz0XNYP*|n@S&-9=uC%Z}XLHCftyCy&dB>`RvXAkLBsT0xe<6ZXmd17j_7`2v=EYSTEbkBfOr|u}<81vXkx>+m{0C z8W(QAtd$jO-^#)$w}Yz{YaoIU5Ba+k`vq&L)B8ntL_A#$S7~&m7zX#@CbIBnhPjDl z?2rM$F+N|%ScZ)U`o`Bf-a5m*0JE>ZLj0GSi1;Dd)94c%j&HZ+fD_ zF}%D^ZqJcTqhV?G-4jjyF;sT&aqm2Tb+U~_!~p<^vC{@V4Va2P;0LoNe6#!YSQ>u> zsLQFQ{`Ble;?$3KUp_<;JKt>HAxsZ}*$`Bg(hS78oG>#`9BT7J+pB_Pk2FS0Qb9AE zqd(S`;wO0k!KPrG(k@=%(L!=V*!qN#8o7Z`hiV#ps{%rA5M=-IjEAh&o2yK)0OT-2 
zDL5Bnr&PE5$0>6@xuJ9z8QI}A@B46QbJ?pri>~zn&Yk%V^Z8g!OjfQ}IxbIl6?$h7 z0+=`VClC_a@Wx@<^b9tX?h$6uAhPEtIrkC;H!c)vpFdkY1LDs3;B# z7OdfyVc~kqjN71q2^uLbRZ_APtsR0u1mBKvEoAFMfU@HV&@Eb!f{%v6fUcX%u>e^< z>-jYNBqfy;C&!kCe3tb&+unxOX&sm~^bf5roiZRF{kgs8NQYeO!~MC<^t+QV=>H9Y zs}R{pS|P0s>I7t{re=;-2`$jHr$5&IV!;HammQaPQ=(n?rKNtdrp11@MA4Z~L)iTs z5NdunlNZ5S!J0u8$Z8JxVjFHB1-GW^;<4m08BIthn5qK~5Q1s0;h59lK^2ETQskuEZ zc^5xzq}Qya!g{@tGeoX-Z3bjL<1#hWgY0P9+aGGp(&fhvkXAJy%yV9gP@s<23_#-X zh2uNMTjnzddWZG0zIy*MHx4ZaN@1-w8$6tL*lst!jC1sGkQ4=})u2_hal_P`#&)~w z#@&oV)rfiT!@-iUjU-ai;o+fuHQOQ!IN zb2kdFL{KXw-RQdCFI)ocWVx1z$QI~o7JwLW6QQ=rYVaX5emYCA*Q4msyPdxJ&{?`U zUJhBG3X6{Rz&JB+q6ZX~zppmbQ$ZrAI7pL}ZoTyx$s(KX$@|VLBE9bomxQ6JDD=rs zg^hxI_awM7v#3~pi?+rEi6pun$f_0ma{&?n5k@6+CGcDMzZT%v=XcV+{3(R7cJo)f z$aUJ))oUB}Pou2Ql8iass%Kt1(V6sNlQo*@nt2;jTv)=1CRn;ILzt0?u5v}r!?qcp!|6BoTH z>vmomQI*%T2Bv7hhD_=@$vlWuk2*e*`Wg*-l-$f>IlaG1Ij1`0c?$n$?^|F9RRq(R z4`t(VB0ZAiPS*@&KzpkF!ZAfuZ{3H@6?Hvz{j+gx2H5u>rpZL+@xD*zlQjiCLy+!> zzMBmg{gR0|Syn;XkW$}sEYPX%#+8=#oJqeO>|*GR9sR>l{E}$Kce8_XA;V1hVH+Wm z$JBisT#&@>Dan_a!RB_ruYv^e9cKLVor;6IjhAhGw6E7O$YJwqTF}IJk(%L7H=8enlvY*Z_%I0dBV z$`pBub4UyiH9;JBAcb+%bd<~Js|yY7g`nkF$}79He6|yR(_u>~R(Z_Fxfxs0mvtV6 z@(}u_U1NQm=mSqNjh>CWmHD-I$hMI0sZ2nAdFMcBKjH7$n}ZlS`3CcXA&_tO=h{4{ z(zzf^mqJcQPumYWT(-kQw-+XQq-$Mdv@o?rK)SmpGk20a5DPeBQ*Wa>*wfKlKF_`4VbMyiO}=nB(nxDwJsv7o z=!Do)d^Iy?ybm$3$XP{WBX`uce*@whrmj&q6>02dj;!0G` zAM(-l06_k8yjQF1ujvOIAP@^VLp0grpErLcRz9WcFb`xp@}Sx2ZWsk2lRyD~i)^#I zKCeHGvmt6pMdk5%zU5WNT+MV@u$X=$`(qT_fd9L?LG;r|Q2U;U>DFYIo6h9#itmaY z{J0omWhCoqEc#%%Jrrgrb27&nP^pLGnSt`TR(G?CE4{D09r6CXG++P5)e&Ykn5`tK z|AwRlxze}+MjUhahB6^_@TmnW!0qy3GvpEtf>3~=|F@FQ$I?%%zJY4nm(X^kl5O9$ z4cnQL6uM1!9sg5y5p;z`Bbhi3nkpyJ9=1C2c@2I+Lnyhz zG&i&|`=dXL(B2*`NQ=vr+D_Z0#_#_x1`up4E}UHLonza5VxIRqAFLnFc)XJU^MCDS z^L{;a`#+M}t{+z!WIxgVKN|OzU=<&}-_|gM&C=26vb(oFa2_o6^r9ZN<4J=R65W2K z`#!<39M%@7APBt+-;Xg8zrQ&f8Olj#hZ3OhZ1X#tsd=LS0*m!$1d)Z!)^KD|=GoYm zh7eT0V%L_lke`!WO;-iLh0)4w^bN;$SMG1I7REJ^MV&icrM8ph#wbZ*YiLNK>Fr-n 
zKn;Q4H)R-uk6uGzyNfMwxZ(Nn)s)c2UM`I;##L6KAV{8jw{Y@AiB+N_Zn`<97AlbI1e-e(~BVNzNo22-(3y}o{)6~ zz>% zFb@ReAwR3Z2Id#W0^y(#U<|h(m0sNZ>_F(yy+r(9Gyorf-=*K}EH9PeotXpD=!63Y z(murq<@x#bxn5EUW9Ds3WJQp_F^r_@>iy$!{5e7oK)^XO5)cCz3{~cq8o{Zoj9#v6 zYmTS$PGI zFXNfA%TRlC=VJ$3Jm`9qD8LW~9=IMk4`F<;nX>4BSmcvoGp1|Ixg8DL*V<5LFMCI) z4BFhX&SAMc6LT_-QfU1b*pCwSpRm6bL1-i@ZO?BOrqlllO-hvBvwd(n6s(pHd%kG! z<^zr%%yA`d?A86H#kbDDN%OvSDfj=mDM0Ja;dKH(B!*L18Z|pB(*_r(G{9o``cE4wN|mtvH2mvbb*O3*ai#cj90xlh(bWk8M)Eapm!|qvO;(87qNLv2P$Y^B$6B#|yo=)(OL-Bw?GL8r& zQh^wh=Bf&>%#7TY>*8S8)|m6y-3-E{=Ik_D!9R z_%fmyWL~Fx5{a95zW#$hPKf>1bF?nUggxsh(@*Ss4)s1unats}oa#%;Xa;Nh1v7my zH&Xddt&p>&|L(quOla>B8j|imh$!^gR92kYZwsIC75|@rlV~93rHOdyW*NgLL0C)E z`dd8yKRXJa*cCM-Im?uEAqXLjrK3U3=AC-@RpyQ@|C5p91*)KvW6Z1sxEQgwYo`21 zr(w~Fe0khKB>a&e1nAd9(;$t>#=bB_+c2;?Oiit}cDG&;ZXt(r7I!i0t;k2ag69(R zLwcp?0e27t)0oi94QY_8OC#CjA8wX9b97m$@0tH{AciLVnk6ESoc(_&d+WG3f^J(h zg1ZE_;O_1O2=4Cg?gS6+5ZocS`{3^G?lQsM9o~>n?)lEW_q_M&ubQ6zRd?6!?%I2; zwRc;)X`NL4izn8d`O}$*^q!%rwqSfwBd>|Z+m~L!_(zYh0>u(ZwA$%I0>^iRFe9rg zbzNugR=Rn^{9|KUdzF2GI8$FLA zWBSq|^~)tI=>0Rk8L5abrZ$)Js z>{r&T7Av`;K@7>YA|}K2`bU@HIKBRWk^;mSEd#Yi!giYh@h_&-P3aK z5Kx@&$51eo`??(c0qX8;vxSZzY~yOR5Md(uC67OY6p~1~HUCKNb6P+&H|B>-Zrgok z6DHo-ObjuAMj}6hZ5q7OW+<#QsQe^)z4pj4WRtP5e>U|Q*x~&kkq7f`zR^PK!!P;F z=>JYPG^9-z;eq+~7+moRUkX8*Jqx*!NGH#2SFc?A7#wIWsF z3F=N&6H%fui`kIO>r_DA0+9j{m7GRuAjoLrUx<{I6wv?tmXm0RpUL}G8fsvGlpFE; zGWaW;yx-;rD$3kECY1xCB^|)6Nu|4*%1eI)!6;bnn)=3%@=WHH7TJ4YM^MF=eG4h< z9Ci*CgKCdlbtJHvnthPX=Gf=Q*RzhAU#9}U^XAW&adEBcxqH=pLk1JTk6tL+49DD(J@L=UF3RzXCky3~{(_W?CsEA^(%i=!Onle??aDkGecBAW1-D|10F7BzLLm4B?&!99940oqxZot#= z`dH?Rc$jV*rC`Qxv~w{x`L8d1JYrqG>KS(RydL|KGNS2j^{2cs{ZjmYS!0ykOpkvIe- zO^X*75xt(^ngNXzFCKm{P89xR!stveD0R~QfhdELL`fc$Ir&Vhwamgw zSWDO9bJVx}Qt<{%pL21OP$-ZC+e=~@vBROSYvubv5bFDP`dpW{+l=t)*WY*`ha)`G zK*Deltc=_7$y;hRRGPW9Y!Ndfh*Or=^TwQxK#)RAYeN;*IQfZ=*M*#sb?0Kon%{YE zQZB&c0$3d{2=4N*d3JYip%=w$>)lJ(J2G>ItHU|eVB4tz(%61!d$nx-Y{IkK8O}o& zfOHwIFdgL3bVq9DxvOm-? 
zHuNoI&Ofq{%9`*aSJxnI@7U`05Ad&Uj&-M-YX2a@+wtNvcIs|!}GgvKd)5$lGjn(2D4I>+n z3-)S>mGh`9kwmI3(gnd%B1nhbDSkMgbHhUTmEdq*+Z|YygqknAG8M8PcM-=y3CI1y z^cRCO7|et5#VjeB%b7mLn1a=+S}sHvfU?A=Kg#Z}f?jo1wLmmj7~%ia-CVwzssSNm z=qIb?alWtf=QXfBQL1?;L}+YveMQ0pSx~Dq^r-Dq357g*ysSA2r*tMdE1^Dpw?f&c zZwckPNF^j3A$?!R)uyc@DeR3A5BjBo_UhAbee%9h0YF5TUR&B9BCg%LAf!)Y1*6o;1_i$|FM41qBP6Swp}y@It&3K zDWw8yNJ6fk-;r}3BB`pl1l8d@>H*4)?hMF`}*lsHQj{&hO}&z{D8z zWz|20{r)WlNu_L`Q&bnF!e6eeX-RE?r(9a@5v_5AUkB>qZmMW~QTj#e_w`eaF}Xnh znwJkl?>7grWWJ7$<$H&_)t1wtowgRN0;}USwjIQ0V0lc@y`H(orJnCv3%{5_a`V&S ze(Gmsxt%2v>6ut=Z?;~@M+|-LGo2muF9|>PRIrC$n(71W7Tev|6SR;4i*v%AT^v#E zs9H|Yf{GJz!2>4H$Ne_Y70%S5Fcw1g^eWQ16opQ`Oqe$;(j&D(QDw=THoaBIzz%I^ zDkirE_9X6Yu^sDNI0(P)42IR98ir&!Hl^f%sm-S5k^{yZliO(R6(PLR)o{DsuCieb z<>hM$i9rKZF2`$ff~GphegO|+X@u+o;{&$aq*~~k6TZPP$HHLyciV|}X zEsd`1dp^&xX<^67lT~>{Bn)+Dr$&@Y2MP%C24Zk6;c3|vgj8&tEniSmot;liZjrmF z_|-tjvG5PAASlo_`-A1oJ8!HS4pJT(I#Du)ll{p)g`=0x33~&E+TyT+4iow`RU*i;Lw!E<|HU=ADblyfhulJ zZ#`R5hcU?T*ufE010zq`@Ys`KW)<&YLwZq^6Q zBuz0)+N=_kCG?*M*?=RsQxonfh4!94$4UC2lvXaszV&y=>+pNLa7G0g{DT}M_ zR|0op202P5^MTD%$FulqP8tcST#f>vd8@R!>jOhr=-i!Bs1@eD1!hVk``7-9+~`fr z%9V(6Nyvk`e&3vDx#e-dq)QYS3xt@_N3$0O){I*f6+l2iIhPY&07JSd$z~_X2AEo+ zq0frEH*t1|(u{@ehUMh1^+W>uG;dAN@I4RBCb#|tE|(&pbeb(}vhH~l z@tB|eA0*#%K|w_%O2IbF|J4hn`P(x~_zIe%_g0QV0)j(9?YFX@F#6vfK;rzK_B;{A z-Vqd8fLbS<*8jBgufg|viywZfK-=7Yf}3>U46R)%-?l_lM%nT?ex>HLSv)GO9`9B` zW`5s|4}zpd;A6@0j=$qxMKJlEB=Y!FjRXEBkRm*Z>xsjXUDZzvjhD;qUWe@FwlF#9 zWe`hPcORVM;4f6XDe}@VX!5#`5uP)c}kD4vcqSev{;+C-RZub}a zd+0i}alu)(3!^LlDGa~%h_?H);3+l|FW;;FG)E+%J#)N)AYBLbIaN zB+ITlhLlO>nB0}da&y?Mh(jiN(b08x#7}Skl1NEzhc$Gz=(C+ih{t-AG2WhGp~BN) zcO3eF;k^b%Aoh(~XZ`6&NlzF-{3{k^70&e>i7k#!xkGG(=z>i^FG>3-ZDoY^iudL{ zeVg@0d<;4GDHj^y{1LA`(#58dfLcLBr*WRiz(-Va_=6&KkEEL8^YFrPNb+zsT#X;!a(s*_Yy?B^^E%=~kM#G6m4nDNZ*DUDKr2?x?ji5iP;VQ3@;t`Ap4iGY zBdrmC|COU9qufi>5swv@&|F8VGkd1;i8lFky@G2Z4#SjWgR`9okl zvmpQKG4bVAm!StJgjGZoJ6ol;I@QGMi=>S0Y&GDVA@1EXx-o=W+X0t-a{e1?Q@-%*e$4<1FgPs*=8A-;$c-7YY6C?vRh6nRn=_ 
z%`gSImmw-)gEu7f`=vvub-X@WIu3l$Kng9}9EgVGlMxdi8>B-Tf|lOm z?npJn#dEZD6e>R?*iq;COl@a6la#_Aaah*Zk0eL~FNCB2 zm_Ja|5N^)vgQf=?83_)8-4X~c7pA`KnWN&av?ZC&7!SRzQMF4V0v14-Nh~&&Nx_cX`pF!e4y+Y;IQS!e?EP`cUfRK`uv)!ubZPY_E>1xIbYQs zEHo6v{@nP40o=Vfy#TpY^TtXVv3ERFWSpgPG~aRCPZB&~&Q{~1vG1>rwcZBGu|-yK=Hk{=RB!M)o7?i&$od?)L*b^48U zIdp`JcXA^6-;iscjO6SosOZZmU+RpFe=8lDHJhIcW9a2K`s`<*N2}4 zmw6%oQiMh6f`}ybWZOg~E1?KL!DzS9^;_^WH#h8yi-nj@V%#~yL&9iTX7yyO0;8(r zQCoIi1?l9^!o~L&Rb){lNll)XjKY_0d-QP=F9tU`olmXhJd_-MJJT~Yx8J3uW?bZa zN?94{cX)B31LVH1HW^Nx6qY3$lK)Ty~eFCNtb24NzIx58&MfFbTlyw59H2-RxK-e z8|?{Hk1P1WdwYeyf9G4} zkds!?dG#jBX$8ASYG%0pqwA_LVsYtRLvk^!%TAr{p{7I!+2?#+=LpQvQKhHK@2kXr zY4H;69O;+b-0%LSsJgQr$H#2584cYS>`g9u|rrj2PhByYUO-k;;a*i#sftxlm|r6o9$6 zTvk}T_RXr!QYho2V)%UFYO=MK=M_3r(ljleISq?Z`~s)~qg8hG~V|kxS+p zl725TDXgh{vTt$5>->Nx!IGd*WP$_?BV$RpHPHyR=rwN=sctsx{RRU7E0Hd^%`)bB zjbEOtjy&6uTMp%(w;s?|4$Bi3}4zw!Mviet#A zCMHG`v74SEYC=}*Im-O3IqXhrrtlVLOFJWRKI=$`1)yrQwZEK{(%}jxJU8q$bmU4< zMi@@MMJE`Rex{IVb$*@BD2o3C-I5_M8cmoBd0}e{RiklP&Rv)t`97aN5iL&l28W5g z?Oir@&SiHm7%h|uiRB}mc{)GRu4)(vIcLkC6hqU<;ckvpx6P3vF)|Ia64`FK!SEd}b0mH{s=QzBQ zB-L0qeR9_`epmieW$1EqgALQ;vL#^px(`7a3;%;U*~sX)V9~bncwHv3W+2ssM@o98 zu1u+Uikdq1<+_L12ldNCH-|*1;v+yq{iJf~o}qqPQY#7YA)E%eTm`E|qGNp!C|lNQ z+GbUN^Vi)H0Gf%jd&CSCs%B25*Htxyro?1_1ihQa#X?%$UnM-#Ppot;`AEm5LcFlk zkWsJ2@ZevD$I6TSlJbHe(m9{^-3~$(HLot`nq6uI=xt9^y~s1++PGHgsj72pqF(R& z{6tM&JiSEWnSYV%@|~~j|DJ`__XvVQR!@&XvR`_WxVB8~TbSz#6U2Ygnx;SlLNxDB z0c~MdJiuOzMg)t9+!r{q+*I>q49Op47`kc+URsWPc;DIq+~IW3Zpk*nu< zU<1(e+u+KFoj}Gy+%L`dg>c zf(Rb&1c#$x%S)Nm>(>@j+6ysse~5E#Sh<#RvD{ zCQ~i3t|i!?k|1AL3&Dd*)aI;%42Cd3pQB)Wp%Y0EGfC)cSPn7@uk0`hX)&x-6YM&b zl;Ri3mb^w4zX|(fH!kmjLqsGNu!<8-OMg>G(RcsCQ$j;+OV$C94p z_qxxE`4x3b`{6*F`MBj9cgtk{AsJwrnE2WRop}RHGjgQcDah(Z_xg5Q7mj#2WofBR zn>KdUv>GeGI0<2ywix_<=ntuK;S&C8%e%X0o=R?8%+$QvQD9a9NU{8gUa>RVi3D

u4y6+vxoTR5)O$PCLQ;{-(5s{yP$IBZtJ!An>^JB36TkoRD2{{3_wHvoxtTzm*6&H35nA_=m6k=45SW=mR; z7miab6T|UjbV>*^dSmACVXKaytVA$@98x=)Iv+_eFxnPukH$rLXH4hhN6E_H7*ZfN zCoS^14}l*4cDFbeQ=jumrO%^pa1^!{51bDI zAo*gsku84L`Ii>J+jFp|*MJpvFEdIc%*Vx6=0AX$&OZugkm9f?-94yRd7fU(usE8M zcxs$~M?3#mPW;b^dJ~WKR$cY))(;fZIhh(CuoHN0@1SR*3Ox6_>Jt{V2jFNqeJ+gG zNcS%nz|l?sbsT5(oE3D0q-r#jeI>O5Gx27GzV*KtG^Bsil0>MWQUpvRUnt*UNM|jZm{#4>SL-*uOa6^E>6M z?LzvQs;)%olj=!*Fwu@fak`c78RchMgzv*CPG|~rNfP-A#>V`8Tem}s>+G1fnX;kX zG1H5z!*-MG{K**tR~Ce!Wh{@QQT(p9TQl5((CP7<(fbr4#qL_#b$t{I>Q1-T4OZ4e$n%a z^y=dCrnV(&<7d3fHCd`_Ptd}J-t-GpbL^z$JafI1z5~awq^sNR2Ej%I2HmB>6E}LN z!GL^KgWE!*Di55@Vj5D?-TjSyNBn#ugYVI;(V9GBxBqJC;oR>wM4p@!1sv9QQQHS5 z1S?LFtg^BG_eRa7gIfk^0zCWVCss2ArFHh?yG&W}HRAj!%lM_=wjcFeK8?wsh0+wr zptp$)96(%iCqDFQFObi_oRNO>b?tL;tk6|ET2^sqA7`#PvC3W(L<~j`?Y+$L6GX}0 z9sLTjUoO;_2oFD(CJN-|2PuN!Jn` zMKp8Kol8WDxGpMpSH>Q7Hh1_J3pdSq_sRY!6RS|LH!);~uE00k;TZZ)^>_&5h1WcV zkvrP!0N`=1)aau1u^$?<$Np@EN@_@4*O}7W?#ICT6`}Zdtu%eud3t=aQ9J!e4nn=RSD!Nx zGGlaQy;L&o)c?LO;Fu)bBOt~u>WrCG8LSq$ECJ2+ zfa8qWQ#>v-;*;}-N@|`QwsLMP`7$7**I4rq!W?8-(N2N^7cB)Ql~k%UE3e!ZZqe{k zF(LNyrS#18_vol&jx0GD!iao*e)m(=anAsxoXa)HbagYNZ`yI-A_R}@r1&_>k6Jx7sj7N!S&a5Cii{JR z9*4(Hv81?}Kiq$G5lpu}X0+DLizpa+{=JjwZUv%66qBqL>#fK9plyP{^qGu~43VX0 z05lF85nD2kG!+vu3aJG{>76^e5xDCkVWNxl-uE`0Fk+bmZ~gLvyD2VLm-ve#GSQ_= z)xmLe`X{ck3ffpOKsgyt5A%&Eh{MJpbx2gun(tcqdYYKZQNgNb>epHz^5snU^?5=b zF9Pm5;E*PwJUd@iTz8}QX0HWhcR&OGW&nyAL4TjkzH8ZfWr03*KS#DmQ9UOD+l38GsWf_Y^OQ7FTUn`>6+2vQ`A?SRffF#)~9%(N1=+`OFx zeHK~@toNIVlvnNoS8Zac__V#NwKw>!%$Ih9L|V3;tD&j^8fT&Y?Cfc$ zk3XjYEjoXP<3W!`nwo&}rnoY?OkTvl-S_jihwIry6Y@zDR+5-vX2GC}zd>45a6S-7P`C{WI05S49lw1wuWbQ7ML3Tn}{VSk)OZqxiRHXhqQ zXS2RKQ35-X`YN2rkou>OZh*ck)y0)MJWY*mf~I`0WCSYf9aIgzupTsR8H#RJX!PS! 
zwTi@6CfZ)EiFdizY*d3zz=s6=wG)Dm9xd zHTp`Ck-0p547OYl^r}UKu)$;hDE@j}>l01?@-k8kiK@97DcN|@`+dWS3VQW+FK&4M za>8~Q)%x`&hkgYJmF@SFc#mo2fkvIP_jdu?B06yXZ8rLc_x9ik(;S#QcB7*#i_i68 zIgne4cV2y;&2~YScY_D09%ygo{#1@>&<)~Gyzh`dm&>`ha}C*%TL}$(>K=CE8hhCnsh$Rwg%FIEpxR#T9=*%eTQN!l81lFv{~j@>F%2fvF6chZTL7|*HCLR z5*98&VN5%5F5^u)<5#j52yxS?l9x@pu_!%JgU6TWc<^%6g$AX(s>A3o*h$Y8FoI0g z>bMu$ytmnYbG#oZ+x1&@Oh5pt*vQ?2w-(jR5t}0=j39&SR`|Z-CLHpqLJJsv`m*x| zAYDHhcFww*k@H@C9eZ{A5<76Z^CS?5 zm2AvxFyPVao@u}?sG%E*G*^wF8e|S7+X^kz1$&*;>3L49uZ4t}PF;*P$g?v&J%kgj zPLK5fZu5TPMDr)cVw^|Hx7FuY=^rWPVg?x=dcuPO6?;yw910Ed5oVpmax)FN8#!|U zL@Q;b`OC=p>*H}DNcFw7wg+J-Hkx`yIUu&9L3~Jn+I8=t-@PraamTBsuoZ?fq%KLNj$TI$xA4nX#huL(s$fY^CWga}4Bf zAa6o|z&TrTf~RTrdhDdY1GuHbj=_`i^`H8}+|1l9k2V!i3`(s@T=CkvH^6n4y}Gvy z?R|xW>3DpsZ1!5Ui>$Wj_>m1h?U6qu-B32=cfr&rx(cD!Cy4n9l!1%s%^Tlz5HX`G zeHG0gz>R?IP0d@ZC3!n3kd=BGdMrv*IJtx~elT^v7{17LsVRJ)acqXaVp~CWB0dcz zDO&Zoa21FBTuE|0^y8~T#!>moC0!gf<~0=#lvF{Xtf9-Na>bqjT3Ol3%Wl4k;@qBd znNyQa=ckK8{nn5*?>N;LO*XrB-7(uon%eeEHZ_IL6wz&szT)T7!U|*5wa1%zk)p?G zK^1rRn;Vmdt4VUso58EadAc>HVzW1&Or_Q((So;)h07B5Zi=WNgLYI_I>=B-2|lWX zop%fXgA{}T;M>}}y4Ew7Y$#KL>lfM4JLg_*qdM9@=>wZpb(3=){PBC|-n4O@w#rdo zv24lQ4`xtJ@#7$PpPY8S7QBShH4|Ve9hJ!%>$YQy9NwNEmxKj3Zn=B4r7x-{-Db&D zdi7KN0QkH{z`H8l@{pm;7Qtw?kDL2W$(f%6eq+;;z!@tsCMr!|h+)}e5DXR?IcTbX8JikFC8Yx zo`R5GOnjWSa3|1wf=L*PkYr3&=KUvlF?5PS0c1E5krmep=u-%gs}qGtIbm^r2$lEl za+UI$#3^OC`hW3^lQ z?+fj4Sm`Nu%8mM3$dNN(ide|?p`6?8sVk$?5T^rYlkdU|TN16*-u>sOl5?K(C>|S?`8{Sa&OJ7mT+(zl4^k-I^R4G1gWOV=nYC}18u0JuH zExVI0L=VRo6y0QtK@cm7S+QkL?BpH z4^wo&rZYmD5M1i46>Z>!w={_n!|)1JSRPypT?DF}cYvM;DOQpRLCFmh=1#%hEr{}I zt=<0$@-;>yYbAK6*wlmzMon!__dlQ}EfZOiIFKK`ISLtAs?}iv2<;=YFMZU zD%DV0FIi-RR@}k(rBvrI6FnXgt#8K9!u_j1r`*zU9>A7@ri)@YsQ&pK8DrI+3I9}( zQq{=IPBLiTynSm$&9-$dxtMlnWM^O@7s-Sr8v)TqW#Qj-JuC9=gX>=p&;?jI7dq*5-?yK!jP?wkyLTwcBWmP_#Y~vE@=w#p(tp0SxRN*)9v-O z^tUe9Z3&Y!f%B&f1=Z`U=L zMjP9kSCACv%T=tRIEuve^C@bI``rjNn})T{;+qh?Ah^H{Rrl@a!;KV-dU}Wb^FA?0 z{}G$v(eyX>$+uKADcBERlQRWYCU;TQrKAhn63}R~%}WuqAqnC3u%I~@q`oP3{Wr;v 
z;J(ioLIL;(Bq`b7*7r_{{}Ax}WCL&a{0nk}L^&wxf6>KBDi9C;x2Hi05*)qvK*FGZ z%Yl}kfek+o6G9S_{{CfVW%vDGVxa$}@A>~W)c>d~`u{tahC7jDj02}QsJa8Fz<;q) zNtxA!Ps^@n$D;yvam`=x&IbWz^Qq9XMW0y0Bq6&o6P>tOZ3+Cg>4XWwMlHIeO(+rE z(Q@Xb1d*(zi@j6n0l_a;@P8))fKVc4G!)-v$nBcqU9f*58+R9dW?^Zm*jH~z9(>9R z;eGM)SXxi#G)fU=AJO%lJ}L@GNhvA)>52S2XSB0($t77css-tOb9G=i zp#PU;|M0=AdXZ`o;-5fW!;DbkK_`KVS+Dj6)05m%qqJnJ^RF@z;@kO9cwkiKH8%mE zsRP}AKj4JcsVTX}02^Zd76K}v&tkhs>>KAlKt8Gs+P~**_JAD=VpLV!WudFnE??mA zc%|_OFRoYnUs~RV@Z9Eo>d^#4ET=3ZzVCGd`SIxv4zG6d5I{PS$JdpK_5-%ViMj#y zySLMw9X!ahJ^4>}#ju{`>SDNV21f^&jFGckQK{U0(&xAB4*!OqxZEBi_N^=AE9Ls+ z$?G%1kmXr|?78pnz19?u)+DE<%OFKv@f(q1e~H3KS!X;&V;RL?W5TBbEl2Xg1~ERa zAVBUZx@XSxU((qw+uzEQnH;mZbcLnxI$4~Erz2JQymhz|$1#OdF-XVJt+C z7Ino>4FvAzsna#C3N5btN)dpby^f0WkY&KBk#Kd?z%}a;8MhK1V+w|g!`-Xn9Z)A> zGL`JstA9L|cU=7Gh5Bgv`HyJ|4Kp`vHdTAsKWxqcFYZ8sCg?cRon|?3N==3=2z1Av zt2^CYRu#(Ij|T8~a8-c_SYIS<5L4=Q^PF(cv9i=Y?LLyv?|jA)$)0!+!@G`i~!?)?IxLoaMBcUe7a=rF6nTo?^Cx0>r6Y$dW+FMxav}-KbpzrUD zY1f|h#$~ITuGEy|401gJF<|jiZmw?dSVsjiJs&!4bNvME!VlKsFIZnMq7N7FdsYQ3~WZHsFFsVpZ zdMCp9lff2ifuuEZcO4`R_o>g;c8BB>?1pa9*Y;Yg%h;96^^P_7S0bMMFz4ECK&9gE<^{){k1s3 z3-p=Xu1f?M2wsMXE|2^0Am$awVt6+`i%OFs(8A02d>8Q!tcZ7;{O%%N6GWG^U zf)bNYi6JzykFT#au*gK?ZVn&Uokw;j9=dF|!uW}(!Nk%t)YXm{SdKlK&4E_1ATGK7 z;xCuHus)tjOHD_WP0}SA-KW>KTZUyS?^u`mmrE8g?t0S^E3PB>%Oht5;Ig)z#+gR3 zm`_C){C&9Y#o)F0DZQNz7g4v~Ue$Si$UR_hQ1d_Uyj}Z*r>`#+@f>XI)K*s6)dwZY zLlzeM*rYJ$Cw=2}@jGa~t*UI3ZZ|!%>5W<3|AED{$#(n!iKsTKgQA;ujYrCI&!h3; z-jEV_)IuwzO(9ph6wutvB_gkM^BDsa;uDB^w~;3rpPX*#4U1ADC-r0w$Kc##m$v_B zWR4x0Np2h)I}2XF3T~T_PZ^f4t)TVPg@7BJQV2AiXZQSv&GLLhX(lWU2d}`zWJ7-0 zt@={fMn#Q3X&SZA!MC^k{#;rSIa!B+A~*Y#q@Is*utET$wP30_aq}mb^*#VESna52 zJANvSG2vIX!KhisRP|N+3rbcZg zxl^}-&|k{;XInjf*XRCZE8Y@Ecn@*;?WFNC++VGCzK@h$J8`0bL<3^4JlEpmEfW#> z;ir%gXwldzFSwtno-XGY6kL-{Rh3tJ0*-EO91I|}mwnuqI>k>)D~A=pqY)}NOv{FtU=vS0->4+IAV)x`}86$oS}>7NbB>NbLY(S-U}vc)NBO zNzkgrR(0%HY5#5Q=R^_#LMwqM`Q{ls^ONh*-3dX1;&3v4S?gC)&W7|vi~`;f5dZy) zg`e~9&pa}tFl50-9Z?)!Ca0v0=C}reRcN02ijn-`YA9$q3nL1` 
zW>BXks1dbOvl3O5+~5ElgWjTC z?$!s4$vCCvknY3b|Xr)C)Li^*~82Fu&X(_{?&F2+~1mJe3{DXPqMhV^HdZ z;>4)amntemwEq*^jt|~{(0rZEVN#cdt>OlU_RX;j8g6F4g;ZQ79Qc|!HM% zF?=Kj8dL$-se0BTB0v8>BZj>aebPK|=&h|#H-+~0l)%XH{3r_ygujyxK%gc3Ck_95 zeG@tN??#XeUX%>)J_wbVX)`<;r)c%Y6C&FKj&CI~$MQ`foOV_@nYA5l?gPVBZj6yl!+Gg>gzo zvCDrdH4jWV_EFGC&syuwaQOc|ES9S|q`H+RxTR4V-1lM1fLe742vo|XxG#Zr>G?(P z9&;iAJ#N*$DM4*?oAGhn^v_3wMH_3$+&g-cwRFa?ctw)##S-fRm4)jPGqz+63;*S! zNPBAvf@lJ|>}~Upo=m9^#n4$WQ%#lEYM2Hoqm3H!u9WJtwsaQC9Z`C%`j8HTTrd)+ z4@*_mF1qPo*d+kgAZK6%mXyIKvwMYSw=NBqacdTM?dlP2)-a?aIbq)H&A4WcF9a2!=R-b;&jrCS zl)>)+FZ@ZBcxiviLebEgVe@2VAkv#fV>&Ak$=iXauiX^>OlIUXz6`U3eWazPPg+p^ z^)CvaTe)@R81;PWCZE;i%n*KUOBYOu!h$^@4>9rj`qg$gZZnIUbuSP{IdH1+CET9f zGz3`vta2QnX(=iVn<%D?LMS+z)($Q|Z}n&7MfZuWwUGUMUE|^rRN9#@s|Jl;xbHK0xtG$wvE8{;r z{1T@2`V-($c)77nMd|amHh>sOA_m%*SSVIgqS6uu>=qx!<6WsOkf3`UeVIyz4U^3a zHm7V!!tc{r*vYO|N7a1Yc(VnwV)(TGOP+-gY4Uu~<;DFL3RNb0XJIZDs=EB23+qC| zzAusb0XAoqFG1=Cf!RJU-i+0Lf+3S<%qIhc=%6?uHgDs_@-C)xGJuAW4wOT5!hO;b zYV!#Che%gv-TvGgpVTg^xpN^kus4RhJ~Ik=MywoI)TFoy21O{mv$}S-SEmFWIeXlp zcCVbY4H@iu%u5if)LVyDulKU;+drJR{p+8)%>Og_jWokxG58kSKTtF>prL6tEDI!GrjE-i|VxloaW3LSbaa};SOQ5r+d@(J_+imK+&YP z2HgoQuxEr+Yyt?b*BV~nNThSh!X24s-hKgiqB*zqNa8>~Y^wbnGm3lfyn+-j<7`u#K zhjgDT7*22{5bL|@zO_R=zf^8&QAXP7h-RtB@p3Sd`|zVGQxzgt9I)8Gas)S@NtP4_ z-NAz^aYa($MZ-x@%v2GM+3}QY|0&36f@GMp3dbH?Ev4?TV>L0fBHoK(zWqLAmO9!B z@O=KjO*qbWn_N$?0Afv32mq^!(dD4eNFK(*uhf^TGjxsp`YIJ<vgp zJge`9Q$Yf>Giu{=cubFn`UTWeAEI#rcPu|Yqfh#oohqu|jBWWFmn|x5w)EMz&l%FQ z_FMag;KRhU7g;7<#CBROZ-60fa8~-3K9;h9V^4))7B~&`mg$s9r~M`AG-80u+1bgC zRllkEqGvm`;PpCP-+<^9N7l*c58DK!|6Q%1`TKgSz*hvUf12fJeZoVaQl>e9zMYhZ zy+iM|MC0Nl--{c&2#9ThCp8<0x7zmDqG_?+IR7&Pnic0O^V_wGDJ(Zaa*f&Ae0ga* zf!;VN*J!Jj*bED8=pwh5nyNM~VMO1;%mN8RqCin7>L+62kG{noHYLkaZnFf$qZ|R* zE4|>B4c59HW;x+}**3geVxMxsL#6I=0PXv+!J0uS_D${?9v+B?D{~>0#)gKS26bn= z2T8S*4xrUvy_Y@;Y-4p;KzpTrz%;8V`NCdnVozX)fBJIIC+bko=r_5NslF9FZJWg7 zBUuIihH*0>Rz86mA9Onkxp^@{1NeB)h1Sxi)N)8CqJ4lAMpsfTZf=Oj#)SiO$BRAU 
zNmbspnRWUvrBvvF(v<*trKzgX_ghm7r3TO<5II7qh8ctkl6UlM*){Q8+0wPEpDf-h z!hSY74K!E5%^aH~P_bUB*Ixp(7ihr|MlGCGilv$?a>nCZ^;qWeI1R-6x(MPDOFce2 z(J9O9YCSmT`J<0dpD_Kn4B`$fDS=nMhX1a_7D+`AJuEp}bacG1NjPTqs?aqgjpm40 zl(gHn6h6wqqL7x)d?;q(oR#qXbvLJi^;tk;2l8RBu-?@1q@yEUd#2c!lAKJVDpcm` zR0YB0a_&qAw_;)|Hnm)3HPyJQTJ!#v$?;;SI+ey(Zo%q&nR_XrjrPh-g0Sc2Pt9V& zI9Ruudz&|p9XH#}TsS!fyX^kxfTcT!Kf}Mtcup%So%9qCO};qVtZ_a5dMIU7ngHo( zJ?Q2mf($-(AXat#|4??$;gLjboA1~YTa#pB+qTV#ZQGe}V%xUuiESH`WMW&}^J4e? z_PciX+WxDmtGfDBovJ$LJm+`cPp6t1K8Y+Q-x|+IY;YLCX$~{r`bbW-D+xn(!}ftX z6Od?_i_OX62)Z3xVfRYtoEq_EK8>*R*$$Y&@oR59xzw|M_-)@lg)l~L0`tAg9c$fl zZP2=YZ+H3&`1E59ae6UnjPLP~Nk$BRwo=1ncq=}bi$?l;GoKMw(M=ac6@B~f7aFVR z?#JR`)j?6l4j7cRDN7OGV<<(1V-c7jtHDBPMRy{kRtfy)sQtb|}9+)jk`XFN|Gogcg2(>waOO1Lbt2{m?~XLP?)qt;Nr%V7jp-p?)HJRM}0 zJ8Gn>xonpPQWYy6>??Zw)Z*izE!{~JWaesNz5899-UhWGeXDyq6h%(=Qmibw)6Acv zLiaqy-(w-)_`Dp^Z5=}rq#)QE=ahEtom`1$sDMhbZn*K1^MX7rkJqN7w*CI!Xa1eu zDjyOMwKv!%$G0X1>NBR_dg!YNd!;$MZ)IFJr*(V~cdy>3^t#G@vG=rmyM1oeLny`l z_!E?4Y!+ml-*MmvOR1CliXm&)gDxzy0!=h<$!D00 zJ|-vccUue|O?J9?-*uWinR6Ss-G}Zcq6Qty!QD{1q-g$6bR2^&TFiPMWWNi+zPc-m z^}Cw3iV%_H)vOVtMh&OckmkZ0+2}cs(Uy5Z{f-~~0*O?xm!h2W{ne<-s~!7Go60-Y zs8edUi*C&$rWkDKq~4yYW(jBv^85O{=J9b7J4r19J`_AI-@YbozOJ+wl9I~77}O8geshW3^0NRJDuj(T@^pUB`=x9Aa^GL*?UU>G%G2!pM^qPj zl=o}jvt*~effB3!rKQ^E3c>V317ySNXP!h<*Yk@8>o`23AUIwO$f1%qUP*CPF9mGv zVXON=W~V#5PX_ZpNBus_za#k^FVyApPF!b#dF~XDMjyLBdoF7o^wncA&jssbBi+gjdE>TM^PXAJlka;yT!^Xg zL`0i)N43YTPV1rVCfgDeH)Wu+6Zp}P((m9Fcs8*X)1fWMFt`3QuCcf#O||LQEKdGN zpZwv%PPFgx5OLKj3?hFJme0o`!3aOR@Qedwwe}sMMHE?t@{c6b3V(lqey8ie__zwh zMng&R9?0{vroy^D%T7fXr}ha^#> zM9*eI%UZ|rbyQ}0Y)mH4ZDgAKSODE{@7p`hcs*OGCtn&j7{8FXEoSMOE%FJzr`U$<9-7crTnV^CepXM2K2W)#rD?Vs(Ikqn+ zFA@F|dN}y6&;zdOBz+h3Xig~Ks6~(yr;ht+kl#=SrSi+&K-zGj;545OXJmiw-=NYW z4yww-L4`q~E#h=_xw|t`_44xg&ny><&*QtbdGB8Z@3&^wtp(*rY5$T-)B@!aJ$lM* zf{1%3_X#lDi&bzP6h!!LH|`M~9SvEEsJc#bI1HVL^u8_IHSEj>zyZVIXN$!{b1308 zgVH|<3mo!LV-_SFe)t2Gd9b8N(&i>X?JecJ-b zg%X2N%#ILjxL{D)TgejGlQzQHU~C=!_2i?Q=ZWWW-|uan=fygo+UtAk)hv68){Ay( 
zvTWPb>Tla8jHrnD7UY2>wjakjt!DYtdl_w)2MdJX@vDmMfTLe{u}URbr!j?Jqg6$j z>wL7dU*kkuuY-Ufny1f^r}b-DgcaQ|aS+n5%ak3eY|dpSkA;(h7KBh33aPFJpX*`W za+Yazrh)o1Z&Ox60z&x4e#_p<BKH8Vx#ml}K zjYuWV)#$!3IK{k%G$KURHV~*G!3Ww;V_*P5j@iA5*1y5DtE)eWedTc~|9b$f{0li> z1rT6baow&?UoD&Y>;TE3=f`vBf0FCn#x;GoT)JH6w=H4d)AbOPa-e<1bV*WA2 z6o^XzRVS!VM?V;+hCIPsnSREP>LxB>_DvSsZnD(tth@-aopZamHM6&zwXmtKw=T7q zTId4H0znI8MgEorD9+|QXi7{*@06MFR4xgUHN&;Nc=vyOMiWwTP@36A*F!vEwX`af zD$iUMFt^7VJC(M`jr7c&v#mC!xrUs4{*{GWXD~5BDjt}>m&4cD>a$k6`q9QhJ1H+Y zRwf|f@ag%hk|~juH&9Gbw4W)pCy^Bwcw|c%cXwkgm|!Z$Y*o8&!V~Ve6D1f_m!`d0 zXM1~0=ewV{YSY#cRO3>ztqry}3+L4mu%L#@XOa`m|`0{@XaE@$s^eMxKMWrzaS4YH5az1*v?u(zTgy z9Bkfl(^$e(H}zcOUV}zv)9`AJ^|OJLA(9l&gCe7lnr@oGr02GuH{>5!L=5}R=>O3N zXk?npn z@+bQU0Bt>-@zGTM9#^ER#a0N`Q;rp4mHLp``C&=559Jp|FT-kV1Wg35ep$z+CYO?( zR@z_w=D%&FQyE$OZ%9K`RqMa&fe66=GeQadpApLWz1AMHd!|D}>+I;fZvXK<=)9(* zr(h@{M{E&IhrnODC<*9`>XXgCSBb>cCh*t|SHiBhtGUqAI@dGC@f`|p$QJVx_A9;+ z%mecs3CUq*VNnlI^Y7fRYM++T?$z4#PcCSjXhCxEfcA56xq_LV!5UmSvx}+9F=Im` zzzaKk8WNP&6O@)_sEg;@{~Z(hp@}+zIok*Ao`+mibZ)j?d<H2v`SufGtCQHETL3~GI6EtdCrQM1eP#p-eO;mA?T(NEBf6;Bwt zzqQ#{ut`^VEBD0Qep?}bciwz4Br!(3h!jf(4uQ*LW@4y7O3oG`%S*t=z?r$av_kA4 zCvD-N;tGbHq{ZF$iBIf*H<#H8HvTVBT`DiE`;5uqs+&I_q5WwKBqV6InTlA66tpJ} z&go%Rzjt;G%T}>42S@)a(;=pjA`C=Ecw{Ju;m>ulXdwvSaVRYWh1}f@vlDCVObv{z z%I4qbmE!-yWM=8rE%(0~<5)*psKvb&fs0(?S885PMxY2AuIOyQi^3?@jjk4@&Qw-du{S*8 zw8R~GEn4Wid=1i)-&Wuwz>d@?O4paDIQ+eIGi76=AuVBiFk9j2ZoRmT>|)Wb35Q=G zgQ_{hB`FO@dZLx?t|@kvt%-`E+oFN%;AT*A=)Fn9shCMn|06InG+{-D9|e<>K_&^nCP`+P$vw1F0M z0nB_$zYeEKaM-@`(D+~CKK@2er9He6=FZu+r&0ZQrz zp@qP7`!QRhzo@o88{D=W6!G)`Ih!3_zV^N;J~Z9x^te1JlU*n zzF7A5`gqY$i1_bz;BI`xRnAr*fZmSp?10`6Feo9rO;Er>^Dw}q?w>m#eadRt5yMd( z)g$#%CX}d*nkZsK{Oj;JMXKcWxk7Yar%N^ffkEvD;2AF06wqM&^Z)nwrKmSazQ^6X z*SFYi@?qG^bm=t5DhYCk-Oz}aO8<|iwfj|-No zzy=G=f@WC)^K-Lsqm?O9@==c4k>MB|=4e@4@Gfvt&|-q}6DhFR;j96TcYwtckHP0g z#?l~)xVLyXyp4%jHmlRfe4xIdO0Pk`?op0fq4}YoF^v$%T1};CE9N^Hzf?gTo~pe5 
zvDWnb2h3xCpe*E@c{*c;Xe}*dPkj_eLDs$p%7MHOh0OJQ$rBmw2Pc0TQWwV9o_C%?s{lu2ab+LHKQ=Ot_0#a!{D0I@?b z4hl1!vKBar`7^QWC?zK9(>7;oGQa936-r3F(d@f_3PVwEe!le1Kc)6p&RqJ3fY!v&?dgcT6{nz2EyQ>A27zO=DM)13Dfu+g@paIfiAqA8=aC zuff@MI=ib9)gB(krn*|@(2?|m6ffJPAvKrgQBJdCN;|0I|M*vt8xEokFf^an9yD~E zs;ma)ySrIkHg^#rORmCZ#|389!={-c#@OBuf4+KZq3&# zLEN~tAf9t!-7xU!{IJS!IpcJkGE3k^SPcKF7i`UsmR#fyIO6{DonR zr~QZRIRa)1&y#pzQ{rj~%%CkXU^>&`Eh3}k{gng3>WI^JaSdYg?vKqlvN-;_CzG6z zAUw&P*OV^b0?xRfyZP%gI5q^pV8v=`?w8t~nA&`9tOty|u4(a5r#dKK(*4sGFMuJk z%;;$LSJ>2IRCBJ&p9pUshxP7`)x_rIN>!3`ECCes+5_WCaM0-eQgmkC)9bOWcJH$6 zK&4QUh2uJv!}Co)9S@RF|9OYElQO#C$1m2eM`wYBpv~_ysy*Jkx%ky}u&m*cVX;o^ zMstODMn`-kkhAKb?^9BHfsbdLKHOB4RRSR}vN~^vK3#L$$q0b2%0eG2e)OSp$A5;} z8a_oT`&`drthx%=Kk=R#6{fwmK7F$d zSP6eC3aB(LNp-bwOSeyl2DJT#{>m1bss}#ltlyUwedM9+1a+H@mtlH6*$YDn`*Sk< z$dq|Ii1K*$N8@7|J{{0JCRZnVA*}VUb>kmaZHytJ7^7$;3+BaTm<7A`hevc(R_1kVge&y_%T)_g+ zY@e5Xvq_@fUveP}n%dlS(In=s@%8P_q6LS~eqYIwA3P8Ws=j8_cck^4esP^&s`~uS zIy#^FRR+oqMm>cBsIWc+kRwMU7k72@+=_{s>b^JxHGG+xal>ZunXmHJGBmdoalGx= ziWbw(G&8UoLtpiLez>j2*lU&h#J?^-X_AbNI&65kaMu$=FkU)v+3#pJGL%V}B00Q_ zRuhsa*3n?D>q&q%Yrf5H;0~V}DHpABQpkP#)v!(h`)?M&jbIIrEka)(ORiRf1}L^M zy`??MPMImsPR_--D~ep>-QJ#z{YBtfizOss8d8bP-`5k0kn`v1o}?Xr@sBhPRB$$l z`HhCEVJ{}C*ozx0sfG_5XKY^#PHL-d4U0>5AEvBry-(G*43(d1bQd^}6j5%F~*|SQEXR zciX+IY&P$Z=+iqN2P8{|Te1jym!E4w(E|OQEW-WA8GRfqgV~4+NT3;HkaOV!w0qF9 zj1=iCQvDn!58|HLH^(zevXc3@H1 z{(l~%4K1(_<_Wc7w8vjFGESHvTGX7sYVi8XQoRSiT_avt7XwjC$^#5auVx#60^t?2 zacT5TwJ}spNe5tV>@PwITY6cN`AH#(QfUM(v`>9$2*P(Tw{_otOy%(VeY`!xVbbx+ zlqviyu2#wY=HAR$(VikHCx?by&Cf&onm*xIJ36glwepea8HUqR%?x6%*m7xWT8^9C z#l<@OQ+S2GT^m3Y)MjRu#=MHp-;=6HfSN=Jl2DZ@@|x5#p0JsjvF-j-1Eb(0nwkiX z!1|7L;re6uOBY_UElK$iE)PuPw#W0J3TY4$2LIDgl&G^VraJG-651e9(C^oG5@&4B zf-l8M?}FI(%9z#BpIa?=%%6^y*oomxNIFaW1#XMgCAlnjqQ$n4CUv3s6jU{;u10a_ z;}-YA?bi)*dQ|U1{kb#t;9OM#(0SkNGh_2;=UT8D>lU1(=TMYyBxO}gBlw=S0*75w zJ$TAQ-XGt@dtR`2{U!P17B@7Zi4u=Gr=NEA<8gD=QMgd$F;YH07VY?G)Jo9v+6tL` zBj^c@)+RY~1jh(`_GPZb38->QqJ})XPF7#4X}Tm!!oLQ^PK396Qg;KR)T8WvPmWP$ 
znds_tx(#XUEdS^=EDc})xPsaR3(oSRHu$FvB%N!F)#Z6x@_Ssnv?zgy02V$7kVK{{a)$)5=foL zeXQc338MK21-H*svQ*~%_|VXbtmHBIx0Dz#yGAy4;Gb^FI9uuy5w(ube9|pX?@&sp z?34v@uY-J@bTcUqUu?DUBqd>WDwkYS`UIU!D=93*kAs(pyS;jT%3CM6Kk{u!Wc4B? zmsGN>n4uG_X0Rb=qhAd!fD&Z*&AMSn>i2N(S2I6?es{IO@9Ps*747Dz#oYy-IM>`X zJSl21R)8uom1%Z&6s>~j#FF6~sERez;#)e^Wpx`xys?X#3yE6o9z^vR$*D3*vdRa& z4V#%OHY4H+Nd>MKOK0NmE%Zsgg{%}VoS;=|Uun8hvF^4}RB1n~>^;$gL`%p|nGwjd zDml7}r8YJa=KV#~gv}1Hkp=zi-fR!%2FKN^R_|W%5PvKl#mwOe-k-1&CY#Su0&VAf z&D!t{>aNn^g(&!V;>T|`ACe1;qQtzqZjN!-lUF;XNs*GZUnk-PTYU-hf*6iwBy2Vv zhAXywmP`(1F?TJCitzGWZ_5#JGP@my6azxY>t&I_EI55`xj}W(DdMUoIn196AmyK( z&CiGJvY_ERefJ?l0*>cP9?vw7BSG!#E;*e{jj&u_AF>LDqsymLs`!n?_9{R(*5=nM z^XaZ!fe@ZC)HG|^75>mN2J!)6t2UchQdTOtoQxF7ftxyKZ5le#)1{X@h=Zbmj}}Pn z)lfDGJv!QsxdrR;)H_$Z5SVKZI{%ZCP|xwDF|jctbaiu_joD{Sw8+uY?F70^k5x-F zIy)4cw`V(QeF`IeVtLDGO;KxBhu48kQfpjC250gk6=g>!lFC2(FiS1R0BEzIreuDp~!B)BBh;=SKi zC#z@+g7(4olJ%oQxMhF)$hb2cewFY|8-k*|7%Y_mez+Bs020uGQdq&ut)^@=U3vVH;{m{^_I_vb8ditnR4;c=PG?9 z{m$;f`UX!u@|wQm3cQTnRdQ|(5A|YzK~QpV0xAIQU5&x_G71q?`|%|vjdxAs^Cm=> z3n8vFz=D@o10N&vC$9VDT#`8bJgmuB{99tJ6O?+i4CX*~mU2!+qfm9}nG0<24^SBl zxPEYN0{`b=QB{JLAW;azFeivk#3}{NW#4sA9Rs79oV&-pniuU2rh(LA=zH_Rx}9?} zAKuFr-F4qj?y2SHM;*oF23r|cyC}Cf7Z@!>A3+TReadpax+Y*%xP@9e1d*AG2F^1F zzz#D2NKqt7kyRjq_sCr2ju-ExKRwnx_TT-~-6$t3!x6u*YbZ$cz-q0TH!e2-XF4^l z;ktkxjXaG5m#5S5YN6WUm-ippt*5a3@1ESk4PvcJJJ7O1s9564vuBL8z^5fiV4W~y zRapz7ial;Ib+EjQD5Z+9RnSh4-Skf|FL@6kL)Au2Y3K0kM+>{%N{!w<@@%Qd%;J17 zHp=H0x9=x9`?RJ$S_VxG^-RY-l;(C9{@DBExVms!CrUl*YF{!SzKM!Xns)Cw-FDbD zthimjLLBrfZCL#nTzHMyPjr$w&hZ9=y9f!wh{n-ks}VHO?!*&3LA*a6Fs$`_T`~Ip zL!MW6e=$Mt0gUSL<|j3F&})2lyNMq9);5Fb4Ijgi@e1Va?R+R*HczAr*YA{%5NFVD z{Xa}eWI@)Qj#Gpmr`DUi+pkHE{cXm-?>bkeuR#87{tC6Rp59XN!I)hTl1zFV9(J>jm{6=YS{u{9GH!UYOswiNxbsFSM z2~3&J@pwvkD9L>^@p@zs0yIKYS*;`TtHPOY%M$#=ulduIKj!y-W)jl4aEOJm?~1Rx zda+cl==9O{eka7^fj6vb@W_RppdSzwCQD62Sm+!*g;tG3iX#-D+#LqmZ8;u8tZ$ zpvM&e0Asrvo7CnP(7X4Ciabz&hEB>m%PNh-@SR0ZaCZ_2fWo1#jddRbZ_=Vif76>B 
z5+IQN@wP^fIt1j3H9GwI1%!1&S3mJIHW@v}TX*$xWGQl)9|a4LRduBt)nHT(dVHRB zf%dE68^3>7qLhw#DVGZXMt&?a#>F8cBP-fPP*6yEVPFH7v9S~4^&BMO?AVVh+RmM9 zb)uMa?p@wXh*{ua0sd6psJPhJ_swCT^%E&Ye!d^NO4A`HNQoicfEh=p1}H^8dOfc@ z&y`K-(4|Q1CPr+z<(>(za2k7jk?B=n519}O-Uj1qf#cvLdo+- zH!;W#qIUB@#Jlr+D)KhLmnh%sx7la@LwFs$M2I|Mc2dfOEiZW%7Yug$M)t3S7up|A zJCj~5LeU>DntnB-!0eH!kHmLUPu!fES8SZhuw{L2*LD$$V7VhcugwBB84B@f@pO54 zQDoPi%`JX@Sz%qT`}Rxj?`d6?%=DjZr04Qb1)wNwK1eX|7~_<{)@v%-P% z?72!Lx zyl`qay?1!-{~DmYmCNj2@ZWFAy_pQYgj$ESzwZWzKT31S`4~T7FZx~x2TFdz$N&I@ zTzj{4DgFB4??9~m4$-r0S2XM@k&UmpP=JqXbaNkx(6kC4iM0AGquuo=h#^^lzB>49 zDg&<{)DL?Pf?eJkwb_aVY;EjkuY8X0atz}|tnXWP7+Sf+JiS5tH9Y0sk1oAq#cQa4 zpYp%LU7v8XFlciayK%t~O{`x|4{CVx0ow+J?E#Ge0|t?)`G=vo%~pE2_x6f1HaMfU z=*$JtoUcKUL!RDb!s`|rmj-Qw?V2clHxa&SJei~OtZXO__94mrwX8&T(u6X8H%4Z3 z#TYUbmxMp5D0(`|!Jvi#y$sF$j_vZU3#-tHnHeS%Du^I0>q8vqG<>XWDgmvz!_y8@ z(0Ol5j$ZkYKFD9FD|lu}dOG?7kPtR?KKo%=5MdLyzaI>gD}na6>f#!n+#+R*37#%t z0ts1*Ba9Bqir~RkwZQJzZFnfqeyn`tS7vk9PFKAjv0J%`OUe}_=C(x(gcYUV8Fr@G zzu^l?!<8uo_FS`4`#eGoA#uuYwfjaAd|}YG8o9tB_*qsBJDH{0#o5%WnxS|C4~pBf z7;v=YdClNoho`39(K@1JlZ90=?-ui|4wr{Bb&bs=Z~P1ze*gZ1SF%X*?<)|f6BzvE zB!hi+k+G+WG-gZJY=C0R@9nqFJm(!PFy6^BUNU=(YljCj3LJ0x zLR;Iz7w-QJLPQZJwij>~VtSds7@cR|NCx~`Y78)3sWsrqtBY#7U$9VaXmEmuzdOBm zx|f&B=4iVZR`q#(=yKQ>>++SuC>9ap8CS8);NDvfo^A3X*09b6=SS zY4nVF`Upq5|6xo3?GAJm_{+!2TlUSYh09NCdwZkpnet+gpERJek)?)9Zns;8t+X$F zJP~T`1>&+9$Y=rPa=>SsRiYs5a93Go3OQ^EEY0yI!%qI&D5b;RIk{Bz@L*HV+FNrw z;Uwlp^H&=&1@L@M!RVQVuTjM5X-i2X1YFJ%WCf4VL1ZguYOPrQPnQ7>cM~?&xp+dj z>vCWqA-A%PnpKQHTCfF?!w$hYg_HwX!p%*!mX|>P5yb}v^M8>2X@>q++<%r#5LnuO zuH?)9K+*iyP9hoLr1k#_>xZ$yNEsvlYsDsh$>j|LR1r!PWzysyGXv%fT5mD5Aoqs$W^eeO zfg^`cNpP%KfK$3LTq4Iap&$qV%$I*|?BZL5eSs^O!t1!z{fYaS%2gdv8ZO&dx^^16 zx41|`L~m^BV;bdn;A3rogGl3z<9sG(dCc)g<#F=!n~I?GRa{uKUYo|16{`Rf>)G$P ze8mg7kg8E-WE*tgg;>KUTCO0IxV#N8BF9utCQhle z`cg>eKrDyfomC!$q>nY;=QgB)NK19Ul$Q|a0nC}U(-}>2h*)nUP^ph({pDBX`%AS< zD~{5NiHQ2jNZKiyNa6r4ud(I?Q05AXIhn$SqX>>BA`@9vuRJ>p1=ep8P$9JIwUK6_F`-FN?0nWF8Ev< 
zG7@L0qXCTnUNF8^T>TcZFas;1W>CF|M~WGrtF|JrOP1T_OGpER7O^mMSKl9FMgC7) zU`H(f!*UQo`{m1-wN{73%~8r!vF6>M-1*iDsv(SY4n!>8%9v0v`g?=G#opgAP0k|m zX6W$aV=_?F@ceFU>5H@dN;3kK4qz$WU>ys^keMY`#)iO8SdT~Sw?7vaf-`YRRW`;0 z4d#PLDIkeY#FBhKkpC^mbNqya=#I(2XweiIQ zo_s4FgnEAxUCbZ!P%t#|kI&! zNiaUy7;T~nX}Gu1xhrdQQ_sG_JCE{Ub&jW@j90J*>G42QAY`b{<5YKIl-&_(_>u(< zbUPY!rK>hQ;8ZlTdz`^vrxWvC znj^aDY;uI71~z1Ov?5uv zp*Z0OK>{O7iR8jXjS`@3T21Prs8Xb~7}PKU7&Qi{r>)ItT|fOZ?3PDYg=k^(%=Rl0 z^s?m{92jb{4TX{#H7fOC8{J*x3N52!Wup$i3XhWhEtK^B&R0GI1>%S&m7P`3HU{|~ zRN+_HjKz^R%;gVWT%3$N!#W90%EoIv9#0Xsh%pO9&8xkWHOoXpBXS~?iRsNk{jFKm=8EyRu1uew94fi^% z8PU>R?VT78)Z5u&FXtmBxYE?%r?9q{#7?(5w8sXY_6wtp9Y>~mOAG@Elp*UcvOAHU zbwzhzl~%g9e_I6N2cr*^FkPJSyORq81+nn!MB`*0DwEA+nXlpxTizv!#%5HAZ#Vhg zkl_2ycwNSc^WySY)W2pL8us``(S>`51SBWQ_~{468Q(9MkM3=Tf!WR}Q(W<5XmSDl zt|K>+1cWjXW33IJy;<-;t6wZO;bhtBV#pT-Q5g0gQ=8uDRA$J9@itp7S5L^%s^jJ= z+~>&PW07IEHT**CTad-n}l^4bCE7T=PTdaozA=Mvu9?k{Q3~ zF5i|k36GkfW%PpsmYM1qspO|AByY}{1($O=6HsVEgg~6}+YzwOw+9P1%2g~`u^v+Q zhTGk4ghbNAdy@)#6{|wRr-~-Rc-(HRHR@9Z1xWixY<8Yz*(AlIgE0Zwx2dEg{G}jc zTN`}NdwU5o@c=2Mj`v<50dpo*a#d7U;+h?kVg$KqI(5Dr4FXUo(%y36$&UqvU(XvS zYxBb*dwv~OujUPtbE?FFdw!qOS$z(7)Bdi2X6)V9 zr{8*h##35sV|6tC@vD!IwjC6YcpcV8nt7qW$X#tF)7!igNf7n>zWDARV7d}=c}+Tv zjmd7cYo@rpHCa#(TRe^Fb-Fm!T)t6%D?Y_Tlqf3Ehzr*4itxN@HAV2%lzlZbSv)5iNOpc2wY50LKa>-ezzS^w^ZxpfC zR84~cU}8TRQr_^5et8HxU2%)=<-FNehA`=9#6H{{mT^rq-Qs=G&_1TL-l)C3k~}m- z>C3DwKpaHi7xkx#=Ip+AZy!$`#O0Oo;2<3jYx6YRo-$@eXQ^1oCTp6<6Qp|G%Enhu z>z-foKKx4T&%^!TxwG4o4HNEu!7Sd6oxa2^tWd0tAa%iD+XMmTPv)h7kFoV^<~ABc z4~e@Ae@et$C2c|<{^e<3rFlFo_>^ZN7#@Ju2J;vwf4-5%?)OvgxEZ0?WV+5sb@)Mh z^IdG&=fv%dv(s_<-;U39^EE2gKI(F=0kCwUw{BLQG z<*D#R@nf9LUF3-0^~;8TfhhbQ0Keq+mNC2Mk*_+GFv(M*GFrdO=CSs*@LH|=^@<1; zrZ#z7K9d~+4NS(9THEjclS_B(=6#&u=%cO{M+WI{oz2R~2&Ovqh~XCD8jn=gD&guB zHqW-6286otqM|u$0y`7T)}i$)pJ>2)wNWt_DEmH0!UI#Fq#g0Tr+HHZEf}tRCd`)1 zzq!b1r8sSQa@wwkQ5G^@wi;s!Oz0s*ym;DlhXRIvfPz8o#CyFrj~v)w?_!!|{%bFN z`(e9bZY2K#GtSaCMouOPDAXYM1&s$0ve}FD z)$7B-8G>9A!+$-$V6eT+J!)fESesaFrtcxqczf*YVIzV81d3Sr 
zc^TVQTqpX7iliI;tISOMC3g)8lk3yg$kf)*NQ&2*P>*o6G%FToQkyhH_XcxYY;V<~ zYZa}qj)3n%y~63W_G3Iv#<-$9S1uCh2$DcL>K9twl@{m29Ge{`=RAy!w^NfWUA3j+ z`z-;9;?NIV6E?#npM;Hk0f)JBKY89kfI^`u)ma74js^-YTH>nBY(2>Nm`r%l(Tv%e zV?l3Ki7+?qqq5u2G8@b4@etz6)ripbQl01-H+)VS%gy%`*h@#&%&y8mEyh1<;$D2? zt1i!a6rlaqy)}OuS=eZmF;mfA21h`BFY7VF^^7FI>Ea_6Ty0_o+9R#Bn=WOd(Tz~i zPcN|5&5>c(smeeet-n64`aMm|U9vQ8_vh#p$7kc8ToTtZd<-2cqmwxqI4CiiDCL2Z z*lv5J ztlOrn2YSA%po&QL=~emo2iCTOz25tFKWcz56rgkswux}B;`4R?TT)=c3w>*IwcxQT zR6mvw{^kv$23Rz#KCEb8_~cJ*F$CY%>sh!q=~cD~OG#^w)tGk?nCj)N#ri#uN}q$&zKwn046M_a%J4( zOXq(n$HymXJR{0L;T~6|ZhQ>2HAtYV>Z(rQW^BzzQB{Q_CHr_b z)l@@nlooV|4qh8TKgkr;gJgjv@^CYfv2oJsdk0O_xD?6JwQ=yu5#;v;0D%CU-M1D#FByaj7JcdqT|n|E_xwRr#W z0)5z#>GG6IG_-@(M+U3|l^gvDY|K zlYMIP`X{MYz|qKcS6r@IfjK34gPK08+bBjS&9Jc4wmXEdu#Z{$2~a6erB{g+SsRW{ z!rT!Uc$sL=)If`=TkUD`8huAI-ryfrzh-YkUxs8d`CnIcydt(3foeFvPF}>1AqrDQ zL_{d-<#65?LQ1A_L6Y9taZzg41(BerW|$nY$RITO?*+wfz6#w&WoA~wH*pA%PDc7A zW|lXaYq+G&RHuB}MOGqfn!@wjd(u9eLS57)aA8$;C z)-O$b%2~QZVc;CC=4eC8LDjJjDq+;-O+M= zKcZjcs{v@dxiL7u3*HDBY{Tyt421SJ#e}53szJR|S5vX#XL| z3{s?oEq&VG&?X}c<~r1YH#Yb?P=D;P%!Ak~D}*nLdRj_uj3xj}*?8An&1M`w;U_gP zM*vmTqhOoR=nOF*>3+NkIk`!)K<`4GGYjgRv?N2odu{+5ZXc?@_>n0_4de=F_Kj*B ztUuLhkqXc?bHU=~kfB;a8aam+N-c??KW{*D)zDmTy~Yv(v-y%cNcGJi8nNCn^|H+q za*cH(ZqX)R6=Lz^e$`RqnbIp+;#;G$-c6f)K}+JEHa@L~P&2%Sx)N9-FewyHE+p5; z-V|O$U22g1n@C1=Fd%xG6)s4$+k!)gE@J5yAe^W9giolVd1S3OQA zP4J9Cm**En*?23OE4?3imz*B)verAnBAUnStpO3JkY_^;CXWp&F?j@!e>KUed3BIe0R;ykb>Bf>3Pd?wFL*hK3goY4DeE<`7 z*{r+bb>{>x?|X`&9~}z2ww*9`o|u}HQ;>Y$;L#yE(^>0+jTouGbJSP7+bn6DL=J%t zoq8x^x9zM(Q!0c}7uv_ISBth-cYDO;uvd8I=a;9>s}7WxFDa)7%o^vjE=*Q>YM{+` zM<^ejXHeciN@zj!trdi&*4^6pia%%z5E zFaXU|y+TU`FC3zI6kXgqJnc5#@~=G-s)`daNJJI_9PTSU9Ul9#=(t_l5m9V2r^q%T zF{vZ*qh4gLj`p?mgTLy!8pK;b_W3^#wp;ti>|`7+*Du%t{rpyEvb-nc^dN_a#KgsU zUt9psc=-hwxo_sb-Np&*OKX;NJZPBJ2HPk$ctxlvRttTO3>flg82CBct+(uIB_h3 z2R(7FyT?sfJ>fudIzj5|&I$t7o|bZpyDeBJY~Z7g6h#J)vMC>5^Jll(_LkaYi*^6p zd@}i9aq^}YKq~p2=kDnv0tR7CR{~6QE}{@6%;5|Q-l2fARYCRXaCekfU8>VFdHTK< 
z9-%a7rKN~7n(A>dX>(nUw`RD{#iutY>)l6+?#<^$QcY8-Xe+oK^$o;cd*CVxesfGfHWAN3R4mI;G*F;TV347yiAc>0nX_W zC}4EObRgUByjhJOmT1o>oOK?xJ6~ z;IVvJxk#FF{~>yv5>3$c5pG~zJWLufrv%r;9h1&i{hgcb@8O@0hWix$ID8 zf@Nd8-M;KN@ zcsir(qAgbuFbRcv^C5=B%79VTyo7~+Kb*uh|860qcK`OYmgROM0h}bI2;1fO3qyP{ z`qY@MQPL?7>SgWOo?!jcu^0OD+zL5yB8sble@C!8x{XmU>5)#>gxCBKp1-4hID2#< zR?iQ?mxP>1?$6!RSxMQHH307N^L;*!f-x9*=so$JWeNoFHEQE=zdOtjnpzqH(BJEJ zoL*m&DczBZ>)>prkH^3^Z$cT-Fwayd1a&D$65^4uc*MiJ7~$W-VBE^ME;s>aD2pPoLm0r_he0aeA>AL zf#HEet>!uM`LfHQpD90DcCKE3=l!|C00L;jS-Oa!=%OwCias=R+2DK*4n+n4NDy4= zdR>(jwpXW;`HuJCs``RMp^@uwyPjF1n3$JOs~r}ZFmp$y&h~KV_phS$TJV3onsmRg zEXcWmg8*t7c86APTk`desIi1l#V-X5pkRgAH|w8W1JnceBTr0rV*IwIR29DsK)-KA z_@%2Ha}vptQN?gJ9$rEPe}iDa>jwsI6d!~6Wu3~%5LKV{=IZv@bp^qcg+YWBGqbWO zF*)z?m>gq68Q}l|#Ua)otWQmqDhQ{=oDEWoNVH@ik)D%l04O9OJc@bFBOLsR9Hp_LCQ(!Bnse91jIOI*bDE zD|o;w*qiJRUR?yEo_x!fVvonLRq}h`VLZ*VTbfPA91#F0e{_31=Xwk=GbIKmZL`3J zMXfO?sLZzEMj|r8LDaZ@H&3tmSv?~NIc$5!P7Q_C|Cr+YdhZ>cSqKO;Vt%WjhsxL! zNON`jtah?BTVt>HlTVuM22}E`*9`!%45?@bJ(=ZFFM5elbE1v%`oqUh3 znhBC5cBa?CukEvk74&Ujta*U(&L^cB|8k@<=4ZLhU0>CoL>^A%@qt;dt3LrhGBSj0 zx^*;G#ZK>D(r1KLZNKjuk^Rl(eDnB)QB$SyWAI$HXjH=YO8Zd}r1^UhJ4OmdTu+CO z`SP#W(CcFq?e$8d={P68j57-jgBz*qi={0aFJa>NdUPHVIBs#LR3y6>(VdtsYCM)I zIb*v)>kC~YyyO9>%T=crR~03#0C?2&_NU2L&!Qe11hMU~zw#abvms*g&*6D_`8M|5 z@6YL_>lOGF)*dRxB0-rQ*a>6@Slo};j1te^kElE>bs$P7);m7iZ^XWUlr z;;%eRuR=*eSij~~_&$3Ilua1BitJSLnqBZo)BX^ekM0_x)#CY8wI`JWRAwo7Sp3@f zoniqGM_zFvAG3h#Lj2v2LQ_s^Zk>lyczk7TNTuR%kbz=cPSP`lyg!(Xt+`lhvd1Gc z-n6LGGp9!XL}Nws)XA!jOesD)^M6qGmSJ^#%l;=xfIxs?L4&)y1b26L2=4A4g1fs0 z4esvl?rhxMU8j@ZId|^;KWFC7^uvoyZ|UyUtJhjp^{M(sDUZz5I#tYB-edCI+#Uyn zqg`nkm1xZfDDG{h6rF3yi|2zQ*HzatBG&YP?8r>#igW89KIQYS%?x#Au}Cq!+XmTx zWeM1B9=X4$#>p0DB^;ObdSK7o{Hh944^iJd*B~2qO@C&Q0Uqg!+9~scEhi<~h|+fb zJ1s(_#in1@+1F8f?XEW@8j_WMZbr(eO*fwSR+eNsSQ_5_ugwq{aFgwvqz5}k9s{g3 zRkdGcOGK0=pL$dd^adsw>#L`%f2tIj{-6?wMR~^Zr4@&`$}(c^t|Bo)#@V>dglqf?V+g7 z2c@Mgd++gs#=ZOe>H=kljSm3a?&&3$Y3mdAT3rx{+c6Z`>~cOHTWHti%*#ES8u)?1 
zbiRS_=nO6X%T8gO_d_bS9x!JFR8sAMFM}2&EYB4mH}bYrxa#&9?Y4ja5-!>fb@#@^ ze%#gUS@RW=TIRU8`EiwuBS8Y6X2xUFeVh}RS@x`x2(OhdcUyJ&=g@nV4VCPZ+GY86?dNHIZ8TcSomBUJ7MPD-EZ;*ugH_Cc6p(t=(c&9*7~99aV1()t0k+ z=d$Ui;GI|tvGh0gS1+BSXyo*|4e?8+Uh>sjEwbutSauRh$ zUM#zQzg0pUrm6+Z{;u`bXrYW)Vty-F;O4Q&v;w^9-_do~m-9HwyQFr(iNbg57%9?a zG$*hfFu&N`FIoHcL#Nc(LnKCiM04?G?Tj$}&h9?LniGNBawU-1mRJ0{D)5MOghQU; zRrIYlYux`$(l_Mwhs>Fx~bmbg}SVwbh6RL%bhCVTljzmYiR z_Z3#nYT6^%!kk%7o(6ST4C)>6xI4Djfn>vL)K2Lk1Wu{1o%y?5D$O$ z5eV|wpdAXZcZCD)$iOH4UZV_^AV0dkc>Bz4L_J)vjzRQ-8QMNpr;5BnyBKqA)sq7O z3N(*%$&|SYow<0!N-(heSwEe&Wp5|7j?UC0cet&00lXa_lMz*{+L>;v7*XkLqwfx* z(CmrduS{L`mAwQu4Popp;w=uD4^*VoqgMEZ&4j)kM~V)R5iK^Igc&aJ#zZH`_!QBm zTYXvV?57>}@WL_g1mmUqK1KxU7Wu5XJmFjeBsYc#I6Y$%qGMMGrRTl}6U?A=_Htd_ zNrk-;35TG#`#oF_6!hIFk{)FvaZ&~*oo(H~$UZ$Tua`AH_{j4Lju9O;x9Ydf>AjAS zSum5C-@EVg=+gVB(OoZ(?8~9|3Sopz7U056gp*jhRXkrR{qOhbEQIl>Cm?nMPM&To zV&1=pH9o&S=Ip1%-OOI|;9eG&`m*bP0&-wcgMQb6!Re_{enAQ8Y!}2E;v{>h9(zBx$SVE17Ii*9KFHRga|6f zJGhN*O27g<5q+g&>dpMwu+c5ud$8ZDB<%^CPN%0!@gZBnr&hZQ=_+E2Un+VQUzDHM zy*(dTU((wk(Z>v696*bWcbTszO~kEXCVgKIwI-$ztKTB@7hGLAQlWW=rb7|Ol{%__ z(JB^}CSTk8%)?wby|aHA2ZqFvc!sK<@DPS!X@+4Rl;p3g`CEJoRC>o4s5rSKFG2wU z)<#}p^s!fcts*N`2;Q`$IN_nS61(r^Mb})pWC2#uk8haF;uKW#-+FI?`nf2+ct8dPZiBm+C_v<#k5K{QO2!d z?lP*)3x{=|6a@ZBOog5-s)h*=vc`3{kT@^wHwP?49#PR9ZEjm)UmKk%1vD_Qp1ob? zHKWS2%{v&kgm|)3=1#rCmwwwJHrz40y$Us6c|}EmF%iRd(Sa_Gvb1$MmRT`F|5bP4 z_v#$$rUGs3j+;I=SWewrVpQZq(&{KJkm9a&decOc}g;AyiZPl|9q`3=XJ|mG49$#6SeI z-jyfs%zJL1A@GwyCz}s_UyN9`dG1@8_(QTjs$W2p_}XODzin>SGeF_{6+Or82L+u2 zHFq+EldFH7R}ozpWxHSy z`Me|dwb^`w%D&zD^GcZ8>+wcoDdUPDESA2iB*!&X4!9W&SbXE)BqE}2>hrzed^rsl z@Z<22E|+2CC|rbMckA2??BQrIZR5Qe=(SayC0~j zlyGaTML96}|C2||mk4-NiZb5h4_rRO?%gb3XRB+g{%1rm-q3seKU0SZie(~5m9uL< zyVnoV4=#WTy9aDHneA6T>M^VZs10VtIw3(D|C7dBP7r|FsZRhQ&qgA3LIuVKb? 
z+j-SNE4Toh5oc_VBFJQ?6QaI??gQRTr2`@BX_@D@j zBJ(79yQF_B)j`W;leshu_a9p8CkyV2Kh!lIj1Et@$E|i)FqLML1H+yQdkND9AJug4 zv?>-Zc4C?w@CLi1WBu=ons;i#$K64>@3SoDCc8(QrVw8Yw4Z%NE+G7D`^(ZxYk79X zu>S`m$LuJe|BjvPa+8n{xeDdI01T!3$0PCnV)GdjF2#s{?#QHOO|;;@vbf*-{Dw^t z{O7s#;jIxOh*Ty&uUG-J_U^~0 zpPjO@VM{vM3*f}+!RVI46Uqd_05BIkNdjR>tXG4w@8YU#RcRg5oJcT*!aRIt**o@Rpu&B&4y%R(S8$1a~DXxBxIA~JtmAl z>yYS|!m>oeSjbH%5pzbNmKgNFggMZFZ0b6x37r2%u&w;~I{*Q#y+i~=1^)F|SAdGQgd-FKuyB9V<+W<=aW&q9TTX=i{{!s)Aabt?vj6aJvlovV zf*7R=Dy0fVsDb~Y=4KS7Q!uaa{(y3KtW%|*fKyZ_1Wxg}08gP?8mBQj*J2`4cVn8- z+Y;1f**_Tc{MM10{xz@ID92HJc2l-cmo#!&awZqDh>rb!ppmrEk)-Onh#wCjNEw>l z3ue+uuC=M^PBSg(J-Y5GhkjZh3nZ8+rF#1z<)x0%2D3-up6IJ)5 ziF^rK69kNBJdMxqI+)(FmGC3mD_DtsfhUkKqkpdef8539^lZVt2}<#rK-Y@Tk!*3R z1ftzEZc=$=bL2|YvN1{3Pvv=J-(5+9-$BNf>reg)w3Y$zG7=;pZX`g*91@90^*Qx- zGj@s+J_3}WIk@jR4_&R_eGM6hNCL%RkT<%=vA9@$Frk}6m23e-91|h_=H1dn{P~RuZR|pEAkOfODwRKArq1l<~LA{%bl(v-eNap zmLVZ*ZPu5ldt=Qh{hI2|LQYKrSnHT?6gLO{2x{Y#>5evl=_@W~w3)+EdCVt)_E^J4 z$&dGnoMmqSLNF0VopkDK?2~Sr-V?3I4Iet)Fq3;E7C?GCPaoaR>t4Z3>)|&Ps}vmE zhH(|tTEtcq4<(8!?lqoWLQoVB$s+>rvB`C_q8I|`Uc@Hep#h!>HwRGQ;o^9Y`N076tk1g`FQRFomhP#M;_Wtt>jQv6;i z7{G)sQ~>Do?#Xed!>GAwEZy;QVCx+;c0NsMOs_nK z^DS;mERbFR7H|?}7|ZFxJV)aFw*Q>vwZT&xi9Ahq@w8=SjAco zpWbj!6if6YX2y!g=8%bpgRd8r{QrTL;k1+^4m^hSpkO(s?~Qak+x8D)Lh+##^U-e~ zoQCB1M5+A0P&1(U2I?Q&nxJ0_?yq%EX4!j0BH7$c6t3=PBZb4!*152sW94N%j>+0DW1M`yIbW`5UV8@z3}k7MU~rfL}+ssbq9{e6p+4t5{vsFIqW%={R{O$ zh9(WFDOMF0gTEiY+UT}^!)<#oH7d*nhAc|lvMKj9=qFYVv}6U zFJ8edTxpV3E^8#{oA;&WA3{i0JfITPagk$*6p0l&@KH07UP6RvP8h&;vz37n%j9a^ zO%}zOg-B~CjE{pw% zaRNUHSR51A*m+$R$AV5~vk=fvb>s%~jGd$#>Cv$mm|(DKO~S4;`h{9;B-2Y+TJtHuZ1{WwYN}Nr0-sK>Xmau@Ag1@+J_~X zjEsFf9u3y2bk$lbQA|*McJgpV(e1pHGLGc{g*P7D?DRaKGY&4qd8bCW0xOwHAkZP@ z<}kg6^Q63EGMrmXi5JRMd-r*x-R8xPkz1g&cvu9UXQmPdekPczY>UOA$~KzlR+85% zGPpHt2njch+DygNvgY^AHJT_4YmIwSBm9FX=`~cPXoNXvT8Aj3HRa_jBXp>L#uWr^u{3# z-DuSh?>Y?}h9!3sjxOKf)=h1`7m)D52O}z0X0`#i`fF|vA~jp)f*8!=Z2fF7R1rum z_M=O4vA?W1D5{SlNhhEj9ZplbhQcGWAu~>zb{}fXOXmJQIGGqc|hjh+7@?f!e@4lM)=+%(H 
zBV*0!QMKW$PC*|A?#EK8OpN$UbwVolJ0~-}ufU2aD1|qeYEywX0n1}O!REL=wEFW2 z&*k^Y7Awe`wo`?N2fGh@n3oV~qs4}-M$}~g#V7=nq!jAQ^1r1ah$~ZV1PRXWPJd%3 z(2nKq5=@BuzWjKDgyxv>&?@BNV6oYJeSAk7OIV)4IZ%PiV0`-hsp#&#gKqFOrsDv3 zyvi_wo&`>Qz$G>7cY~V$S0pm&rt^89t?W!Acd#rnRJ3+?m)-4YOs}CY2ND_= zlMJ#X2~|SbW-3Tl3LrAzp3Xo4`45&jst6e9QTv>h!NoLR$1Ztlw$_EV@?K}_2F<{> zF42)H!`{Pr%eQ^`za#zdHC>JD>X&{N7!_r42K-{mb>K^^RhOScb$VJEEv&WATEi?p zh>W*IWaP<V(F_pOe9woBqlh;w{Rd?DQK)}lLSoT10*kx= zhiOucj|Bz~rb=K*EpbPjd1sN&IFW)FMmZ(lV?6AmMEo@mpc6LpYvcgi^EZZrjqa&GXLer_H{_+v)D;e0)(%pU!6~*_qo|w-2G4W9c%cYal7vNL;yc-?vX;r9|L; zj5mizeIqWkr6v3&y`PJXQfli*+@9%j9`z+;Q{odq2!*+mZ&qti&V6ZBYYuktX}Nzy$%E8HOZtRfjd185{MzXDJ*=cs7#Q1 za(c;ZeTp#O^!Nbwx~fG39mTugPP-`3E2b5MEVQhCXI83YZeCkAo4K@8rhHU1$Hm+0 z<8BQRR~Terc*gra*}0tu{XN4^JcK6?p66eK!|pPx%MuwacWr0>Eo3ftYSN*XTUI#B z6));W_Eu))o5ws$_lp@P#u@qNd0(h$L>s&vKo?jNBBRRtO*IYMCs(2FEZC#_oQ`q& z)w@yhJH6JK+HGG{6t{Zg^48k1I_s(xicv5XLP@2AJT=m@C$n1`H2Uv!JXTtqMewN# z@Y{|-?w@%iy%QxBcGM8@z2E9iyOLAmTI(k_p5Q8pW49)dde1EIZ4)DL z+LI-ZFpq4Z#7wo7Ba-$e+<(7)m-#>fR1@{G?S70>j85wl8`ah+OWqt2WO|z{n+?nX zx9ed#SF5p;+(B_G+P)cGzjDq|fawLFuTZ$df9E@6RobjytEXLWB$bIEgrnNHCsql& zA=umWE$mm3>G6bT)EXZA%VF(KNNqgl!$>BYf0H4tPJ$`=zMWW6%QueL@>Rp<65Dh~ za~Z0v%kD@ZaE*$QViq$s)iQF-P^NXq_v*DG1=R5-lHquJ${#+E_akHTM^xI%fStx) zqsG;hW}++_sz2}fbtT6?8d6Wk!y!^~vb%M*){b1h8`DUVU4-ZEskgJM0m1&#{@}`A z0KbI)lftuV&pVL7-|mwx$}l1#Ka?&kiX}{>7vam(Vq0n%FG{}_x6`IFl9KIE+_v#f zh)X(!bn=jIA{Xec-E$+4DI6i2Jf!b@=$63YiCU9wtZa1<|ML3kNZNLn_HewIEq~cT zY4n>A;;6Uh)$s#|T%i!sPjizsPn!UA5n2e z1^KFby`Cu`hmc9$`g;nsQbKClb-pOST}}9?#$0ZQf3J6HT8aVz@i%r7RHG(}8{G23 z-`mz>FPETRf<)7|rz*7aQem`Sf73zJ2mX-l{Up^Cvr2U@%?HqxciM(mZ3S*QHR<}$ z=AQ#!5fWALvhfiV3x2Oe19ObR%uipI)X1Yq$Zzz`cP&;_@};_Rq$rzhC0y=|+s0gM z#AH^3Rhg}}oiz7#p@_!Jzm;nDml700X$c$d8wr3tTQyRn;t$F*Jk6R-DS*{VxfLEb z$(yPh9>~QgSFkCy!rlO$CXXecnX$Yj1k%*W*X4fzBg0Kn z2a6jQ$xxrznDD=ID&t5c%0ujeR>D5A>h6QJoekhboe3Ojf4K{QA(u}PMjzY81afr_ z&yIfIL@S6lt-p@l7$Xxib=mMMf%?qv_kYo_+eExKgiwHqg8~}q;d%bABB&*xF-rXY zDF;|Z09)R_MMY)B>1zMkMMjMNFP$avf6I|(#r?Zz- 
z^ED@bT!o-$k$rMFR_+clyC}LivdQ0H_NR+unD~qoFa^HEqKnrxAcS3|x&scf2I7Bz zFII>5?~5PufS}XgS7iT|SN-q17HYxB!jW{{MSzd2$L5#Kts?#~`ic0DH0ytCi~rF) zMY2L-x{+I#+b7rnV+scL<$W_M*pA5X4in$ZtGw_P;m^obiNsCFR&HF75VArp4#kV^ z`o@$T&gZ5ckznLMUn%{Bq?Xk(G$l~3)%0*}2&qHWKXVOW4qv%@m~NyaLcQU+!pvuP z7}f982DUiq!=w0vsR%?-Qw_+J|6bP#98f8gpg0D;RJ`yE#;??dJpFFAD1I55QS%jFQc<0dyUcZHP1IMwaw%YBgRZ1xR-q)G>A}Dh0qh@k_9XQzhxKk0GJMm@D205WF3(Rn;0*i_^bTAh zVXLLXJ*Z2SAouo@HPkfJ)G$ryj!NTy=~@)^X1lH!)1Yseb^v19xe=-DnJ1tDl^Dz& zx`@&{JZn}7ODCamy@Q6&XVxhX?AUeJfI|mN9aoYXOguKo^2FI>N{P@*%2mXG!7@xSq!)6BC5Et-f-taBTa4`-8@3R{^MSAWeAg>V7jti@7M zZv-<*k2vA&3lb=T@6RKgS$3OANa^8d)=S1JAcbvj{#8OB8(YEH*nPOIIJ3qUJ?0~& zn>yU$^`~71iVvFY^^v9{2bAQSP2&@J<<>fp7LiE^F@_|$2e&xKs^)5mEe*c~S1E#nWo_6y3j-P`S!Fk{UZ%`@Ho;Y zCW&|<_SLG z%b!)N!33%zoE61>A!;2W8o{D+!8SAswEOL7OzVfBb@{k7*l|QV%p?Un11-lVoG{bi z+yUsv!HN1sQIX%zQ_ois+4(WvJkkP7mI$Ao=#R>$u3d?8LEk|D&uEnOE<$#(+Vc&B zrY$}Vg`bjb0^3hH7me8Tf?9@UEJEP=*b8?gATMNxNy_Z**=Q8Ph&=f$(yWXdfi!?P z-C;aAL|+FB9O-JcEq!lVF$}KpcMBb*h{gI2>(FNJoOgFGtU%^Tv2eX(EYJf?J}-Dl z9Ml*SXt+Jvr(U2noksBR6z^gShMtu@o&H=52}dqP;8)y8pMBl(f>AkO-njZz)>Jhz zVIL?Xgus0?2I)C11UHy{>vc1|W=z}f2uk$!!VDa0t$94+8fVnA3M}$?!Mk*(h2%G^ z_r`qO;rRuf*OZXr3SprF4-W+AWpqF9mM+rwql|x2Yc(*Rh}VEF(w=^;H=oC<}@9?!9alf21Y_KYk2SPGeF9`+!Fmk08IS^2TkmaAz>=SsA-$D-*d(AKF?*^zQI@z9pn$-(Uv%)Aia9t$kJ5ng93%Bm*| z5|0q)!(y+7Yg4flN!1nHMRDQ$F`>!N>4U(uqyflZAoFX3|zQ3(&OF)0gCxOptgNk<= zmH8B*LQwQneQy_9It&{NJ{pFjY*_Qo7e)Aeuzm=eE)nP^{#9#j@xYAR-7#vsaQ-Vj zTupQ~8zjr~XKUZ+{y0CTS4`Jj^3D4(koyswAF2}0lF9`odP6uiVf0kd&USchoRVdL z_FJ>AO3lNFVXTySEq%A{G3T2!CyBo3kHe&5iQoo!f!ppcNVJg44ZN%%z#1E_PU>wX3j0Qi^p&YKjx~lSZMb&r9rGez zFObhN*gWog!n66{eG~DBC?&m;<%vYM6l(B}daKW|V0Hb9ezoSvrK>?UO!gR7vwGIO zs<*AMf)24G;)z$d` zxbn-}(+G-aWN~osPrzW5(tZ^gW{vA_`8B1l4TW0go)twIXK1YF%T?Zy=Ihbl_v;ia zMxzU#3!jcBOans#EILqNvRldR8I>|e!k5Gp83o?P?FC0eE1J$@*oKkiS9T#_{pq1A zfI5v&Bl?-7rn4pNrO+J{a&^BMuir8~Jkg4JynW?}VYL`fb<4;BLwOJ_p<-mf>V6E^`rv$%CBK~)Pk@-qFc#a_9{8|`;&Jj|Pmp|J1z80`->&5CYU 
z$?{9^PA{hCJ?ez^GOPk_G$&M|$T)VzT=eLt=xi??>Ysl+p~yn;DQI$gvs?&olbpc= zsh58?rb2z9s!Uk5>m{5O)+5XCCc2OLKpib$^H8*joSDOufqvHQ9TcP|7HxRaEZTH6 zopmb?UxS|r(EPhhU(fLotYiLeVW~h#sV+IKJXhw65KxR^Mu#8zB3rd!iENB5Zfc^! z0!*bDsVNfZsiury8&V;)GIYq~=cg?8UHwd;#SoA}OzxeqN@cTs{eCg|I7QLLk;p;` zkbn5c%(6s=)N7z+Z#Dk3oGXgaIrDyELzXQvBGXNIVZf+EQOr*8)AZnI?l0E?MKSd% z-nG8p8?HnDAI&TGArogSLSkMKh+wL}mkn5vVgJbW#vu{?p__p&M9%kj+yn#rkGqZu zpLA3REY1Li4eZBfUaitrLa@<~g#Yi~xw#xL>W@`>sC6ztZj76t)-m(#Va+Lj2+*%`_M` z0S4tR<69_F<D9eki%TahcPWIHLOfxv%~i@Z*V_DcTCVPXpzr z92g0dzh<%esgiaqWj$91LpRGZx!pNPH2MsNdhN>M=c9f^bB!vR`Sr+*E7zVD_$WP- zZghq>%MDsjgQK3-=EN8UX2Nsc(TVX!#l_Lt;4;y4P;8t}o)hG5GPv=*RWW`NAs&Ki zb=wk?^f0qx>mz%zz=W*J>@Z0rGdt%Bk4ZA*;`TbS zGm)#;L$rewqQB@UtK%D15BaW}Bl>KU!prq!rV&K1#z3M#6KzjTsji5EFPO;9Q+XvH zvz+a2{dn+T{D7o6e025jVMpZ+8g@8#q9`J#z3gEXXhbkqb0>Q<}dFfM7v;jemR1wVN^MJmF%uZw2vtD=sbRw^)=KZ6s=tKlIggqK810mv= zBi1)O6=MJKUM zW~0foGU1|cF{q^6pP`?h#?vrDg%?%w{2g2yRsDxM5wb(O;jP%U_7~n5brd!wl}jhC zaE@P&Fmu-8;&qlTM2W^i8*Miy)@Z*G7vW65tlWgDX^LP?^6%cWi*mEhIZ}{nRRv)g3D17c4m>}gR%61=|Gp7FpZOD z?N}_SGwXL4-iB4hlbd|D1bNeQF*{bGCDZgJ$3z>!?S%HmOSQ>@^itX-O$n7}G!`_e=axgV$x@ zYD0OSbG>Z2iL>VH#ESLl_Gt>Z+-&H<&p=Yz;Ez;EJYC%tO+u~abz~Z#9^Q0A2AjD! 
zoB*mY!)>$i4HxN*zVc12b(k;{Kh$Jz^oG9` z;i2|W-^_8~_59+OgdfLP=D0s8C}PlEF=O_*rtW{UDti`S%si?Kr`#QE*>E%|cjo65 z@%{$~1Xn@SP<4ig{)*8z`$LZ_1CDSV-O=Az{CS}S-7uOWE?;L>KfE;a2N3k;ywWzWBuqw!-sqTJHSHtI+Yd`uHz7ONu;Qn(_1NS~K{Hc&ZIYvPhu-Le zc*J^$;WNzcQkdzr-ghAg^wOa{Si;#eqdtOAx)hv_VjdajYBTCqLeO?s;5F1{IC96y zIrK);%G6FJ7f5-CPhP>6r!W>$NkyOVzr;=dt`vh!!c-BG)zk?c3VC2q(iqSK<^>Nk z?pBWyVxmbtjkndtjRAe29EZWAd6QidQE0DTv}8EqlO0cMX^5x>TF!RhhJURNh8B_| zPj=QLeSnm4;Bs@l{Hl4@ZnvWr*8Vx@!q5>X(U_+x(dbl9MN3*iCN?t}UkGYr31=(e zwbF2_yxn@CMRzCZFz+!}%NTT>%Bn!jd_?Z@h??Bl_)##l$I)ROgpv6V!r&F^PszO^owRxYH$4AfIj*=(X?wjVQq@>@!2MUfp zR^{#Ngj;45<A<3s?KV@io2Y=ksiN(~VP}+nwy*kUZBL?2iklKfC z!K|dDREBxAvYuOXH2@ZC?EcFB!W2)NXDY|n^`Z}o*i-#x%5n!zT60rlr!Barly|DA z_BgGeQ6G@Y!a;1sqBispUd(C*dOV_a&pbc!lG9^@yD(f^wZGSOQ@c@6bX67{SZlXz zj#ETwIruS2GbrMe4hVcP`eG-1wIr!-?*)IjUmbJQO;JmX4P#!(+w z4YV}bZSRMF(OeT05$YQsSvPmUVRn?1$x)XW%QfAbKa;*nVKx8JNCAXP4FYvS8YgPU?pvg*NCafs9+v>?NUQmes)lSY;S2<## zW7s;bH?;IEAQU5j8>A90427?FYq3`JvXO@(Moe9R-~$No`604awO&bF@2&<5?B)2SkOW}n zDgt8GgmX?;7k6iC!FN_P@01p^{d|I1G?ZiFMkGDH zpaH(RK;wYtHWng@>8E1mSnP|EQ16n)s9%%%tB0m#aLc~|n~7WZ@Z4Vqx|S$9UZ~_E z{dGb2QN_MC@}Xq+gzX@gj{@}D*uxO|o5HYzK}QW;_OGfl0;TW{lag-W~bqgvf%R>CDms8Dq^ z_0iVYh7)1y%Pw@O!`SO~48ne7(}x+J)Vmms2LxQENE;ACh?O%yGky%cQ^9ofbz; z{TQyo!CpoAB7l)}NJk8z_8_@OU(Iu7G;!|?t+qT_duQR5HhgNBlWhnRu%duS(7Dpmj`D_0->#y}^Lxasr2>W;hcQ z`K>a@*h=1EtG?3x-p+60J|obt1QD0vGTgY3AC;=}V zAG!H>4a-5|n@hv~X5q)VR|#08%$*>?csBNUN0W_)ykAD1FE1i+(}~1dkH(H2CpGpP z%>m~Wl)rjh2W@R>CGuP+S-Y9hU#GuH8l>ViG-sa;IF3u$P@n0PY3s~2E|M#L{88yh zmF_^5O3^pP;5@-#9sXg{0^8ge`=~F34%U54btfY>Elo+hct1%!}6R<>ehWSk2>3_G~_gh&On%5nKaIv=~CqC;}nih!R8#d zxW~P}rpo&Ym^i@HsgMX`3Rk_4W&6T9V$&sXIUcQ9jN88x1gpw(ng1Llo}xWj#eHQ@ zr8PFizzAZBqT{Fz;&)_Cvc?bSUT}T&86MAz8Ot31Y6`1np^*)PbE@IZBF9e%OR89F z+0sXib0|MPR}3^L*u7v4T{KES2IplO>&g9Pr|*#WP5bjr=X&CFLL+On{Bq41+AwPJ zlvqnpgM~gMe^AkqG>|Z3e;SFucxLJ87tIp;w#+aX!|Twf%Fig8Z?<;PtuKdb{=n zlOzb3zs_zW{yI7>^ZkQM)Z1?&krN2CaQIs;-UJ^QYHN8Kgu5Px73vZA^Z1ZY=F~Gn 
zUq@l^#WhnhQ)8dJ`UYpH%RMUe1QUX*pGI`pnMf842Ei4uBn;`J6*VhE6&Gu3VVv6! zof)nVoq{szPxZmedvm$SstCO+$!TpFWg5`r<<@+D z5v5DxaFFSZ?r}y1&Dkfqht+7<$!JmnrLjrb&5MgLoL)8!ib`fd|9IyVO~X0d$v?R2 zK5rFV^yc~~%#S5tRuKLTz5kecD*u=3Y8`ocqrm2G7Xy>UUBEwV7eH;c+$<3whdav{ zw!!AA**<=l36PlBdGIH2q?R&jwNC{s!GtJq{9lKe&&y5#nvjJ}1e*t4^Vc=Uk1me! z52s+6N{7(xIeEaPu+Abp1ckKt_F1z&^i8oO%B%uRcin_iZr&mnTq!1jYNDQAk5!Xj zm4IlrF?@NYR7%_-ikIVmBOdJkEMWn=LjET$-k%r$s~7H%!lz?A6o}(J7%zvMiuoB7 zuYE_*znywf1OGI_f1^eI!>$TIVG^tcb~KXH{#rZW3+w*P!vYOwN$b`-upe)*f1r{d z37vnuX{}r?{^YI%O|8Cv-T~q%@4&MBZ5$cV>ZKvg&OZX$Dod!(E5OI+ga(K1J$#bP za-sfk1{cG*QlHm{h9$)P!j*)?z=`v+`y!NFdGXsD8YGuYI_E-xSpO2_$e$w#Pv&ii z)a!`F&$}k+vg$p2n`am5Ep)D9cS7bS|m!h)2_*fbw5q&HQLLv*|8i%q(pS+@Hr zB`y?~5PQ}$_`y6bnpA7qmViGCWPXXa`_ch3&R{?UID9zZ{(%m5pEXRK?jJhr;bXzU z#l)aL-0^L_FG2c@3rEXOL6d_PaV)6(h0@!e=YD4PoC)7o3h-z#OVH9HlMmZ>@k;dd z2mLu+B;jegJRmY`R_GY2-gJMPsln(bby}zpUlC8N#paZWUEyMcc5*1MxH>D9iUTE@ z(Vs*vuzam5n)dX3oxy16n>c#hRoZkb1cQXbIig%|U&A_Yv;`G>l_PNbapz8pyt z*v+^Btb(WJBaE+CNx(W%a5B)VnfG%hOLlcI=3l>!IWgN?e-7zcZ{SXRIcF&COST+H} z#^IoPoigU#mGKjB?tc)9CfFyve0x(fXpR1^Dj+Bqgu1)Ek$^)(Vg`iN@Bv4O{`S6) zRbL9Tu$%@Z5a2Pf|BF9i7928|@$c{$s@>h;dYJM!dg12Wy}2PhO5+$Er@-6HpjWY* z;!qFC1VxCAP$7vUhIUU+l2aD!-x;NDE~PJzD48qg<@d!67coefddz$+ZkvWRgA}@rOHc_ z=%|@=JH@EC2_>%!O@@{p_8bvC@ zEd8M<$!PC@C%H8uM9d{MJ@ta(VezKpgWu>xa&_Q>vSrnTy4*W3UeJ+hCUup3+qr(J zFA9nQnpg@MVesdr$Gb5&5u8vSc?78_q4I+v-55YePl$n8_;|Rqoqjv7H&2`aoZoP5 zh12~?ie zDMD)Y1MePLPN8b>PDe5%Oa@xOKGwo(F8>!JGP^dG!Q|F+zCrlhFPTQ~tV-%+f7Gq$ zav0On(o`z*$FFfy;`aS$$?=HCVk8$s$D{Sy`)KZEz2DW1h)8a3>F<@9hYKoaRJoVr z45wvnCMXfQJf=o=C%qit6351Ks!mRu4dknYdmkejX??ssI=V~#P(7)K3-^DT@l|dT zgBfqDuS5#KRJiS~}*hmm5 zSDO@fD;Kq}j!%v)EGIV;{AezNvsh483*XpqO z;_M7%WVOo|V>!%~pJ#Kcx~+tm%0H_a@uygwbw(UAznu<+hKg^GdQFQqkNsb~y<>PJ zao6q}W5S7T+r}iB*tTt(6HaW~wr!goOp=Lh`}92TyZ71qT-QG5+o`XeuFk)zx_Z^R z?{)uLPB`sLfN%=a+S;v{TK;&U95=_&@l`EY#uBRq-vHk9Y)!FNHx;0Kd#Gh>1zJMF zDCZZsNI&rj@$|`QZ5susL8TxxjZ4FT32xXOAmJ8{)`20PqP%G1I5Ec4SFCvDkb8wm 
z9(HnEQs5c7&SKdrn_O7S^!oGJ-<+*vH=OjR$3y+N?s&a@yTk36t8&LG)GY=>YZ?v% zaK(9LUS)5%^l8sITz)F>Y&|&6_%_38w-<>IFYD}#ua=1~H(#^h5!q4&Xf+g%1Lh9< zKPT@sy=*}(`?DOJC_d@AUR=#bS;5%yS7)srcj@uFIs)Bn6AOb3ka`|T6t4De5*0I~ z-~}z`FVZYdO|hlz9bHV!=cgyQFBoze%}&}CRnHfRn5jCl?|2oIBuTy9?%aW#yFSG3Ynn;&rGiM|2g;1KBsNQ*50Lmn=2pin5$~)u3(}zi-tj~9O&0i9BR;FhJVBXU2pzVPf zBBi9tvokho14ReYahEKHby?{HTW%n)B)6N3bCv9#z+F>#O~b?d8rm(nu|r51ZlR z(_`>}Y)qW+Q7GA2e}3;L_Y;?Ze2t+Iz_agIC^X#AVvfYW6K69mrK|hq=n&sL*L;tH zpMvqrQ!G`hv;k~-9s;r>B9MMvqBgaw`)+o{pCOd;P*UY#+U8vL1aLF5o%5`ptJSTQ zA45bf;gu|z#;wsMn6F~E&}Mx^!U{3(PbZ*#XPB{J?FPL0kN5Z5wWpA$6fVONK4`aD zW#NN|lOopqd27=UGmgaM$9ot=(FfNle52aND9Xqex%#eu9%47>MrV<8;|)% z@1%NCf%%Npc({XJ+fSEgelmb8I?M(Ub&gFo6=JqQfz(mQy05Cd23Q!VTFWe(-RsXv zTV|A)!urQHa4Z21%8{`|RFzu;b8;L3gqIQ4c!#g+lAomSp$|;vi8X@!sT_>(S{`Sk z0*|jWZ58Rw_1|VYw0l7}>Dsw!PVUsZ&UMMt{og8>JRSuWQ$Kn<@78O5rZkHlx}-%P z*nVGI1mHA&>0{r9YE?Zineb<=naav+O5g%qyyu(fc2-;WKEmADDbjuY;ee@@Y154t z{1yi0#^_m?4n=V#mK}UnZ!oMBmVenTPwcfn-;LLb7`%!iriZ|9ROxB_-U~~iGKa}v z;Rhut630zgH0jvy){a6{^Vw9z7Y&=l@|w7Ij%_#zM5c!dbFcdJ5Ig8hhWbk4TQyvxD{{Gfn1Y zn=rp_x5X4!!Rb@ufdOQr$BmRvKYxn?(xk1I{6?r4L->o?PZ;|Z-U&>frd*@X$?GG| zD?V?m>`dkLiDb-Re9f*CNj+a^U-a*uXKu&U;x~@z2^=xs{Qio!`vo{zN|I;`<+z%4 zBxh96j6QVo4il<{JkPs7!Vh>n7T9$(lFDQ_*(5*)oj2{<4X;r| z>n^7M#LC|QRw=DUiwlXlx!YXx@j>bbpbJ%FXT+E@?<^`|Z1|1b&+jjHN3*%T4m-Dr zXoj1G^DdraGT?`AxijB_g(mercSVh?vN83gQWt<1oxo>*wxpXLs&*lRqo3GgXW=({ zKT}yNIT*U)?L`w3X-qt<@nMNt{XG)|jBzU+WO9SBL7&1K$Y{6i7R(d~f`sqE{3fLqi8GqJK;h ze(je}G-Uem)!#1BdUb566N*P2QZ$GPCEhQ++RDbyyI79Vp)TB3W?speg=BQyIEy%V z$MZ`bKWZz#;8=E0x$F^vgT?cIN|dFK?VfJ>x2Yo6m@pbu*| z?dUPWXZ4Y8`o)x0XzAU1WY_-c*#3O?+O=pRl^|93Qn`GYJ#Qqh-nqQ*ar>hB$Rm`z zpKq4_l%=xylK2blm-Gxxm6{^Ak1DXiWjO;o6Sx>gnPivAXS{ueq~RfS5W+7VkLp%q z=X%tR9*W16Sbo`RG>!am`U4wd@9VjSG2P+3ue#whF!*H2f5tw2d3Jnibz$&oV-WN6 zT2Y$&Rb7E21PjVeU@#Qv!e&ai0%J4u_2u7&Kq%0bDeCKtbXx3y_4};TV;LFP;(>AG z6NhgSUbEj`(a;~+M>zqweSI#rM)v5ve^gw=lOT{y&CtEZg)p2FT(-b#>UgyM1@~*O zf2;^!GaY_e6uh6OL>B&SYhm0!_#H)%&xH)?toaylgRu;-(u{FVg}_ST=W}Hc%~#!z 
z1}L_=bQ?BtDFiE=4EMT;I)nMG@%48&v6ANi67aVlY#wK%Jc&nZe|J$vF_R+i!e_D1 z^L*U*rdrK9+Et9>@^eA}9`U739zQ(4Vv}pX6YBx&AMMi(^7D&zts(jCS6gXcRg>nA z1&jiyvmYL9hco&Qn_r-nxn&F)0NdZHVH;SuHk+(=u?Y}G_7 zF|$x{ugE!{okp;kcAMF+g;KZTn z<)Ct-COy*$q|t|dcGRnS|MC7^6~6@r56QH1*_Pv5TUL)+Mvyc~J%6KjvFnQA2{}V5 zJVB9#h=`+OK7}N7zdu#Gizc7y;)}kKj_8V@QCVq6<$P|NuU{LSB>`HO6vXyJ@!)43 za~1ht8gD4z|=$e+}&N{M*3enYQ!*Q_3v6>uX#Mn=++;e&E9X*{_zku_~%B<1l(e-D9%G7 zJyi>4QssXSMXgxp(A&d6a-DxcLv;f_=o0Z zOPTvU?8mq_#AQ|Sn_)L~Y0Wx-ff{43?VI?+<~oUHw)dojsZ($~omq8v&N0%4tk?|` zjdLCO?h*k86; z*gMxHFny6d(8TzVT(9v9>?1=7+XQi|d~U#u&V) z53gozQB3Qq@$xE)83dhHc|rG)3pj6Ze;lsiCYr>;Zz4}0uk!iZucp49)$@4OdD!&p z2DRc64t934)n;!DA8fVm%Ob_O(VTDZiWXHaDUR7wtv4}5ZH<4the9PlOV*sHmosTQ zS~fpTfc>*|F>m>WV#^E$KBk1A_{~$zTH;yw3^Pdr0unFJkI-BL$uM18;O)vvXxb4q7Qq0#I=*kvlpj~6imuL z>%C5)_VQn{KqEJ+z5o&u92}hYJ%9>-!EWY;(h#`q%<7wcLx&g9igPf`hbzD6=iR^VV0_gqi-!+%$!YvjI?jv; z*=Q(8Dg$Oq)zyXdzLmvo`n6%;U|?Vn5G3n7>rMV9q^mdn>$N!B-KtUoE8CLVL_|%) z_rAlU&)#s>Fqw}|e?zjjDNN=mcMUJCRJ?f9NnjohtR9Qo=ZSfH;I7%0uZzL_ zEn`o)9v%I^+NzxMp>MkP5YdHYpGC5zq!_T8D?bW&F+dHq$F>`xP+@y$UY3A2w&H!B z+Pp~vp52^|&9KN`!JMF-Kl$Eha3UUdp`hTv=6Rb`?6#4qi{%`dZNV<6T{Ieszo`ND;0)VC4|)Ef{5R-`9MV(G^xi-VoMc%bu=lwI$;N8VfhH1px|Ph z;zMT!XxJ_AQi+t8TQW63zhV{f*Q%>KQEobM4k{_O#Pq{4nl9&4WY$?OI$9E}DN@bB z8m6RB$#F=t>a8AU>3Ck%Ake!BSU1O|b^m4EHJ!O)p+25KJ6CYEl^N>oyYsn2gQYAR zJ;anyX>8GFj0!R9IU&gWuwNnT0&x=Ajhfx8x(wgx=g89Bj_U*bP9Hxry@FE$lP^3c z^m}R|NyG3Y;j_v#?Lqylh8i)k!J^C$7lxNrsz>;^*CLxY`CECRi}A7M_aT)P-KG++ zhm490clS^1*4_~oaA8IIzkS;y6pUu1K*;KFYq5F6sdg{p-0;Q65Jluh!{pLz^;Bdl z?Z%;`j`Dvzcg+bdG&eJ}HZT7mPPNr8j7cHwnie7d@=BP}=Cm9`+o68F@Fe zTHHeUc+}P|CBy*V{8xES5sKMEMD2??#oPPn5=PV6PZq7`tkP77+Uaxi0Tm1kOiW(W zoBc|}%~X{k5O?EIEPZyXt!s^N>udVaN_}o0;jK?k|6&lRIsiRTKf~7Ip*V3`_cTu5 zjTu?WXUx8p0=UFSr!429tI)|dTPp@*3PizWYt?sZHy zPf~#@fV!oSxJAX`f4tLH`-cQBo__ffy3&n1AE*EAmG)4MSEB+oq%!?)X~uTo2mF(0 z>66S0nSJK%_!*Tlup1AI2y`OA7ksFy+lhCsAfwmJvo~J4mS7^M`<`(6N9?iDIt1l^ z8({s32vvylxC4c&1arjq|jx&)O!H#{4JxaCJH3S*3QpTSq(i8$fsu|Jf^hb@e?Xw-KP;(j>_ 
zBl7yXF6x&WuPfNr?kL-NxivL3+Fu)CQjMdzSOg1TxZ#aGE~s`mUnr6uy-bR}RHYS7 zB-j5q(y0n4uon1DyfF862dlSk>(Bii&~){Q&3dBfphMeY!MYw{z@j?V@;pVswhZYW zquZcEU?AJlzXM`{4YZ7cSisa={Jmgx$eZo!;qU;>pY`;8ljK;QAui8sZ*yf)ux=F8 z#R7DA+Z#L9hl+oJcAo<$iQ3#sw*EODjltVWtGjD& zx?!m%nvITjQ7gX7i!WV zVM}YS&dI5rq<_d{a6eabe@AJLGhJT0)O(ropAos~HiYHY<0qwo1bNnFa zrdPJ#lC(VQ_W{X3Dglm6;D{&x3^v<9a#3JryPWwra~yDpluO3MD?fkx`t;_MmeWAe z{VTl%3*Ylr0b-1TLcK9nc;(G z`9J%;m%C;pBbBcR;JdKmW#c7Cj=147_=W)~%Fb0$7-X9Wh&?y&(Q*AGWJX1%+s+z& z*9@Uku`3D#MV;n>_I&=@Vl1%X5SD8XSU5~wk%rOCY=%SQy^IvUzn5GxJAN`X793&9 zL2XM@6~oqi%waG3R;d7Iu?g`%k5;tHnHa@iuLvEN=Z$Pi18T*&Rn)~&LkTzi$eZnW z2I_F@eX^`CMvoVUa7*QNW2aIQ+kQ^4{bNG95T0cikAG`cUA!Vvy*{4X(mqi#w2QI% zx?*s`O}9$1g)8s$Qb~=S=-Q&Juv=quQ^}0U9NF7~+D9ka5JtTPByk@AnUw zaiyAbK7yH2WJ7+N`JYJ3>-1U)9((uYxbbViy-H}7BnT}B;-KMxFp#a;^^-zZED$Y7pqL*ZrU+@6PtIUmg#`mR zT^O|3+rvD_{zF>>y62VSml%~`N7(jq{Z~K4<_eb9Y|WbaJGqR%+sW#uEH`Rz_PJj2 zuuyyN~|DUYN3Ws`%P0{=XXifo;sMvV<~Ye<)SF zxg=>KGHQ3;LT(A49HkU`Iv+8Ic~$hl+AlTuH_i~~n?1~Rkp zs(%Z1!sQsx`-$cIs-buVn|q?(Lj-Sp6J=!@c)P$C9&)vx!=pN_p5k(_>f4g+c+q#< z@SIl-nB*Jr>zvVy4H=hG7GcEOzBAvMvIjd?&0JLb))mxN@>VAaMLv+mIvU~C9f`l= zYFzumvY{MM>HoxJV7w4m28^4-U;OlwYsJ|v z92&=Ly@+If$m}_lhGPIHmP$kSJzEOzQx`-1KqiT=*bE3y%o>!>rom9xgD^;Y#9|$n zOS}6kQI_|3OhF$d*nAN>XFQeQI^$&{qn3by<76|N8^VeKy;|Aqr@iBqEiS``K4!`D zwG&Sn=mn*Oeb_x36_3B^Z7ZLVV&1na$Opfg#wJMP@w8*SE`8(%DB~;OyZXAwHrGLw z#O*WZTq7d`6^0h-fB}mj83vmur=p4QT2Y?8%=RkC`QW=+;`QeCernD_?_BLHpdtbT ziy)HaSwJNs>%8;JlaS>=M(cX{dMCCbPtgw+?;qB0@OEmYFR_TIrc^zyMEV z%B^)&nAb19`bxEFr-k6L_ju{UM`o(p-{n^8zE!nR{BwX%J>23PpJDIFcl zWnjjWypQybWAs=$j1SWIXK3v;tZ8sDv=_&s67#?s1OU}`1{HEFTT7m&#Hgfq;tVgv z69rZjo1@QGS(`4GDT58)4`o1-Y-oX)XR8u8knsqzQ^IDm3fz`u6OmtMN!%h+lesW3 z7jgnyhLyR!w9fRL;B;QDFKU{(`hGWaw7qEB8+O<76%KbL+F+chtb^-FI9R>WzpV7E zB4ApS#XEzDv{?QVa?27&0ok*|^HHT6Z&qj9v7y(Fk#+Q{`>=f9mQz`(Tx|;Y-DWK56P#JJs5CtP z#>~iaEYmYziHg!o>b~!>w=C|_Zt&`AhAMOQlr(|p{{13Au@9IV62Po!x-8CE&)`^xLh{ zvppSh(OI+%=oE?`!~(2O>e`xH+&VbJh1ihK21SElpkaXKkoUs2qtuvgju*Y2xk1H_ 
zk`(8LF_cHm!A!X(=w3wD*^1M*5~1lKge^By%`_AIt?6K8vn*>D9Y&fqpk4c$ae*&W z8Ed`AM-W++kxlDK7;my=U7!~ooX&J|VfVpQu=UE+Lqq6CMH2`D%xc6y00zjx{2WZ` z_9wW#={C7YuvQHt8z$LIP4DimK(N{clda|fm%>t4&W@^lk3{M2b^LoBq{EMki>k|x z9z{3f^>r%}QDoc9TK0#pJ|^33b05*z8QuGaj+zF=Po~Y|(v{!cAoNFR(WN4{K}CvP zP#BiKT0)c{Ay7+%j8sBld<|Iv8k$4pB(vGA>3*d&q--g-=TjmHE@D4KrfT|fW7kxd zs&jT8oLNZ;X zCxS3#&Z@}<(9okeLIm;*PU0bew&wGp3kT%mVa)Feh1(!}%em6?WU0MMFx?u_w^p}P zq9LdL_LmFXoF&Pn@GYVVB+^fDY-rNo)~AT~rZ!jiKo*i~JI6%DxY)m2JM4Me*9d8nZGt&_NHC z&;SzxXH+;DTtF_%aQQTKoAu&Ax4_T{yoKv1_uau2MCl&Iz!$!bjxtASxYvNEbqarF!`%z9H51r0q7GCQC$ zbs;ONi-p^Cg5I=r5zYNvzu1~fsQ2EXkbe!v#{WDR?hP0de(owHPG3_H2k z46Tgi&-C`rvToR@WFH0zUoXcG#fqlGcKaGZ`jcagPt5d30mahNRN3fElH^=<4dYUV zlK5sxwYkWsj%r*C^Wp<4BJRb7d5W`}pkT(8#Po}fbrYJGz_fSAiG^To==TP1nYVt$ z!aofvV%I?c_Q^Qx+8nV+D}uxlD85;yMpp2En(AZEIw==;;f!?Z8Efk*Y8iP|gAL6P zLoCfqGf2pl#k#WHs-{1)^w@)C8M*oaqS%@R>lq5^#Gv-pnn^Nn9yxY)_F@5MO{$4R zRd^e|KG#I`p}CR9cKkm2{xZKH%TI(aT5PTD;{$#)u_eWiCBVLkyVd1hrfxS`1v3I3 zn_oU9q#A=88v+hCr@LFa60^UUqq1K;?;qV-U!*ponRprQ{qHFF_|6YT{O}|tB|paF z*j%#1KCVCPdricYp}nAztrb~r{-Wg$n+xC|yn3DxR(|TVAD70@Ks{jE;@8>!fqHzr zK3{LaCirf-uH)JFCoWHC<5JC4?H5c($LT19SgIPnVa7Tv*wcB}HHm;a{4bRx49BDE zl|@a4J*c!4%)Vw9_Qrw%LDdOFQEH*1ueHlus&Ylvjg`s7yX(GibLwGL#{e2h|BGJp zix^x&wD@e}J^lf*^J{^xv8>tDyrH%Er&!x{UtZPtisSy4$ImU`YkFU0R+z!Xxm#O; zEtY_6w&CoN1trYUZpm^M7S#;=kL&w*+$%wO2swYR{h>}oK{?08S{yh^<$>Iv%T>Sg zYjYe8zm{8Hl5dLkjr;1sV?Dv4RDDd7Q>rbz>V(~7e{pf#8mmq~NH=Z^aQW`PzP$?L z46bge#oc~pm*s3HL2kGOP#(?y+M895sB{w3NFZ>tz3RZ{6i`9Pt#fJKdE*>M=SV&5 z>F)j_>^&8*!3C7kZt z*Q}NP(hDKFuDlHNO*J-#NKTY5UVXW?^(t!Z^AgPF_{$mU<&33rEc00F*=9%0uo?uP z&dQCoWg%J6;fMY2`6-BP^V$`jW~g2t-5wQE1|V0XENhhaiwBjwzyT_zzFMs2`a=#E z8bo4`q3Y#Gj{=q&g@w)gy~)amlH_K0-Fm$z5kX)7!Tuqn_k04$ifZq@7o&#CN@ab*+^Z@`c#r_L&xP%c%c4lhK7$jFsSo2MbkyJagxa1=O#0uLp+=on8z9lO zArv6zIpuRak&#^CU0y|>hklWOmA+-v^H49Nt1$MXW@$Gzd{ z^fYb_#c#>q%v@$77>xH78kh3uUEQ9y_aCR2XmB=Oemz9#F0Pv?nbasCR0#uE9>bA# zX4Ylvmso1cdpzj z7xngg5J1+HpY`M`Euqw}X{5?92}Z(R?69-u?@SS_f`?^X@R@8K;t#H;Y&N1BK_DQf 
z%45CWGe5zCT5x_QpMUsPnAF0)+?;k)OJ4dAC$&L zk3Ms4EFvOEx|8po*cTZytfO$@=xpW4Mm;EbZac^ zGFI||tSFg!vr(Y5{b0#h*1(sHd*54Kx>cEmRhHx@HBG$!gnOX2+q$P#k}}HZAKfC= z+im)BfMhh~^{3)nWR6^LIs0KX%QQg{FMTJzpx1V>+S{qZ3Uv>1>bMqpP!t<>lZ zzTFYnc$oC~lt@n>sir-djGmSr_A=aYSZQ9__R?8()zX6oLdi;+6xXVI%YAD~XY&qS zDetL-fS!4b=5Kp)S($@NJxrt3K!34o6bG0}?$wAAVkwmF{1yblJfDsqFquzCbT-FY zN@tgHN4d{ffJGXRfixQ*AfME-5#_nPBb3T0=#d&^t=;K~bfRhe-P#oKn#xG|$?M9X z;97ZOrsum7^581Q)?JB9?OPO$2>g$cH~Y8kMm(ue+f#|HXxR-$h+;$AEOCZcW3$%N z$plg6PiUkZe>{xT6#a;DoON$G{SJdVQT`3u2{{wcfLrjk(8R75X2kYGc~*6c_$MdU%4iyY-c z1eU>DJ-iLa2VZJPko+?N*P7JwtY2U}wm}moCLb0jj=Y^o&+5-%%#xHheyeel9^xDLuH%5 z>Bb{srN>j9fC-&4Y-ig`s=(iC?>e1Fq;XGE2>r>qA&pF05jPFwwyRob+ar2>o7Jg3 z|AhA>r0{f6B`E#Lv8A7#0znai!`;$TL43N(aHzkka>SBNMn`?=TFWeTBRLYQlcM)8 z#@Xf9Kh(|^TU92ST{o29J_QC3q^DWz)AN3|sLmw~>+LH23Rm3fZYD)SivtOm-;A}^ zc5^!&MyJtHUW*KZ1|KeI9Q9tZV^_H>R23*(2`#(FV%-`Q5zoMIG2LFuftI&#JLl}B zMM4YHUvIwfa0<6_FdJ&P`|6>lnkBCzPZ{LQ;^ir2C5&_Q7Dv>$>#8eaQm0vCT|L%# zq439=G&wB~Xud=@j6g9T?e)dgB^!@vBP^J?PgkM6NFaqmrtH3cdec|1nJ;hDZgl3t z=gkZp^6ot1^OEjqV;`*`VN*UCJ&MsnGx@VIYQ9Rue-efpriS33=0a|goBBh@d#7V- z56VLKal@O;Yxl}6;yErRuOYeET%jF<9&U@rtuLhvMTsVPpnCAl1{_FN%NN{nxfil$ zP))!l^t^f=j-$(F2DbwkCY1mtXQy=W<=jiiw%n#G1S}*i2bxswPP}zJKPbNyfSrMcXMi5K@-zm zbGfuCI*oe=D`@tB>| zIFwm!X;f%gkW^uzr!Y7qCL)PB)ZUST@0zTHUQ%Ff$XH6#-coSY@3eNHTc^a~TH4=D zaM}IR0a&Vue(p&HLk1(YhF<$t*G$iAxmXUTK40Ht?}6KD){vYKU(zi<#I9>CAlO#` zXa&P2Xw3TD(4;G9p1UuHR2eBQ=cmY;?4vV(5nNl6*8RkGrGnF0n)l6gSZHnw(qq@> zJJ3ttz5bwXPEWnOx~_P>4rjZ$k0n6;;BvkSv2i>CC5qPCTlX}jDqi$`>|keo)pT?g zX0+3KJzCS~K2aBb^n@+K-ig)orTezQYrLGfh3h8M1njslMa|VayE9kwqZi!2sxHIM`sul+K6sDUF-g;j5Np*W`nugYu^N$~LQ; zAZ~oF+{! 
zk4{Dp!(Qs=t@lTGA}_Ytd1rOIS;2QX#;f8{ASn*o9-Gx<`~vUjVSYAM1MD?g1LrfW zaq$#@0s>MAXjz56i$d>3rDh|MPR2kV(cgB|b{;`9XBbgZJfJZ*oSznqtg!oM=x-(_I5zl^?3JFeJqyj=a`#VPB=teFXE41pu;{UtZy6mQ zDaXr)j*GL`M>;(5tk&m9iPeNz=~l(ZSjfrv?=-(m?vGc?AJf}E#TiS=J!|zZR3Bah z`q=Qh{dVFfdrD&jH1;la9Z}7jvOSJZTZl8bfPmp^0BTDX-e^0e`|HP=e@BmBX-^PO zIJWxQ-=OlzANFhZre`57SU;AXw+Qt@Wq>NDJNCk?&s}*ep`=3cSOcBO8u|b50;ph9 zb3PKndgdzrXkgzsPOSh}6w@MrMEfd>)-n%so2s-I5f1L__>P2x07s+0R)U*^;gRVm zU1HmQGuy(SZwzoT1&wp|T<# zycl*qGvIkxy!KD09_HXwfA6+m99Eje@7Y9uLqwWx>950m@Al0uQo&w99vLZ)-z^eV zI~Qe-5s5Iez`y|cVKy{!v}ZO`!;Yc)*LLJRlL?9ne$wu+y_FSRu-5UfE#Hy-?YojX z5S=9pvFMW9-2Y)M{p$x+A_YYQUu^WR`p5vFSl)juvXetcPd{uOY~+XyVI!9vg}Qgw z26BjwmH?tMyKG?CgS<=gKFhi*S>9yELlvI@Lb_UoW6zDt9U0hz)j-wLXmvwV)K^*v1z9FlStR}YVjgxr~P&Va|R-*L6Ni3TU!;BYZ zx>VYlwnf2J_CI*m?ncQ;kToBE_N^vaTSbamp_m|c#i~c z3iD`*1dGW6`4`AUugy)tE4g9A#(=WAI_7jtTV@j> z(_Y`)kPSqf&0;5q*a}A!Cbf54%KA&V zi;Z}-fFtYQm%Yk2OwWwE zblXsZh&LU=^oG$yd5zhgxA5S&saRhJlLM34O9AZ(fAnI}uuuqSXlTK0Zkf!w48K=V z2jC`Q;ga#Y1S=tH)!O+2mOfXvRyl$wSmFI-GbFbPHrvp9Kq&;s2J8@+j_Yr@emT4U zC`I~^w|O2POcH0p4&J;cU7JmYUjchMY$Olef8rO2MvV32`Ho9~U>XNf*OUfF&?4?Q2o3Q?2*^sLvFyhCg_Bh(>nb~uVxgXv&P@FFT z0wp?G;rGjJs;=UVe^{zoq=JHk%o*f4R~TS!3;o64*52g zVe{Ycwh|7LffYR)ndvu&5~K2O_(BI8I-x?&lAai{KzqK1j`+krr6dD}ZWaXEeC z^NA0wFw^mmh5^m=t`p-0y(9X+As z1hZwU?%sp`0r7@7hcRC#8u7-0vvcr|LOefk-R-Kxx*@-z==VlQp&Nl2zt!Mx+*VBg zjuWePyl9x5VLUlTZm-RjX5H$737>>M6>HJOn?l=W&-M!CRlX=`RnrqIvX4>Xe$ zc%K5Bm$JYHW)Z#sN_n53U03lB1V#GkgObv!nsCOBoB2DcP+kPSZOvWas%zoYmt>x) zZC>mEN@#mXPgAXNO(D6-@eo{F$XK7B-yIugS&#S;w^wO;LZkj2qo1@T#bgZuQoO8N zrnej8p#7EjCrB*Gq33rW&a*GtSIaukE-S;#dPmsW{%6}d#!2*$I~TdH*3@Y0U4RMvbIAUM$XW7IY(m3GS92YGw;>e=xgp$zuaD+rfZB+ zuwUFN-tODFfUK@Umm+nK%J$;R-;rfY!xx9A>+)e&9O1yNi!xxEpb{J0uo$BUl@>Ti zsj#$S>tfG9VYo$rjTo{>4s1bY){4&4{|9@#?shN`ZuI{NH4opE_8mxOyjjj;$;}n` zqDci#xQq0PbOMLfOp-v7$`pQCu6h#ec^Gcu&wGx4^>xcW?N2@@a&^|0l0eH4x>00wF6Q z$Q%Ca&;MZ(|LHmZg>|j|$7}qbJNl1?_Mcapq<~>E|FzFw7Rmn$DDwZ`4_Mem)r?4z zl03DRp7+X^C~vA-qtm$dX}vn{tszg0=FLu$nw*%Ju<&a0RE>9Cwyy!5xXtt_r8;ck 
z@kzuGC9)E(OxE zh=e7E6)osvZOR!P{}$A~9wh#f8gYj9x|;+^cXj#VqjcP8x)N1O4=1ZpLwhA20|bk$ zE}0vJTsfNT_EQT)<{sa_?Hqj<(}4=D)ZoH3V2@z>kBq?J&}vA{fZMny{KZMi5?Nf_ zius_Ys6>*i+a+1ShLiI&ld8@Z2G2`s2a2HJ`M)mZb@|0;K3wd7SxRJ}+Vq}$;X3>~T zREQEFoG$?kC=myn(?ZbtDaSSd7HG#>x{;cJ zVWExeq9vbLfV$f&sf?m|a<-h42xdz$os(Z&SV2xyz-Y(!KRb(yT=*$TDPXD_0>+f^ z`wHQ?h>SdymwZ+O5UL7OZG2z;f@VWWLNUG6j_3<-{D&U}i?p~g5)ckA>1Q*-hl$tU^ z0XsUGhCQg+OPLn6?|FqAjH5{kAjS^1VY7r+W5C#DnxmKa>`y`|>fg){f6S+zSV|6B z8AD|vCtAfcz4~sl!)hQ#OcGDU)M9+a?m2(ML4El8R?!4$1<4Vk)_ggW{Y5P{JnQpx z+CR@^iM;xHA7uAg>|m{hYtkwG>b5JuPi?_NQ1-;OZNB$7HuJh^mf{I9Oo-um+b?vAeg|TSSv+O2*vDa~JngQ=R9;C_gTUxMl{ZenF&#Uv~qOYoE(DH!PJoNCrb8gbXp;S8^1Wix{@ZtBdj}j@k0U zhy*NdO$O9}CiOReU?r^K0*c$OM?(#jZJ_ctV{yA8*_^6@LSFz77{?ghbRG&&MsZA!KqC z&5>sP_-$NPD=s>@El^YLa^^U~xUO3nPVqP*4Ggg4>NJ2+3d3f1G9|j`s??dz?$cbk z#9Kif=?=eRXobm}&lLKo3laxCtFGKi5?uG=N+a-k=Y?}N^2nT;N%O4$-HjB|1}6)n zi-YrNvwvIO{GO~0lbH!>Tow&0u%0ryd5JsO< zy2a>w(`1BC!dT(NkxuL;`aLl&3)Nps6C>6t3GkH|7%nZxhS+W2p5N;ZXDAzPuMf6I z;7=@PjDb%v9$XDND%^gRMM9-i3I)Xb+A2Nl5qfr~=06PJRreUW`QIMvRw$T&eUv{O zqp9Lh>2lwDw*-9^#BK3FqWS^zs^mEP&I{%v`~Qk}N#leLIz<-)9WCgze~dvn;(1LVjF zmH@Sw6Km4GbX%QL0+_@xOc4|}gAoI0U8s#Wi|@cEw5}efrMZ6oDbL`Gg;W%+#{|Z5 zpVs;BL1L`61v@=tt%DO6FtTbXF+$0UUKs@0898u7T9iOtW|DZ3QI6%vvHd*;j_`oi z#prZR^TVGfS!O^hyo$~1%A>ZJbCR@u|KTbdMrYb}EeM`E82L~zWBZrE!zz~gyzH=0 zue%wm>`0bOnbl*l;>MdLvx>L9uXrV`2o(>f*bkT24_8wJ6TQDs^1kd>q`4DcDMewM z54~UO@6%!b z(iEe+$t{?zz;!}%k^)x@52d+%7enH zGIKC)n{KnQ#>rB_7DtwaV?IGvg~^8@43mGn!<<`2?2J+_#BQK zTlFnXLY=CvDjO7`6vat|P|5jqS%)exO_8o!a5BmC|0?djpW%GB0DudLnrKlLi;$=h zJtU&U>Y@fKtIO)WhA69-UC{z!nl9a!Lr;KpZOKTKMzsF>rs{F|~F!<%x%kw}!hfR z{S9aSA@ZhL_bqp-l_7$8trsr6)SMiO4@q&}ml}DSTy{MhYl>rS?UgixBx0VDQ+-Vu zvs+J;GY+0qWNj(s``yp`G45gEMTxQ+V*nyG-lai<+xQW^a`zmhY{JAR*u<1KWd8GY z`W5h=zP$q?M7&rv+h!MoHH*Gj+9i-W?l-%Ei@t@2?y5dP$jE8N%sQ&rrhVK}(WU6! 
zNTU|YUz2`vOQ`9zh9ZK$>S9cc(|0hrc+V;A;^dVJjw#I@iH=|N1+~gQqA#<|(vmxm zDE*Tg;dm!f@*OvFZnKL%;oznsd8h6QjJ``@_Ffb%WzuO$wQN0Ks$?^pxj9Q$YSmIu zy3vVsMlsRvKz8*i*`mmHE(Mw1aGl#2h60LLLXm}EnP=!Lbnb@tFP|+PMqKGXnJDJj z@6-bZP_j7Ka?QC)-h{D)QIaG zP5HeL&ViSW6;;SE!Fa`zk`34cY6~xJS+%e&xss%el}3}6gp;kw#msSC7D5$HFlJ&Z?_N3^XNL9_BG4A!SJo1aknPoA zI-p4W0E-WR;d6@o>_$H@%c*3tlm2NKKm>`@RtWW6V#n1*wu>%JOZn$NQw`%b9^IcN zJ%;pDmzX)fX8AxkZhTu)u76;M1ab^_Nd2WG37E)4r&RbxG+Z!9m=th$A!0p-f{|H= zjk166*EO93=ksM4Gmw%gjlc7_H!MGy->Iq#_kv*l;5DbO$6lcerTXnd*XC#I$K~#R zIyJE)l7YQ1*al@--Byr{<*d7ph=Ua!1x(u%+srnSk)tv~=!20j=2ZS1Y$R~zE9_T@ z!bf5{WUKkYoK{Bs&d>weDXPo+fT%?aAmIH)0B*z z&AyRq@@r?(8O7hVh^2GVZjo+Uo!WbKpU@5*qN z6W6c6#nt>&DWbRg!-TSwf-6c=Y5Q^OsuiCp{A0wa&ISzF)Ec?HfC051)|k7+AJWLH z23E1NM>|MHi9jfapckjB?8QsH0-G}HZzsOgS=W@Kq7G(-N;VttH6An9@>>k$)1Ja; zNYLt55^eoM^l@dY)GY07zYba(b=OXAM@@PnzbvvAJ~}y(c1f|8AnktiAR| zfd8y#o5QcEHiDFau75B`o1W%vQXzXh;)HCQLk5c4{%Eyim8!H+g_k#fy&#GRMfCT> zZJ)iV;_7Do%grk|XR@!AtI~Pm6vT#Z!wYRR%K`3YqozO)!*7PS{M~nnh%)^y#|sT} z2x&=B+mYh4aE(vN+w8DVO=IZ%Yr5)RwH@Lax%-;ZRn|<;TXkKQCv?6P`S| z{7p!F|bu;bN6nR$aH{B?&BCnd6RW1 z8-q;Ru~qEJpYvs%dp0q;%8SR--fTT*1=4w3iWAHa=_`Jg=a9xksul%VtlSz3@I%-I zc36#v?HB@*oNlL*^ZvOaDDfdU87glpxHoDe0qG1l!?ybX1K2b_a-aE$fPZ#AZuy!> zv#;FqW23jD@jJYJ7TKQxaB4?MK724!HYub`w}H#xkN+;k#+08zGHuH72b#o+cbikQ z_WjNjk$OJCD6R|3X+nlcfbPl|5AvEjp zorXvk^FmsP=j8OZ(}VKL-yh*L9q4=uvni=w#rAfwXhI}hpM|G3I0ZyUzw2Ud$=KmG z6qUgkUJ&%YPDC@F`UV9xH8le3ohfB~|9n4wSA1kyK5v=9(E{SHuI}NnQ{x+KM4n;m z7Y=Z`5~O3%yWdsVk`aYSjS6Zo5{|m1741r;4jA{7l;d=#oJ}=&|j`*(S zGjJidRSk?GM%utn^!iO{lJ^SO=8)uKimWNS)x6#1vOaP!ZQm=M-Mq2b<>+hEZ%Bnd z+!jr6Xq$H&%v4spuCe3wDBOD3{`ghP6ior z$k;O1ekK901%Zf&aE%-@i-n?>cREscDIf^={j)*{hs>IUEt`r==V%X?V{=F+H))| zB;NgptD3u6QySS}z+KJgy+JwVoTl9vDrOQ;ztaU=Oz4vsE*7?N|3>uQ=~mgg>h6=_ z5hK*EwgFs8M*&F53K)rwqBD8W?ok&;wpuwrM<(>K^jwt!XJ~d2$Dh<@^9zG5^Pe@= z0cr}>PM)E%ik(R-b+^|`BdSeyu8tZ;VdiXZb-NbQWXQ0T)rdm*az2>XQ9z8;EaSUv zN@fy%>{d*V#-?O?xBU@Y{-uQ}!oClUkQlFMHMY>jWGlsT9GlRAa5mEi%`<_LE+W&- 
zkNNj=oO-PM=4fw6RquZWd-E>roc^xk6+b8CDp+KI@i4D7~*oMK)80{rvvelyWWQCL2)%GTZoD2Y)~E<-vOJ3l__X$=d9+x9JcO zsz^M^oak2eL6q1+GN*W)Xqcy3@le;<$$iz6IA+F4+6AA4QfOY@<)k$$_oceoGj#d z@b-v^i1_M1I|6LjCYxT|H>f)c)6dQw0eOz!`#2)}IyyEIJV zor&*D^atU5*BpDvLi^$az`dL6-z|(rOt1`ILV|OwHi{kPuS>LEHR}gH3Y0(wtZ0mQ z#>t|7F*=U3P6)Sbo~0yGCll%o9K)2LKIka%O&(^!Zuju>BV#x|%GI-cPSC6K^-AlF zpkr((e0}m$s$wcHVw_5@VT3`+VqTAU_fh!uRlz4}2_nM*OZkyA)aW$A;GKVbe56Cq zB4uam7V_EHpl|v1pB?8hr0LI1SEBZi0oO|bTe>;kzeXNwmn&(LgU<;ZnhU5}k;$zQ zP9(^DojfrO$!0>e-j}xNXtM49+Vx7ndUNp<|G=E|kW?WK*M?Q8?&iOoZg()jap&$|8ZO!=#=nT3@p1cY>0k|vy*YCl2N$N1RTF(`~!Om{4C_c%<^2(@DW zs8|?GS!4u}?r7$BKbF+%4Au6 zylCK0;D`1#W6`AO7x@9v_jX5av%?>}`Qbxq9;z<{b}S@nk#3fJM|e~ej56#)>)=DW zGT{KHR8YB3^o!>BI+BS1L@WY&;e~07ZYUvS8}}*IgeOEO;)>Ci-{O8dqGb$0kfJR* ze(XuEg;0eIXK|33fjnGYOb|sXjk*>z5Ck{mql{~&qWlEeR^!tt zNiPji2-WFOVpO-#EM56MWCNj8$&WD$rF!YUpx9T2R+jNY&`HptDXHz}N($f;&Y*$F zoVOhUD9O0|dDJqnh>JHS*~PyRg|2@QDGWR3ML<6gYmp{H3e()Pic<_zy%V)$&$CJn z7wSzyy;Z+;ryEr?DL!n)K;(J%0G9j)BF8sE>`H#~J@0cUCz*4+X|Gg~qa-fk{CEK0 z2e%9gZ@JJ-n2jp3e6KPxF#>|Z36}mOljBuXucb;VWVDySIH0SVM;!x6U{~z#O zWnCitQ1d_g+I4UJ*U(LmwL*k5@G5I28T!$}RY$t%<1@691Z|l65L8dM(VP1bH`E=@ z@;ra?Bwe2&%XnM)vnjTKBKT=E@X>w~i9>P0!^)F|FcHf`Y70*-sn5Rgp_dYc#FZAI zL5Glf%g#gTW+0B7eU^j%h!6+S7rgxjB@$5>a;p`&dZ3W94-bzC4;+V*C6a+eA&mOS zL<*)RIZuYk5|z9}JJFqU1+5UxVQ;=R6wumY=j=`+6hT5ubNOMthQE5zj?s&@T58wqu=(A@J!*wtSsNhI^Bk?|pM(Eas7BU4qg zcBD1mi>_|2RC>|`)oicF`eL5vSJku89zsbut0m?{4jG-H_+yCM_aU(Oqo<8is&Wwp z>YB1p%PK@{?V-<1#Q4Aa+-{b5zI#XSP+@=U_*_Obh$}y+X(e!t-S1dTrwSf5X|D|M z?Z>#tkOb*v=q*7gds-&?=-c8$uc8OquM?)s!uz@)P_eh#>mos*3PJlmKNFgt!P=z% zfgrM8@6UfDi>fIAf2uEvb<6>60LAFbG}QaX6+#~!UbGDl9U4;@5e!qx7f@3+5|{g~TygOswJ- z$~i)}^1G4Vh1(gd>rGaSJ*kT_5*aMF(D2i6K!m^kcs z@GioQ5h7~@Pr%Q>pK(89=A^Bt1<9|0sX@m>_s3w1ZN#6;bmuwcE$40KmF78rs7z~0 zD^82hDbguP(<+xPm$yz?VEs}0Wpw(!PsfCz28}m(h*pV~h0>OmJGL@*vEK{ZIvk}t zCSJJz`%rG8>h%Xj)nnC_lG4)jQjgLYb==}nbr#LlN^ixdxO=G*)ak5&0>zwxV!2}V 
zDan1}{f`7uW(r7o`4i_Bau(Bd33VZLpHP~D6JVtRDp{p+MRPSaOL`a-Y6_BQFJm-Su)k*Xj;~51kD(hf7E%u;CQ66z`D@5AXn#Nkv)4nM!u(Y5V(8l zgtKck$yzSjO$#I8Cxry`w&2KJh zl~I1IW%xvrsK)r45Suiclb}t*GDCmEh>#;UzsMbtf8ft7Ra;JYhHfFlfQdvA?Kyu8U_# z%cy1KHqSl4T|96W=VxiQYL0Sdx-RK7UY$N1&_tpU{udYEsF@e#TLol(fOBd8%=$nUke)$&{*66?ksWTbkxBJ48E2 zBR7(Jkp%1|pVc~xxS6k+1yT@G4k|cim1lV?t|}^*iI!k&(wA(SCzo@VaGSUrea=oV zW8Dw0b}#EL=l8D=cQW_fM>U1D#DPAFGMy#zeZy-;r-{~YY5A2km)7}pJd zZr?r1&Av^2uz#w*|8d9uH2K(Y+wcbb#t!-&Gzpvz%sI3RjG*~^b&XcDcBOwtK%Pj5 zzhITuT|J9o_R$3c&Nw5`f*7sN*xhBu?^k8B!djwbh}6$6LZu<(O{ zdKC4af9*PHxXx^sq0O_-tIt32_VYe`o&Gxc6)nQ4YkosjRlYzzed3*cEVkUjx##)J zmXjQ19=}Pb)%P+%eO!`#mVWC$Lj9y~Z*kSIFBKKi-0jk?InL_jCXWhPvYuN&b*Cx# zx5z7z1*vB2JHwtF5uBC>RwmB(hXuSi4md*=*@Rp!I|2hOhi)km84(&dztXb>S`Gcq z{2P(79uzU6SA)`V*CWIu(r}xZKQ-K1BD8La7CTsI@9j{xP~&=&Tshp0 zKFP5sbFo}#)m!k|H@e;pzlCIQu9*6y{qwxPq)7coX=P0JmNxXA0nm{D~~F$W`0Z; zPBoB_IQ%SbZ&)8YQEb&DX6652uXeIqW1MJ=6n#a?BCs+^XK~0E=e8j;xmDmn+nd?q z{pGNDdlI8?G!x;y!#U%@;hBT?PISf%ug|UjY*14K39cneqduSG{L}e2vzYA#hAhqf zk1Zbwbfg+cnhET#x_+!#npZef=(k{a3@AuB1VxeYvXbOLsdUoh!Sk!ENdV4#Ye{8>eRGDA-u*&FWd0M}px!7EgHJ24~ z;;?$-MYqSfyL>l(usN^PpPhB1@Fah)``mpj*Et(ndGp}#@Ib9g@3Kbc+a7FY0&!Y^ z2%&>0z~AOUXxehl%tID>{>6ou;J?pq5KwJctaP0VjbTU%Sr=X)v%JMgBAvA zYTBu&SUh=s|9hQ85I4D~tY^6ZqVP&c%W2T@00aPCl@DMPN1Q2h4 zPmsV%5R&l!e3pcygMj|)|4&f<`rn@+ zuU~)4oGXcIT=i#a0q49q#}P1mXZLguG6%-VHD@g?&;Dyk0*S;08~CYBG)xP z!&yBSlwKkS`Buh#O?|Jo!N~ZSzn||9PUp?DgW3b&ZU(=WD3i+zJzcI(DhXW)t&j7; zFgG98fE;(n>?UMrgN_T9hw6)t4uwK~y5eyX#6H49ImESSWq38M zX-kLt1`{fZ?DTR{2@Z_48O?8V`*<1nPjN3{>En&x@nuXCC5S^~4HB zA;jq+uq-=mUVUP20VcA3+KGmy?|o-_dy)OZXeQq~4QAs|M$aLA2EG`9z+o?N`r$py zIm;7Z8A9xg4jfj!D}cqoqe4i9jZ+2vYjC^jF!d|$mgXH>SojV@tety>*~f(4kK9i< zQ03xl2}*(pSn?o|+pAf9x$l2CuD(YwBg!#Wi-&nMJkQzp5)r`1TLi?E}Ytt+_jJ{t_(f6iD z;2amn;_Yc&=0^hyXo-$jC*iV?g=W)QSlGf^a|0RmyI0cV?_n{$|32 z`V|H7fi*?WG6?no>8QGw#PG1X7qv444J|iD&>d{wgWs9q zFtSJ3`TRjLDVGS6D+gF>&)esR6FI?XjtObzD6NemQYZ4NsSk2NNP5UK+-a7%HUqfj+YnG7SxxRgMwJNsFL&srBny`B)r@1K`d_zx5s 
zasK`g|2?24JYXjOKVKj=DgU^P`;QJV6&$itz`}z_y>Hu%(*S$#06gFUr>lhSFFg47 zp#C~||N00=@%ls1cL`twZ`T4&7c47-VuYS~A_ebv4L8DQQQ3z;V$$(co)1S2hLNm2 zyJ;@dRA$_D>44y%AkjyJjmQ^wSEI28S7!~ zsG#BFSue{Yv($eqoNy^dY@mE8hPtO{4hp<3;P}Ofy;{am>hS>{$BneQU;C*I(<}<; z?N4llp3hTS0|5tZF4K~RC4NQs?_o6W9%$hH^Qz70u^(2Q$txX$gd@;};Rf_vmfzOT zo2AE^_r!K#$}wW#d~#jK$)E8W&v03BgM0!#hW+QFWcma4#+Of%h(_pa zL`s%A4LGGU08LP9ueR9-fbsQdP5rA<^B|(no?Cda<>Sgt>qIdu&QC^cy&JY2cn?P6Gu=+J ztmv>UisL))qwGx5{DlAe@mx+5Hu5Fg=b2aFa!hO;06mw2AzPn^#hOzPL`CB^fo3#6 zV12>qHcj+P?g!-~Fun-T4r>!~ELR7jASm|;1D@BoMa^fTZ09+nZ60(~zsRg+tPxiT zOqIH=ZsW^rrppTc^J$1sAb_32PUlO`i`FArC0%O!FpnfaJhAIQpbb3v{V;NH4;VX2 zjNpAyiY1WJBi<~#WH(?Wl3rLZSQH+WmngUV&7**WsZig<`x*Ge76d;<;hgvntE!5; z-S~9rw!a(MvG#`<27Jb=KbTzK0rBiS>Urj)tsd*b`+2DVM?w*)Qdj``#&#EZpFs%_jULNHpz154B|1 za&zBKHFuB9RVOhJwe$J{cycALESQzhs!9ZbJ-AI8Y`{lkks~|B{1_YB2-sPtB%!rh zPb7Jz^pk;J2~O==(g2OiKmExa4stJck68C@Df+G|j6nf1F(1d{o|4a3N#rJjEZ2=N zU+>f~+AN;3-}n0?YM-f8L0sN|!;=J5)J@1524aM}wdmfEvv`T@`mUEuL&6a$_vjWU zKeG)os-CfH{#`4~TZ&I1f}%cIDypC7Bk{azuzs#$e64ud?8&KJ=VSksnO&x0l)P>>05IN1MSUA;x!U4NXiY}%RGLo%WCAz-WEJ5Ez3={Z@=t@R!vW%l zPDG&*78G$1gseBNmVqCr9R@29$lRD2Yx*cmKQP+1J~{JjgN+{tu#gG!w5zIs+)gOy zGjCBYqB=D@?k876;l@)Sol)IBxId^wD=n*$P4%cnJ?cWE!5I4fe{pB8=r#aQ4+`>q zZkkus9QpX<*2AWEAs78weANd6$mJ>^;7w^0>;k!QoQMHjIzJ%vWoI> z((0qV;tiOE-DVVbFp!Paf3L`>5$5EnBW#tS&75M=y-HLEOX@abB0&D#490k0^Mrz? 
zw#!2R)z{5}jd8}H{Yj!Rq=;Dg-U_l(N6IW7z6}Q+u~>l>()j*MG&obzo&iy}6k{mB zbJ1T!teL^6M=hds^0(LJxYT+{X$2zxNsEJeuw4CU-FM`p9yHTV@O}>__CQF)Zk!Ye z!Wt#!KjL^!?dzH=T%A?=JTWjfEPXXC-pu0GOTVcD3V}Bj`X&}Y4RDHLF{5w-4GjtQ zcnyVM%ZkPUJZ22YywA!*5#kcb@DceWU`}1z0W8hr5de={9HgFlx~@{qv-W0ks+^}% zrfeR^@asnS4gxPm_*txeOlv^aZfbY1VI%W18vI9`*ky!)6%Y$PAdn)m7Z0nG$$w{b z(fK39qA;^QJ|%6Lwc2CRx*>?=_ie|d;7*1^2Y`3ls)C9<>oDA-gEVO<7Xa`O&*6o` zGDBs1o`>nb*YP;|(HV%P2!`?#@2fKG?D&(SPr@~Ck^q{J&etP>g|(@(#0%7d@i$m$ z>uLm&;7*eSg>9QUPI(V&9H$R( z#asZ1Jg%gKK0U>GpoN{CXdG*k!})fry;L@8*Td)BsAS1$gj*n=_skONVWToL_^nI| zZjwwxg+1;>6Ux}uX8EUIIKn~&SW$kMca%2Ys@8O{2jYJqupmq==K~DuK%hLuGGiRv z+PfGPxlj3yP z<I^$q~Zh(N4~erbol1B#nYy4*<2X8;m4tu(C`M)?lPPJy~5xVRu~0G|Cn%m3fjbq+_ zsGSEnqVgYBfm~15IOJrc^h*3PlPqZR)z&kQxHWQl2XZ?%dk12^iQlN&uH;y4XL1or1wVxSY~bCdmiqa7*Z?DgY3FHl9@4$C!6`YUznVNlD$U*H z{o;vYFWP%{kVSY;{j&z{KkI^B92m*no5}uc|-n%;$ab>n7_a1FOFJ^|aD*qkK?EwIm6Li=N zHzSx)@({<~1Vl$lTWyc8`~F72<3@Xbsr)$Qq%_d49s-oC^OZq4c6c;z(!KB~q>XYq zw>ywn7#RsQKi98%d-TG45z~?B0lDjP6Sk&U{nFRLcst!nVNHaAD3?WR?p8iFz+&#E z2^kTrf8T9aTp0J@n_t#73dXlOHOzkU;1z?CXaM0|1AIZ%FxTSGR=|hm@CB`u88Dor zyefap=-8kVEK^aCkCnsQMOoaQt}~5$O_$@*{bQavSU^EUqsah*I&L~(ehD}`1wv~h z8n!9LH$O#K_~5o*H!BC2pySxPhMg>rCj14!c9-?$*IXEwe&244C~pj)BdXHBSXTbp zrfD9}^rKMO1ssoskzxq>(z$J{{RK!-4+kO)L&=x|CgvJe+!;_Jn6dJ-$S(EJqxr~4 zReBKyWUEPP4{HX4xmR2}uU@*~E1Hk_G$-VjcDdHM`Q#J@@3u}iB3Oc>xL4*n$^Haf zDq^$?Q?5u)@tfiRX`3WO4;7~Q?vaQJtJfg_luiuVgf>47^=4uC8#4yd|B>smz$d2Z z?y=;5`A!svkDlv`L3u9(qx=~W+O*}Y#{sUw!U{+VAYrkQBPb^O>7-W-Lx?}XSHF|s z*$}=DwG;vAhT8>`i>09t;o=#*7WrTUe}pA#AU$#FC&Zpr1X!ZudLY!_U*rU!ex=8K zIcxRN<==Fl21p{3V*qDP*8EcN16=tsp}R3GRwu5mK*971y0i zmk)n_X_3C8IPX(0ef8s5@&HGag2ef?uT0iYSF@EeXmTn#U$LOPg4$pStgzcCtEN!ROG!u`+MdtlosUXxq2O9&EouRU&Mqfp=n0W{r zo1AjJ<0>CmuWxi(>fCt3apwbH8slNUb<(C;M0-bMxeY343$YJ#sCHXwY5Tb2LS+W} zfTQF=Z&`%p*4q#*X?@9;P2n3 z9Z04&(EX5;ljs`B=RQ^ z87uPs!JcJG;HzH?Iv}9CdVjy0`nKi`)_D=wSgU{TwOHo<*bh9e-coyR`l6+pgfyY=h@$WxIXj+a;xP|#bDFA-0Hw+t=o#o1z)^X|a z94Ez*6)V?C&q$B`uDI^<=4&1s>?2cIL4RzCa6u@iz5#g;bLX} 
zzzp#4gjs!Bkckn=750BxjUTgA8&?l>+K>PnQCvrBrt?BEjn!)k*~N8WLF61o&--}V ziDe8yMH$5r-%O}V#=0#-unOcYY4??hzgdYejV1F8UEPiwcQBi6CXKa>`>24*)<$Md z(p%TISd)x*vc+C#L90|=boo1$%oNRH!3!?C?j2{C5`jl240C^|+bLv*< z6}9-hy`x74?|i-U#|7JY=o+AL5cxtdw zXZtUg2b?lT3k}O*xi99^oZUA(Eg9`qPVkrx=1{Id`S*{OE!m zQU{&;*1navdX6f1E=%?v*L0-NBmic@dSanNct!bYM<7Ol$4CH9Td&`a(vWp|8D2F= zL12#)6GxY@|G?+@B0G5K2U7MvllCn=jU)~Y@r|}R_1Z_TXCxJ z>?dMUSte+fZYP%6uP}L24MbRt{|mh;nxlyki<*jfN)b)}uZqg=IiT&8={p7B-hfkpMzCT#7I++&d7y&foZPh_VbMPyhclwFN3t+j{ff@ph z)brHuU<_;+jNd_(ApgOZ<6Pgj?R34f699v4{W12M30a1qILQy}%)YXUqB)f4zK~yg ziCuUoO#tE!F@mv1dil;B;5It|rZO&A4A~W+;T@kOi;dA(c46_^Or&oC+5>vC4+cwq zObp%&2WmM=*tSyQRlu2O5V?RJI~rb<%}?Yx(fZ3(rOy?f1_>YG|4bk)vz4#8K5bO5_V%;4?Qc|U zu@~W-O$j6tz>%u}IVg~GR+l{kV9L16Zv^fAJphV0ggRO4dqo!)Kz!I8{h0l$EK+hW z02_tBs!(qhIsPIaD}HV@;`ss97sH$pbB5=6|0k&#l!*7J5Sohk7M7)zzukENzVg93 ztoVd?Cu8!*bY<)F{ZoC$(+`8v^7^!!=_sy+qAjXlGv}xT9CS+nkFyTcjmV)?LI4o1 z14R>Z-HSVX7#OG%_;ZTf0V21Uw&)SjuR=7Za)tsR4K2W52k7-wI3NIC(1ueFA;Y?U zzQej<83)KeBie|fr$z2>g|kMROZ?RPt==E(*nAcm9?@6N*~iUJ>z2CkX#EWPP8#x**`EOPQ}qOh_O&!THmeSKC|(qom!}(kF&d{axmgP3(fdZi z1to^<%8K$K0=5-IPmb1Eh`N?b4;a1=#V1}j%eVlQM-3;mazP4Q`icAIElca;d6SZq zM>CDS+n={W%OWKOAHl3vyFLK=t^^kxof-GkRVuAW$2^Q*eV}xjsNgeM&z^=JT|8Kr!H3Z-*nbvQS?F zChHBwe=)=k_iP4uo!wbC?hUx-4X*Cm9Us7%EwN}O_L)B}0s#+dwn9e)j@U7Fhhf_u zsF9E|7$x`sqk{h4cE(>gfy|*Tno2z@SOCOe(0);Fu!x3I*(ZKf-W?Mp#JHbgP;$+xyv%w!aK+c02KmBQd03UJG69rR9Ti ztosgN(66Gx8#J`ey}O-kmOILk6|#k!U_zVS0Tm#}tOEfvbjPz~5iv+YD>WU_1 zn|n0tGW9MR#Iqy7;bvKhfkdGL;4uzc4V4?h=ZXNTY2%ZiVt%aPcVhc#2$%cI>qT3A zp5LDw$0hb|YXFl-G6{PoW&LR2_hx|WhC1u0SAlyO+j-6qPALY?8T)6Xck zqjmstTxZeh&tuWDZ-^&-AVGoYI z1{iYXx7qFsXan1ubHhabn2PtT#yHj;moct{IhAYCq?4_x8ZDCdn%Y#HJh}jZZWztG zEq!2jJ#UUyAVi1a2B=66b0i1g$5*{%&*jq`OeT=!7y^0gC7U1pY$)zWE7rQPC1)s(GFEHB*VoPW znyejY0;j_LO(~`Ak?-C3j_M{Ss#SgmiA zwPKTvAwk1u9T9s?cR`vyqtF$vx#>3_Pw9u^PZh#S7Nuost`02fuV%01filOw`1dV$jH zi3JdWo^>y>PMRhajCV2Ki9bp0wDX1$k6&e1#?|;;qUR3WQL=Xn5OA7EAMrQ)CpR*^ z9oBVjSEfY)wZv~H>JcA$p_2fs_KR|2M4AN6jpeM_dx1^jlRV;=ND*Dm 
zEFB>>)ZQ~ApcJ{j#ZQTnNxp_YW$zE~pgXV->X~y4x{AC>3}`B_sy^-^cF}byEGNX8 zT(yYN{!-^-x>8aVK2X2#FdsQ_QvWW1rUk>pyRXHD5Z0y0?geEqIi`u@bSSS|*@ zw|Uc)Bu`^jmjU|1+m37nC+`5udmVuhK(DK%(tT_}k*DOXREz#3<8!i%W!xY1Iqlo0~RWvo;5)kn3 zYjz3C6JGQ1et;g@j+EOl6X`xV%$BjDrg>6MHp@kwf+RGXFt$&o#gp@dhD%87_i=Xl zJsXZTH(#8Q(w}E5V8c~7&vxz&$WSTTsYXkok`d(M+M)B-nGV~HB3PQFmXOiZ@X52xSay?|{?GzPBCYbglwq;lg6i@Ar zEw1w$+mE}sG0ZYJLV<=E4Bx@}2?mT0MEGH#PJ^vDoGyxUKxw?H4y!H?O-+a@`xGqC zRVKsi`drIn-g+@|i5R7;>HZKj6qkU-Pj3~N@2?sSyX|K$ z*U&b7%R(#v$ivlp3Utui)FY#U5HGrV5^S}D`BE~tNtQ?sp2JU_(a8K>)cPlVfxG1pIbU|MzWmO3vs-{F$B83&EfLDXHw#yY8jf5BU<*kKxrRUm2#oaG;r&8B@7AWH3SU^Mcns|do`_XDb z+x3czlkHp8XR|Shn!<)KEd2?#H}0k~)$67rwiJids~a8XK!1iun8!8T1VopmMtRMM z3aWa^~UzzeVlI6WhhLRrZ(mRt>>ju+mJP=kPM zb4k#x1EO5kIbo35exS*`nm<;=(@MeIHcx-CxYb4<8m5ig7np23sij*G$&{#cV=Nlg z_s>`oCYo?dX!2PU;mG(KB78YX^3&fhYivO8d8P}0k>QJxe6B6e`)Lrc-UZ9A1j;t3 z>4+St-_rn@81!JPlyNt+XQCpk4>!B@#1I#TqYwuG~nQWOC?$+KG(BQKj0}1HB0GNFDpNiGIQ_SCt#d z60MT&P{Ssq%b0kkjNaqpvx5x5Xoukzmm0TLCQn3^`&-vo_-t20`b5C)0+YwGsh3Tn z(S@Yy=C^rwzu3eI^ne_5U)}!F7KU$TAU?ALZV}wv`0=tTN3~tCY6+&EiOk;`$Lfg# zgzyV*>EaLb!wCP$q00x64n@=eh2V-!Xky$rs}2;U_g89FVeL-c;x~_zir&yob~^bN zG^9S|t&K6r^H9~GP`&A|#PD~`4@(aX2*W3P3$XQ|GFC}lxIh^gSKjfI@po((*P!)4 ztYmTXgtFBJ$rQvJk0{pl)#h@=?@k1cMQUt9N)P_u^c84V+qgE*sHCc|M5K zO$Usrm?9}QECJh?5;mN#N_SP|maB#&S93FSaC-}gN3mQ3EOTeV+y|(<%04r4N-BW1 z(+{F*f=oa%aarF>?)dM2g!kpm`m-+kTZikxp%{Df!xJ;dW?JDGiFxH%_MyT<-4E#~ z(0HlgQ1i*;$R6dWAN5k12xK1Kh7o#oaajn?*46m98F4;~DZO={0F#oq?|$>O!RfH-0WV$ z%gV0p@X4RPZobd6kDszA8{z$4#Bz)q33^nVgKFv8H@dl zU6aS9Q9kuEe3NS8zZtLj)52(eq{v_$A9qPYYaT(SZHOoQql$d!d9`zFCZ21J^KhZj zxxAfQaHJlZ!;5FB)s7q$uC{MO%co} zYOwl;Dkn651N694#btv!GX4TASd-$OCOlZ>I}!I%(&Kfq2NCWDk4hq{*TSVOw%3=1 z@}g|)D00?sE1!ix@szADX2nTQ41~5yCqs8cl5ODj7B)C`bT}*;gq_HZ$EKX*CaP$y z*>NG8a;j|&pkm8;R{w++c~VBJ5<6^2B5SGg1tnk&C@SoL`JP-gPE?#EOM!%S_cL2J zwEBN`tXpsz9oAT`Pq$1xDO`B)n%GCi7<)Iytiir@xAbO_Rk9Ir<>9_#)rFT@+-K?!))~ZKM4l<!B%kr?&>_F6Xp4lht*$2Q_k%xelWBIfelOpU_d%&k 
z4+gj+5=*Ol*Kz;g8tb5d_-U|0yD?wj9&v(^u>=qlWi(8xi_{%NRhpT<9$eOcVDd>C zQ0@Pvqx-bCD7>pb#IHrs0#M!=_7GyQVHVr&cx)j=Uqji2tLg9`e0)kabp}72Jie_q z5(;qCr#3?AXqv4ER6n1HZQj25I@e_F&BEfRd(QxsFQrecG0+zoHvfh5hNovSYLr|o z7^aqHi{j9kn{STTJw>=ru2-2$QB(V8hZSp3)!PVuMBK>PZdy4G^ z*^V-bK#O?_qxgQg-ZeC=yx6@~_=3Qh)rjZruV3@?^`@*cV{K_{@shyBN7ms+kJ8Ll zeMOhE30*RjcP8K9r*%fanickH1GLAwq=VT--0`hu9b_mSJ){o35<_?ktlKFb5ul`x z;=1V@b=VZPaXfkj%k5x>0Zoo>lBb<)U9V!3?F7rf(0=1DjVx3N=WY?LjTLWFE7Eq= zZo5nvXVDjBjy?+=9qLEAz@<^q1tub6dPw(@@j?f1D%mr4I&xutmgrLV1B&Kpp+7r; zUmWz6tbfs!D>{~H^sIna`7r`y-3aKOB3Owpj*0f|iP3v_w&&xPw%+j!DS@Y^QF-1O z&Sfth{AJu}Q`othx-sCO(qwkyHXuT{9UK(g{SvtOx;F~%$RSoB1iczw^UW4={3}io zh>ip2B$1Sz(D>ltXy0ef*M-qgtimH`>R^JBm^J?bfPVVmH z>hKf-{vWjbt2858bs8(KO2rU@)rxSquq5VdOjdu(7b}-_^mF!h#?dO zWX&vBXIi_xcCY!J|ASEm*K2{hC&}Qw*`H~1NwsdfJN9i}$?F+4Ayp9W0^UrVYJk^j zuF8O>pKY)BxeSOqTY~`ej z);#@*jcFHLQadbXgq0G~o`&4InazZqP)41j5nvqd6+Eh`06hmyfY5i#++{)u`wSB*whHo(nw!tZ|E7iI5(635xcEDi%33pRB$s zjD3y?_M99|nA;HiVHc#1CNE>O9`LmcknF?4pi=U12Ir+S(J`(hmhw;}w}PA_2z`2n zq~+tkQ4LG_h&!}8rzL|OlN*SQ-%)G5hg6-&B^GCI%9~^qWUb5-c{H%lylhc2`a_+} zZhHq=+wN?1ylRcHw_P({q14L3ddQK%Bx;OlGmvoqy;aJR9n28(luc_^XFZ-6Y|)LR zF-!;N)W>g(VNJxnz}INj?AJX0u{|K)#MCoan;AF zXA>l~hEPd7M+y>XO3Pq`t8%w$p zIeY!+YH^Dkqk^7jjQcIkpYsc}(p#^X-LKkhNgA)DyAB>>N~_{ZPzuVuCWx|qv14y34mt*X5T(y5^d41F%EP`Wu;M4D{Sa5 z4Pke>)~HU@A)969Q_1ijDS=Lixqq;>2I`w8sbkX2XIz4s8yj?YdCVumx=ay|I5w zwKiuzyR8FjrIjxEqsz889&x(_RL5}9?c6>^^c}eR5m<;k~@6| z3*d}N{fi~;Ba>*(wroIaZJ}ojY&N&bwlC*>I+ro$3AB?wiC`FmnCi5;6!t+wn%=J$ zbqBE{${f_!Z#UZ#o$HBNVg0Z!sCh7A^s431Eb%rkj1{>QRt)*-;?wjZIM`a*x6_WZ zFa<9DY;k+7UP>=s`a#RnbM-v;H2bmAZZp)5K`isihgOyG$++E~DaX zg=M6-{xIQ?>x6kBtsMq!+I_Ww4G>ykcY0md*44z3A_-|16`Y>A&!5qBkE&5h8g6v< zvGi!Sk`55Irt_>2uM{}a3N?Zvtdkz?quRKr%EOx&Nh%J$B_=WGtC*{%7LpJtj|~0{ zaOHQaNU_4&TUQ~HAAm9dpI70<(|2&lhMlNJuPCOb(}tDrp?pb?1cySItThauBO>ff|i9ZrTO>M zyQ{ir;)U{`Dvi0P^-C`n`X0Nd4Q5#CsJG>3D9gU-D5R}gO4WVbzFCl?%3cl!H0EU; 
zhh$sFZ{ScLVV0p+`nH=|Wd`WH-<5;WsWvzme0;}5j*cN5lNxrvF%kUY4OkBjE@H+B;cNM%|`CkVH!oVh-A>Rt`x1av|2tUICTe0e(o$~4b+v&pa z$e&>(3C8j@*Mr(1#MaVJV72_zb^p)h|Gn!=ID~=>Ar-Y8&s6Te-{$|OQG(Tkug)lu z=|3+1*Q|10D1hBYFXbPTv?HMgTfY zKj8t4Z3oAsJXQhFnncq11Zb@5Kx@uv>!{!Zk0G#C;LTqfz&cI=4c*ce z=(_716?!4+0YqQW2+w99(0~GJ0s7DU*w(oKK`I#7HBx}w-ukP`u8a4UkdO|M?(PQZ&J74C zjdX)F2oloLEgLCmQKUfxq`ONRl$1ug;jHUE?-}Qu`+45+{sZqA{JVhQTDL^dKo(aK@guvLTs;835}pp4rA zKm!tx{!Oh|iDDxP?>Yq4E41?k_jtC4;Xz2Nh>RFInWk=zK<^y}qd1aZsywN=IT{cp z_>*&gQ~c-g7Y&oZTTiLOzqHZp_>ek0YAd-LWN#@@ut?W@P*E2^iM+4^&`_ax3dA&- zpsJfK35Nk;y|J<52WNdM*d9BLJp! zLvtEl(+ltf62Tc$B%8ScDqb!juP7zT|Xq!sfHy#XDXfdkdCOpvNEzYMOA`UHVeGp=%{Obh^+t3c$6Dq;bonPNh~qR9#V%|- zG^6QgF@kfpK`ml5G(eY}CY&6$Bl#1}`VnRXJ0HqDx7~Y)CjwO23rq>X;y&iJK;s75 zk%!2a^})|$1Fq+Pg@dG5Z74}dV1F(6^uvIBu!{Qv)TOD23~Xu6LqcKiP4j@DMv!wW z$#bN|UECp(;XSiO_wV=}6*zJ^y?$H8!DP;9^{BXWmLu| zQAHAh;0DSk5uh$z!$`p%&Dmf}4DRLCngs`7dnq$PP@&lNqj~93`aB=VZ3j;q2``sF z;At`Y$oQi83xTSl{hjs|nROo+_?$aVUulg&BpfkZs4Dct9~*? z%Nd+5@&03F;1ZX3=e;NePwT;lXteqiq=1d8kgR2^L)VlAJYV>&+T#Zn3<*%Bti%C8 zvux%`x3V*?uQ?;!6meQynYD-UK=qy1q*Ni-@--MtZwe9l>GnDO zJ3Wd28H)U$Z~F#%+c$%T*|cP~gNPRB3iQYyq#rm)L!Yv$$MeSS2^i^R1@oGGQYf3a zyviFMagbH;{u5HWCNveF;MbNn0m*NoT=N#b5#{0FGX*BTy9ReP3`pkBc8m8^%kY1B z0mSYf&JG{-h;9D+kd*pG_mRRc1n&NMs849hW|Tsc%k?S{EODRLE58pQR_Y#nemhZ7 zY?RDuU6W->X#61$#%aOAk2Co`3uwlHyw3$xg@YCp*xYYH5&JvD@XJ`kC^QAuQrR?( zSQ?hY$ymb`=hl}asLC!V*t9^3z@h7a+jq*qfg|m#Z>Q4E`;yJtfa>38|E4Dta#43? 
zzRpK`+)lp_21QF32Fm#@3!habaQ6YOpt|&Z8N4ZVI2x zZG%d7#*R{-p{6~f8eEE-X^aHPonC{Ry{uuIwFNQde(Litpnn1#ku%F+ZTZJ z?Z74g4{W7s(+--1xYlS5oB-z>s2bUHgj*&lBTiG+f$(?q(ZfReD)3F%ymO1VW6hYt zGN^&Fs(xdN0JQ87gXPRSg}zX*hgE-GnaC5i$@0{ZT+RWRs%VJ;HMGglRv-z)1FFf& z3c&l;yBQEpTC?zCn`T?0dlT-rm%d$oO9^lCu_>E8RGb0YvAH4ho*hqaZrcuU!hAhv zfXT@!MPX4%h~2DG-Mehy!l+jiiecF_a+xR6a7y`KX4?r8u0;Mlh=3;ATN{_|2*+~w z=So2cYXK*shaPyGc0d8G+zJ?;?TWsp0IJG>J$nF*4hFax^Fpj0gzqESS(Olg)fWGs zt4$OcWj363t8r~xW2=uT2NObf`Lwy)4CNfrc!ma)1<05MZGc=U{*6^~Z#OHigBJjz zqhhl1>fgA@>i+>lwH+va`Sr{nOj9gKFZ~)QSo}i_hRU1KgB0*CHX2tF8uuH%@aaUg z+y&~EqFFKM*Vm=}*hk8Lh@MeWoIQu+y7&js1gwdi(vPe{umqLp!j#>>i$s8@Z}Tj_ zIs4Xvwdok7U)j(4B@}4-Xm^3kmXjgG&G<^ivb4Q(t-*wT97uZhR0{^Nf$Hw>Muv04 zlXjpinhMjsG2Ijc=l81yQj`G@psWs(z?oy%jCdg51q-b$f^Ix~cW(c~5JJqNsy<0f zUE*%$>#$1L?9n94`p6&EmjBqDjFCl>XxzaTbg6uV$v60$NHxJ_7IfoUYfJldTqUBrFUf6Vzdc&+xgQS ztNQ#fKXo-|nygWDa}VZ-pUvdkyVX2B>1+*^rj0lgfowRGPYLD6`_{So%NPE=q;0W- z;+}60pq~NB+M0;Y44Sl~#|&m$=JKO92<&$6o1Fyi6>iHwR&~NHwCJHt`ksx{=zMt& zxp*NFiis+EX`?TMnmY!!BHwufXN|uu5c!a^J#C*=SX^KID^a9(Blczw9MAe(kAMC0 z-Qnno$2x#iV+xc$mTAbszuzLY1MiGK!g6+bQHhfm2o$1gu{-+A@;?C4XY3Z_@!hKm z1O>v!rCJ_F=!PIZ7RN4&`Pc+^F?u4)N~7xd9_SJ|&l{I~cfx>ZvYUj*@!fYF6gq4Q zDsJ6(`QtD6U_|+&UcCu4l-X?jj-Oly*er*Na{L1!sWwo!>A&wOrTURZ_tq*8QjrzT zS2Rqr3=+GkG=8Kwk!!hLJXnx1FqqTg@CQw@Lpdscj+3W}Sc386>E ze8T=j90&?ZmQdgUS}imZ4})_;gZxk%aNVIJwcfFqE^6aXE`p5OkzF_}yxq+9VCMZRY7qae1$~ z8h3f8U4zz8Vd!O^xrQNMXi=&BTu9eyr(dd;`xVAvHMQ;reJ4mKH$}^jWn%-uR3OAM zv;*dNRmZD-DUsoCh#68Hw>*e@>})9UP`XL_T5DUj0@O zF-zHqk%MYB=WXj_xI+1a+$|0_S3-rI0Hh;s_K8mt-Q-vnnFM?K?jSk4SZ%TwAc1YV zP1QCMWU-YQRC@^qndp@gUIuNC?X4i#Uwi`(Tx{KbID8rdX3)HB`*T)WZdnpmd}+s| zS!z1v5ah?t`zD2^=V(LF6|l6PO^QyZe7wL)?oKq-%GI_hOx0mcbcy^0(W1^mN~$gt zCHNe*lUfWp2@mi+Q4t~60+ZX-E2=;+9lTqwCV{TqBrmjHR3+c&-`R@MEi{jX)Iv03 ztw&1bgFtp|2I@_2>u^2YHZA2EOZ?l793z31e)Ld$(^4)PtDrSxB3&IL*Dk+=qCj1( zv&d7X0=aq>B-vcb!WuOjRs!ji(ID67a!Lh|&T<(SsbuvM0K6_06FyE8qz2k(3vre@Sa*jM`p>ZAYY;A{P}9 zebX8$D~Y4KZ`%D)RcVp}t@Tq!o?*dj0q(o0|4iYGsz6oe4r{bN6nfZeMFYf%`b~+T 
ze3qE@(sT9EV_mOSf~`aq ze&ECO^!Us8x%I-ai1|AV1!V>qkw@-*lYF`vGwdOB>%T?tuWYwzh@a}`sX_HIAyk=4 zR}DfEQTe-igNQP`Eix9F;^RfrsmOny3b=^=K~g=umZsSsO`k-I;gZ=3u0SS0@%0sv zig*q`o-1p>7|w|$lem;to||)joFv-DvVJxwTg#;N)F@Ym7I_0Eu%TJ|`f8i(%FMJI z(tVbphTFVPfcL5JTDAxrslN&83qEC~Pyftglu%`#mZD8(rTyO$5t}gNyB4?<>IN6D zh53}dS+`uvshF9L6!YQG1e7&+_RcUi0HhG0qm|HC+Le>~eZ8<3xggsMv zWDKsckCXFD;UD%R5nk{F6Zt>jLo=52vBuojOSKoE2}7dkLS8K$ry49}_J5Ir3Rhku z>l#q%vpk(7yiCjZ_mvsk7BFGRB04ffTwg4J2=8Rgw72R0?z(J^2$h&)(@46ipqNdI zc_<4olPV_@0Hx(?X6f-IurB=KwDzX4so=gx@@}l0-b@5+_)oZ$@e6xERk)Tf{#jLY zj@Q`lwh(bUk2%QfiRDa*X1T5dTeL>ZVKV>$DcdN%pD(yV%oB1f02`Cg$LpdbPRgTb zJ4ZA|3MUMP{w$gN*g2M`niBu{fg+Iw1+*nxjs)JVl35bAxa}dM$LGpM_zvX>SpkGZ zjRWoR3n5=?9X+$Kw9%B1X%;Ws|apT^TjRH zs$iUCwZ^n=$(I`jQRDV>AqYejM{tVD%w9?9Rkd9a3}Tk~o9RdTN@-v|aw{PtpryL|LMr3v@`I-=x^-R#bY{=a1QpuF(>ptm@QHa!ocf_WI* zb@h<9`CvTdfGycsC!COBIhH2@uAbOQ%(4GUi#L*oNQ|>_E}3|uV^Ph_@d(fX^gHD( z0p=WNq<4AB*6TU+Z#;PC0JfQJ-a0A5q&y9 zk5t-|hm%w})8Q}jdGCjW;k@ho6ypy@lo(@^iC5`wUtxX?^^JjGKSr-1)>~{!Yh#wD zJEhh57WE9>7e~&MeE`=%$SrT>+hfWd|L^PX-Mt%+-V@UeCBO4jX{>aR=b7y! 
zR*F>aDLCwG;MpqfgbPI~ix&n&zl^C&T~WPM(|1V}`fu=D>jOno-1Tp2#hZa>)XQ&A zFs@$a6{v{+b{RtN!zS$Tsyfmwh^+q*dP-CF)5H8_8-5l$vwSv~Qb7NLD7$HR9My|-k5h&x}TonU%($$g^v?T{2U_!Jl^dWUoybup(67KzHmluSG(kSA$Y&# z;@Md8f?^-&{*{w4rKTQ@t6PfWN0l+I%a8dy;Pr7y)Xx5PLbRS$hbBQ$O85QIyG`$T zym}CZb$~3wim%+=NwMTZxwf5L2*+Cn85=C+q=Gg;io|h)Byds78{cKhg9IAcb_f9u z$DyJG8K&Uu;LSIZa&NtAKq@d|t0;pL+HEXJ%78&oC{`UV7~t#cN!%YR88DE5`YHI#`pcGV_;VZ%trPMtv?0+ zG}!{cxZe!e4~mi&%siq1hf;u>fCSRyBV|sx-f7VjVXX|DRiE%FIHu@K& zsv{y9KQRBpz|EMxEjuBkbXrQ~a1QkN%l*4~4MMp4%*F zE@8MZW#(hd4l=^E*UKuN1(3B6J4lTm7H7+Re1lt|_C*QJ0~0djk}yg+IRO5CpG2J@ zBOOwS?plDuO+Fq0Fn&fi57;{!+WXz_rGEp9fio{t5f9(OXTZGb*SMGUAUu4cBFwA9 ztdZe2NWCY_(0=8Z%>r!kkL*w7>U7CVDkwOVnRWTX8(9D@_s{+bZVzSjRNbAs$6}TQ zjxX-}yJvs>5}J49EAIf_ZbfBz0pQx+H<~L;V9}adgCP1$*-=|ZpiF(=02pP~a(jjWjLB?db>)G% zOlUC=Sr3jMb)UqE?(5B3a0SeQwbB9JePu8mR6Bk=(f-V_ZGENd z#DeK)LT&_Q>|ezmfQ=3-@S=kXMUa}}nXh0KTUoui9$@%8WO2lwyT?6G6Od(y_oN1- zo^p00>06QDOMzt|IAVkI$MAW64qInkil7SEg#EU^J5fXPpas4M^D73@=e+NoPl?U@8T$P4T%whNRS*&% zP23pV2<`NAmJj3sxT6nQVIjUOV(#fab)*iL0I$yY*~N~aJu=m{9`Kx>uzGGW1G2f<`$o7_K)m!`Mv;_QzV$3+V!+8+41U9l9 zUsfM{Ez5umSMni5ekZCS%Tr%hVSe{}dL?y+=am}WPMst9&!tA!TkWr6KMrG0*&@FA ztCjvB)Bjf4Vk%aWN^J8%zxo?Sj={jR$qklmIA&~0-=krt8(G)qUJpX}$XX5;K;fl- zn=Kp$ zJvmQ1ycq1czkWb=qygZQ7RY>_mKY=!f)W%`vz#l}AQ|FX z@utY?5Q5u+2%L`Wd>^Vb(*+0*uvcFQscV3Z?Jnuyl1iy>7hFco0QrIy2oj_}uwmt@ zONOce#Ve0?+?5`s690%hCA9k-9l1!p^nIWLcXD>y0^=z4+kHk-d#)CujV14 z#*@UYXOCi@nu>J;r5RM$G6HUT@-K~V(gN9wKT96kr^Esu~uxYaW z&E=fdx1s1sx%y*(U?DQDk%`NTJ>7p%nxe5_$Rkm$1nN2#jK@Nkt6XuiKrTrI!$t1W$b}#)5>Qe zWI7j696n=X>kO)4-ecBaCK@Rdw>_6X_LW!wz$-2|YU4d#tw!OutWq6g>JmkLTDnV( zU2VSJcQ5=oD0@@6VZQ$qm_#D!QrJdkNyV6yu0C^&NphGI^|4y42ATXfKgOTX2{cNq znRaLPLUMY4RtHWl1>$yyKlF(0@SX~Y5!x&A z?Lj<>uJPVYOF0KWNPB|Rr$bM&m3*SM@qFX2^{2%hs-qF3uI8v)wZdCzO%jm1_jN}4 zE#=Sms;gEl@!ooIlMQ&uwH*8;wkScJ;@%yBuxRUiE_AB%$EB*@wfwdU(|j{*;%q1e zJ!PhzrlK*jOjpX``+b^^i# zgM9TtH)&Gud@kVoCChucmi)#K;=hbZ$Xzp*f;-Ew>P1^1yTwkyDmz~0)mp4T^N{fC 
z$}okiJy7e{fYoqU#%2bxX{Fc=vfo=zQEK^>6#u%}5ZYIt>n2xqq6hAW>1=F2m2!9+{vP#>RlX}-b{BP zJ@(uOwW>RtXvCfcxQ^WgxDqsUN-kI5#*67(@$HB*n33HgaGo3szvSi2tC%2GlVI#R zIXC6f_(tW>uk z&Z6`K%=#Z33HK_KyBCfztw|!qL*qofO#alY=e2CFt@KT?t?pVS-E%pb2rQ^yo6w8- z;}>p!E%3e8g*7IE^ykH(9${);9{IE&Y9!B7f_ZQO6!Z2YnP`<%^-3M-VV+}MMRdUH zM(g(GwsOpkd#$)-6)?z$@+?@(wY}$ZNw8~{zJDn)geO(_%Pr5+6U`rhmukC_zot>U zVzBV+WZVe3+DA6OmCN2FsBv%X{^KMrDl}>Wt0vTYjjhSJc2jC|HIrqmI>~B4()UH=2jiaP zJI_Vy*<#wUL&QpXJpG$2Vjr(sWLF@4;WuKN+Vzjs_;87E*KT+5mMK}}uv`5PGwtfd zmG_NRWEf9YP}?f-B^n4gdWe(EMUfHf6QFI$q)2J=o`!(#ErL{!6^~A2w%?KRIk4^s zPgs1cu9qUsodRkdoE#y>QPGM>DL9Z2 z88EYeyFV(66RW<(*N7LAcnIz|S3)T4jQqq8LTd?T`C((KKQPlB$r%Io>`popA~9@< zjJ=LB6wH`JB9$!*kY3eZ!H|Al9D$30?^3HNsNJdP^-9BDai_YBaJp2DRgv&1!$Yww z-@gJlCQeVVm!?sVf5_7T(&pze+YwQE(+P^s!#IRUPgs*o8WvNf_93tdZfpqF_jt_B+mXSN=<&DYJ* zGZv=TW*i)MZLD7!0Sk*A1&mY?P`u%%j`LXKk$@wCq&xu2ghf<^&xv8 zdWCSyf=}ikdV79S@&^PZ+ySh9Q|$?n2xKHVBm;E$Nk~}9XGfd8eD3mI$=T&kw6C_; zO_8jV;(5sKEHOirRY}ZTzPTakIN#IC#@6Y#3yMhaHERV3q7NSG^B#Ih?kv`WByWMaC%Z;0)D-FwrHBHE%J8E|zpJE(_yY==Mo zl8TXLN8?JDT-2wEao}4rab32J=#=E-lWS^hRQ-|a{U8{rC{JYRzfM z(M5&XDj8FRIG)+^C%)0+VLL(syumv5uC^=?gC_;DRULy#Zoh?UV28uM(GA>l2vZai;w!9ICG5ww9$ahtE(u zvrMGA4t`vgdUhyLlD74fz4e{lwKp>3v3;Ux5FTqIunm!}JlnZS!O4EJkI@f~0e);r zOZP)d^y9sv9jf}Gq5Me>*!lUK>JksrhlnkxG0s#h7*_3fQBwy$DE7gy9Y|*fr>b+~ zPCJ^U{ON9|(SDA%FpI_zk5}#phxM6mq=z6*J=|jyEF7fZ?uindi`jG3Ne?`p+`US; zfZw-1Lh%xxbTpot5729&c|1(IES9{m{1<=FR7J;d{-<`4^{pTuQD+UEZ1~;Khl+2X zD0ZmmZR{LU3shDz6K#6oRdzT%G?Z_?ip{28Jg@o$P{LOv3A!8Uc>&6lyR2o`B_2vS zBQ_i@-~~E1#M(M(HA~5E8IX1Pt4aw{Gu`^skJS!%}@7w*YJtCerC}y3T95~NEXm}ap z+aa*>OMS)oo3unJ$Ab`K4mYgL1%IK+mZr4uKr+<&(Yz=gS}}@wKV-wEhR22B1a`F4 z=h8I$rTMD)?;sS2I42;!Y4Z)=D?!|}WsB$tQw%L#YCS?BZMDrmM>zYwf6xoF z;a^5r)%H^=d2^!*Aq z?h8F+-Hg4RXBZt8v?Rt{3#&rP=`xbB!c!A120yKLNOHy)%%h#AA0|_1S6kH%W$x%# zBwIT3Z62`FCcAlLdBjlNhK)WByW^Yxgxa{RqsM-Q(x45Wo>Qy4^zb+Hl3Kay9Hk)g zTq3p@pGHC@cadtWo;^EpvLM$JBIWyaYB?joZ_+}n{mXNqRy5=*DTeve(X8ioS-7_~ zSF;&ARDaD%Z2~(bKE8ub$6V^2u?#bm#*h9=VoC;<43Dp?bTlV%EOhxpcphNnTa^Xt 
zwNzybSexaxD3G&##dyco_^Q9#DXF(P-qP6~=4GOt%yd##iTV|k`A`p1=cK)cmR|WC z`A{mgbR}GxIOV5I*Ecq&n;v%*5v@mZTkd|4t#F`b*lX{U@ISQ7ys1e{dwuZ>C%N=6 zs9*TD`Ox7M5naC2U!GeU*r4dBZCMpo{9Ksr)uk1Eaj4-%Ix85C87=q8K;km{g~r^5OfXXOC6 zxu;kc9)6$tPCqO#7Ko?drPiHV{KS7p=bPS=-kr)iH%pdE&eg(Vfg%#Bzn6eGd`(gR ziusa%k?b*!dq3idHfnMLnwM1{iLRrtS{`ld^Ic^7)0?jBkH(W^J5&={f8Nj#FU?+5 zTXil@Ql#kfe)i<->;Ad>CNpA}^x5xnQXbT{zqSmIhtTESpTP(iF;xQ1{3@SV`Gp(~ zibYA|B+JCVbO^BCDH(^(j!C0;0h^kb>Gv;KnKUwW;P5onzK;mvDn0PLudUWRs_dEQ zNKB7Qf>p>&3-Dl_OSR@tcr125CJ~gaiT;5&0erzgQoqdM*j9>X@vS}$<<0|Ry+_SEaFyG9C z&WYnFO3@S91$vjHAciopN?U2fPeQ=qeKKQ9H>&Z4g;h+B+eXjo#aFVNOhjHG>XL`< zML%QS8E~`Mi^{+1l$W$xvM7CbE1IJ~o>9F&#)WbIW>2n(w5@5^X~VJh#BkC2OX1^| zpCW0U-;0vutYgYs#2vbdllzBqBaB%*zbNQ+^4)SG9Is~D(k(_mC!7k0>B^hcMPHdF z8k+s)bhFHI)D>SG#~LJ=$X_QaxiUy9K&$eKc_@Fm`TQI6Y`fpH$OK%)Z5~PIY5Y1{ z&XN>$j(t~3UWTDp4{NoE&%6hRzFPM_S1C^tT}A37Hq-6sZvvp$fY{IFQKySZ&6>FL zIgzKOaa{HzB%KSNSlCV`Fi9mbNdyhJ?T(NYlbyK@g_l3c=^A^c2KGm%_ozSj2+RMe z#h7}eQ+^FpN6W;H`F1bW{i-G%pR%Pd)m%?EkJ5Zys$WUskgYw33&JobI<&^v zir*B$>sQX3uUbzz8R2@&aND4}7Dj{-h4_zAB{3}a$K?9KEh0=k*1Hr* z=wOwVfd{dHnn&S7eoR``7FO)q#c6yS)k{pbJi|DSsv}N z)514>wuoyf8?y8p+CEDSU+&(J)*uL;Z za3!7^>XXzQ*8~TdpJFPoz6RxpcC$CQ6o^T^>pEkH8pY50h+*ho|g8dUy;NIE5 zT&yf|8G+kS>AM-aws(WtnW_o&7Psk48x8jE=1YMbEvp|=0!aR3JzJ*u#9nrKtE@-A%zEL4BrQE8;;oTXA|9XI7u58taC8n0O(dU~__?-R-#~dSW!(Um zwYd!ar!Fj1L}q)8>h$mnn8h1qof-Ndh7le7bF0gcKD6pQ+i`h%_!r_$Dl^1w1sTb= zEYoLw1Vv>mS|<;Me0fdhXk4ca3Coq!d4DDa$oR&nZk9>MzYr}+ujJhjeV<&(sCx3W zb1|_!kHeGBRWaKqIN%w*c<@Wgvak=dd!|Gj20ES4M<27dMFmvWZG@s}Yhe?xjd!$m z+#8zRGW%b&buTNE-Vt++Id|2Qo?@iXk48uuj9KsWagsgYOWYyzIjMcjqxY14#el^> zDE3y0miTNi*Kyp`aqU`!_SW&Y-9+iH&v+SPf+97#f)+o^e!Yk;o)Z#OEk#JYVK%?u zo*sMh>0GQUU{-CX!;i%N5{m%E8q*UYIcA9Zs*3oa-aswz@Vj42Y-;TG&&gKH7ZDzqN%W8N%( z{8{gh9>|Ilz%=Kn*MIFkFBpspRr?#K--$HQ(`muP#G>Gbq&tsWaM=}w{)&fO!;sl$ z2C58rG9BpE_Te+K=bz$c!w>mTjLdx_$D8KzxCB(QvF3XlvjpiYjr3?+c~xu7t&qMI zYrH{bI!Y4QiLt`|TQQ_*{)i7)d96-iy5zsI(`d>tw-z!{YAX7~Q8^*!-D5z!B@1ut 
z=31AV7X=MK*uH0AZKD_A(iq10lJ%5lZ8zwf)Xc0#+^i1gaL5dlV)`I^-sIntAoZ^1 z`zttb>VLh2J4IY5H0YtU^RtJhV$ZV|ovMkp#%Dc9t#@+V@X2TK!m+)tcrJe(wE2TA0u#cJ8I*jF!-qOxLsi-hbqRhsDz2U{SE*Y!j?G9RJoD*LmyF`iD5_vNFvBk4Mf}@tG@zo_TE(5hdE#(wzd$2!C|UV= z{njzO9AtY(Zzrbn$TZkv*GrZTpN7YKDrXb7IECzbnloK5=@Rc;T6_N@NG^zM^UI8K z3gt-@DR7>n35~~^a?PrY{$*4~v)cP=*U9X?V zsxK}!YbI@|wx&aG+Us)${FDC`nRmDCJn`dp+rh|AL69e(r+y>S{Q$Q34vGCp=WmR3 zlBxc%N1_k$1}vaQvb(gYkb`#&lxE0xLTvkD1!bDy4qHzuhtdgk%=<+SUnZ{jsTqE; ztSmAAO)3d5d(;!k|B0{Ljr}+Gy=j@?1_FkkPdlRaj~&?+Qwoa=^B&1Mx~XR%9bKi+ ziTFI5^y`4a9F9eyQ!7C-uXjuZwM`O--TR8kZc^mkiw^S`<78NijS2CaycQLm?W#L6 ze`iZ})4h&*Y0=o#CCGtryd27xD^x!xDl^`p>JHPB=%(uL6^ZhE%R{)}Ct&f0Tv6Xf zzI~D};FIhX@{Y<>dZ`tf#W>Af?8Pxr$L6&u*}dNJ2SQ^oDi*Jpyr_wjkD08CSB*l# zWJ{&Y9!B22O$Y$d^#4 zTkL2+=x-5Mo%ztxG<1bOVPcxfBS*9*=_7l?kn#8FL1dPB0fFjnFZ*W_DQ*P4NbXA% zZJKa1YKwfE%)7}ZIt(Kio2=J};NCkP*qw5ZWzdoeuzeCdc9hXysMNFgZGohbtp@vH zo@A5bUzdR$?^6hK#BL4eU47Lb>0j9s92{nQGN3R^Z#mwDzRxb0{<<176<)85TmVZT zRvaU+ekM4=o=rLZgf6SE=a>FmWBc3e6QZqIHAxJi z#+;q8m!P27d=Y(V6&!_$Cf4d=x!jc}aJ9TxszcAXp8RaOQr-0ZZ#$fTtDM)0PlQCc4dc4@Uf_XqYAHiw36f?ip8o9`f{SE2{TqJxXsIaQ8o*3J!jFVYv>&PzA2*TMq(qr`10eWWx>;1aN zw_lc^#4@J3m%vbZm+KfsC4x!!R&T|SIcd7fnDd>o9R;<wYula*qq9DR~);+AkKEX&LABXQ`zTB-50paOR`-k(Pvqv5x95No1E;z);o2eeo zHMAp`e||dIW1#~sxh8w4Rm*SkY>WDI0)W@L?MLs#!_TA2^9 zo@Js0r>s0f!>!4!H$}J$xJGf@ZkHsU&KB6o9q3h#r?g-y`}3n5;^BT z8dDPw2IIu82l?3dItQ~vEK#ZMTzdSfJR++H($3RQb#GosP+jZI)W{%Q@u>gRbUzGrIB-_YX~0t~j^sexD@8m`*1p zzr1C47WD&S1%6Nc;1^=`f{*TXCvyNpGSBfRf3cl~Z_k9I@A{%b=PJV|n_()33D2vl zQ`3R1fWk?d?I=~AoL$uXoPJx-LbaRr^if>k7LH!_0N>)HN-nR(B$+S#ZvMC<)=B)| zXk$Byyu@Pq+tc*lOW{X4^B*!uch0+> zR&=utmN6X++opKq;UeJKvLPJs&vR-It!MnLYiH40Yi6(f{hqhTG^}_n>Mh!H+|4s0 zUa#Ai1?hCwf4yRxjuT9zOEYfufalkuU?{u>wX4kCL-eSE;iDX~-0vrdyAQgtnZ5R0 zAL0+fX3^he70^Tf2sPp!O>2Ezq}X2N`?s~lm{n1!PVXZyNz7*shq#RUSslaZpkMk* zKf1?ZS?bR4?M_@Q$b7Gv^z~|;N8Rv~#z|#D-o>_a#k80m@K7{wUtn(hbv%9NiRpy4 zlRGQ{|wM3a>I`>*cisMttSZ3KmD&SX3C(e zZ@8>4mr~6C{iFZ+o4pJOES7Hi>?$c}@815$mlP90f=p4es;&>gf4nsK`~Uxs|2<~^ 
zp92<@{T=o~9bdrj!sQrfRqHqev|&79PXSFcPt-z7^xsAqFieyTY(wY`7rFbOD<;%| z$k9*%oP8q4pf67iM}Ry#G{gS=HA;?Hl~B^=nXz{@U7z=~IO63SO?O0{PyhbCfBw>m zi8>6HO1n9hA0d7njW7mY-JZzEMfw={h{J(UqqN_6OZ{PoIM6B@QMYvFaEdK5{Kq>& zGt>l8tM@re#&7#FpmSOr1l_teK>sE|$N+fOGz9DOLi+9`c$Xm{b%fpD-42;YbLccb zB@Vp&*!=IY{AW56X212)|6Gm61cRFbp&ry%lh1uZO(-3YPGQo==5*j$cP5b0ZU7-5 zAJjK-2&ffIZc(xx|F@B376X%go4p+f7JEy9cA%S!t^tT92BB7Me-_8ma$}_XjzO!0 zMvSYoJqPOyr@GB(&*#X{Li^X(M+-FDKCR$9BfG-Xg`YsS^xf5gt)iZn~|q?STIJap9a`+qFOw`d;3 z;OVspZRu3ffoN`oLp#X|_ULbe{C_;06tYO@TS8tI4Bl8R6e~^e|nHWi^#&wD z!g{8X!mBj7uPU$G?FV|pYyeFRA80|5uMaE~1_7u4cp!I?X_FsPwjTrDzoDQM$m%wP zS}xTZlOL1a14{xsP-mx@sJ7Agrmb84@`ri(S5I@$35MUXE1&~Qh`9NWJdkrR9D|}U zXtfaVeod~Rz>d;vEXZUI1ui(L(T&eH<_9AXRzUY~4P*qXSw zNdN&kqYC4Q$muo^!|^iH{z!qf#C=bgO$hFi3hqJ=_TpL!6Z$?)YW6=< zF0WsC^n|vNE0-9|HKu;&ASm%v4J0Iw?H&GUG_nD@D1I3r=!b(_h9I5!Eb4JTTR9xq z+Le?41#a(8phlW$fkM|9NSt?&x5Wp|!y-p0l(3CYgd}YM!ekUUh;Dq)g=$ziVY;CV zwQ+?*jlLMRfSYs#Sn2*pDNXIg>p-8+GkN@R5VYFffSLkP_Ah#mp976>qX)x_UIUP1dRf6FdU~YRN^C$-5sW4KpPa z4Ow8$HhLd4q|axjEONtpd-8PN+MsdSqyDX=)XqDBzG@aTM)m5>PJBKm_gvFjf!% z`QH~EjsQ9_w&|(?eVeAD8VdhF|8j`d0hoR+kMiFuEMd_C403T0aHEDgyA&Ry(pC>$ zva93e2LZTf@p>nC-S|ha%xfSB_SoA6ow`i_Vb_0wK9|$%-`A2(QSx`@V84M$3h(5L z6jw;6n4vwHH_ zPf;Gl;7DA2+!zR@KP@)S+=}NBgjxg`-_50BucXQ_T3qS%KhqC6(TM+FUI2EIO82K1?F9mxZ)o8g2ETZo{<$k?Cv&|~pnp8b2=y2$ z_*Q%4%SaO2oKBm=Zf76Hy?G53Dy(_-{I`#TtZqV1s&tys#+3{vBuwOgk1LUyyx9}* ze)A0dr_$@M^=Vd-DK>cY;L@9;~Q`Py&M0{(38?R-B2^nr;vq5Mt@zRl=TLeWk8VQ3m@=*XXSZ#sW#9Ut2b;p(xw zXhA_ceW>k?@#vbrvU?ixx8G79?xHO-EUr zSv**y?Ju9v$OT)pS6%kWi$;Y<>GmG@*&U8|K;Ob9nJKN9dXe%SIO-6D!*ADx6G6(( zY0*%byhV+ti||!nD}W;Q-lv*YYC7SD5e$9(wloI=ME0;NxwFuZUzzojc?oks+?_?B zF~#rBr6WsD*Cf1*KrZa^@p8{O(4pvqJ?l40(jurC35lR1Ty*%Tz!y1Lw5AsR@+hLi zB*iB7Q7qeX#q&O#?L(^$=X=Evoso_JAYT;eDc}!S^9HRbgqoD&2&SA-jysdER>~MW zMqSp`NY$V5$QN+_^>PA;j(1!_Mq!E>^j-cNT$` zqanu@pbQUG5@i;Tehs)i7-aR=&;NNIk_6Gi>;U^-ch@_Ztdo@VKdT0^$v$0N5wJo?N*I(!w*ZO6zN5S2!8Gw}A|Ec4+ zlu<2X0Jk{6*9LPVDD5HVlg0uguUV^op`HtiEvn=O1qL<=c8b;##&yN4_4Fcjt@93* 
zzu<%zt5|qIX{MM7sevy^Ow1!07Tg9Ej)|Kk?XF=|ijva4P=|tJU?K3mjnt>Wzjzp; z50-5goEaBuH1!f`PZXQ8){@i zu*ILea%?>SvZV^z$P;Duk)oTDZtb{% zNav-%vGv8*KL$So_kb>K-FG!-=x_DF8ayER_zs8Q{^Lc};U9-uA7+Ln00y|34A;|M z#QeSpm3~DgrkA7^RQ>qB z3?Wr`TI3%v=l{%%VslHZQ3`6o0nnex=X%pL2*95GfUBZUhkVKY_kPz^79>VfNiS}d zKa?q4ajnSc$Kr@O$13>e-kMtkLD9gSTkzl-!kz`MhT{>b|{?J^lki^VO+ ztGL9=X~}NOMtaW{{x0yLNN-+u>^Xi&L*}fgxi8&N!k2ATCsLhvoc?vYc>kNPu)Kw( z>+Z+Rc4MKj*L=C(?IBaP{*~LHMxp3IKtavkSxY#d#|%ZK*~Q<;iT5^r*ly-p@AoSx z3Q`?AmfP!a-1{UR6hf+uTSkd{8>%=?yqUOJJki2Y<`#7kXOopIQj2- zC#eNbFIqiPZd0i~RwNZaCW+zM+7sjb+K>&c90~sgfn*r3av%1bidU=AN}~o-j@R4y z^_Pu=?Z)nqo6^W^kn8^-?yUma3cqdN(o(!o9E#O|;O-Q6C@#SXPH~q4!Ci{GTMGe# zODGgC-Xg&rN^vg^ck$onJe;%ly|4F$2Oxo!^{ut$oZlGZcO`NH@mouVx#atb?2}dy zfx;=2bS@EEBGNMA*DXj~)P*uC=H$JAN^=Uai+SbO3XK|ZZPU2?3XR=vc6R9dRULp! z?QQir`hGyEIXdToo)Fym)(%~(mN3aBySP8|_)7oPeen`L9nM~r&H=(uTe}d~M;S@( zI>5VOxK=wDk*M{mWp~e{!aT*9R>&DNtv~-H4OUyXCURm%vFb9T{YIlUSjsb2i!~V* zG?ufj8_+;?tG$r-Pd2lDPQ3Ut2~6PgfX91UA+Sqcax4{l%Bjhn`cw)q^rVR}uDny<%Ebb= zy*)(K+ejw%l@?F7k23FIKQ5;t^m-!Cz-OOxj9{}FaQ-5;h8V>borPxDW&_0p1hg^L zYJ(ApT5*U|;Uc3)4tf6&WBZ~&#joQs&MTz}k$Ft3tE(daUvwHe-eG^6p1zIGIsr=Z_doZml{$s6Drb)2&k z>U3G&zdko{`_?f*cl|HOD%0!l{61>SEo?qVT|v9xC3!g24C1qO+P*o=Jf(Z{=xe`6 zzFzwnu7Tl$%Dkd5d$D697Wu{eg>?OSJJp-E{FyW&+TEK$Lr3~p+Fj#nbM#x<8-lKq zw0{icRZX=IwNcJGmdO;%Miwjb>|~`H*&>(c}+P@Ho+3yLyi| zsXNEoKEKG=-BfnHDEDBIGnuygbpNgU`1MDoW2HnHq2Dk$4WN})^pH2dqP~7*oCh=Q z`%qE;`>bPOtWH({tyfvXkm3i=y`az=TqE`A3q7+0<{cG7OiaV%#)p$ma7m$=ETZ2l z=RVg5UFH7kgi);gJewx3 z`iR~Snq2%%bWHeEDN<0zuV~*2=4c$k-`+EnJtA6k#?>h}beVG+lb7BPer`Fyy6Cx| ztHNb3p^7mdA9%;(oTnSZ6zr^X6UVDUAenCzs+7&y#{^Dfw8$*@Qsekz0X0Gv#pZYS&D{1t;u5iQKAHK7xTI~vG4WP& zT3DJ4afwm$?_ylt{bp>^k_^Wmm5s|UcvaB=@2nXyg!iN<^F?k z9c(-^HQw*@`bm%2wBYM1Iou>Zy3yji0l}o`d>D{NcVOT(-p?aEy+As(^t@Z{YX~tJ*TU0GQq$HvK8V-zQ_SVk! 
zV;e|7_^e&@ev(d{8NAAaV=Ct9@N53Rt~S^iWZoXmcR?zyi7K|08(o_%4XFW}`jr+j z8glqn%~a>4zx(rNa7Ci)GuYQe=ujZXV;Jt#1$*BU7xkXCaG>>*F>~o&bTxN&5Sct3 zIL!YA<#I5A5%}v)>;ScDlpFg1M%1CoEypzh|8G2reE+=_EgWM{t^Cz~NZb${QG3>>Xl848RFxPL%D?;r=>I8DWI{CfRR(FM-V7fUdLrL524B2;4$=zQ zL?lh$X;0+SMDMHyb9YzX@t&C8G!zU8|r!%b-p-*3=% zzw-S^@@?3HNEo0 zqGr~pO_rI6k_d}q`kMaUPw7DQ{rH={?~Ay9ii|>Wu=Kh%DkYr=6u|JnZ-#UvID)5# z=Ss?52|l4=h6IEnDcgtKzAF)-Cth3RG;x(^#Hs>f;{-GxjF+;`iz5$cCTiNdg^vXM|V}jG%l5c{a z3NnLEBI5Zf8c;W1IeOSQ!edt3zhY(_2CH3q9VZDhN4Kfskr8QJ;KPV82%~G9=GPC8 zNh(+6^(t=In7_$Y7^dzg{Xsj4@eE=Uy)FL1y3AxudQK~!wMC2#dYKfV^HYj}x5oUf zsPn!?qStLhjAvT|A(BpfwHNnJWUKow&GB&j>`<4%+$2zaW_rYAF6;O^(AFI{K`n1* z1i*OmbvjO{yUc9QflpX4sxiQAD{~y>X%=Z)gP!E^c0OV zgOMlK+tVUxj>_(0K3n=xjLdX*kYV;M{`Jf?ydJJktiQh`CI%^XD^!2w_KK!ONU`nI zYwQ3eSi%nJrqLP}8KY31*+wpgn;6J^@r9Bl#DV;BI?(jfd`Iot@I)lR@gyN=wiby!q}Exyu$E-}h1=pZ*` zDz`R^{lJSn_j>*>NlbaV6A(hq@%wHIbFR0mJ(xne@Ovbq(}Ks$ze@V&z|vLcQMbFb>LrAKkt;u6YtK3~8+3r^!b`w(59 z_~b_BN98##U7tto=oWUnB^aS3VDUzH$fIi;`Afeype9F604&tvB-FLKTz~+)?SR7W z+1n4um{Q_yv8Jwt%o1378So0E0^m*HIDv^kj9oRBjbL(Md)gOtTV=am^B-I z#jvY)!Quii&0pZ?d$pGvsuJENXYe7v8rCGdI}qK_?A#u3)Y8ogoqWS~Nm`4Ax^)qF zKclGaSBl5BUuKwUepHdD15p~`lE|q`UD|93({d4@d0ULgt&RE2+CpReYCoC#oWA0$ zbi3mSySr{|<^&kYv)zRE(8p^Dwl)%qDrnp2^JF+%5ZE!k)RJ>Os~aFHCd!v6gKur<&$Zlk`hqo}Q{s zU=b7F#bf~!C>EMzEWwElo?2@~3lf*iR!wl$CZ_MiA^(iYU=>Y`IM)D+Oh8bcg3=XHs@#9L?Pk@byhz@ z3&Z0k+dFywjkq5rhILq_Ao-U@zM`AI>ZOQV^AgE$`$4YEe{AkFVh$5hR2Mm~*FQQN z3+!m8ilMN{3*L?`uT#pcoH9SE4Qsbb(ayt|i_sgG%jQRu^35V4CUwu(#me}~`3)Jy z9%!kmn7f~dX6C2K-y760>7wAg951{1?tm=HoXsd>4SK`7lzB;UUygZ4?3b9PePIk5 zP^0AVFe&UI`GzbXII0c7e1^-mPvO7C-at*8Hzvy@l7tMF84sE8qfA`}Y|;*E5$dTU zZ_+_(Wo?{8{Rvum+ue#cHhu%e-y60s)T*5_aWrgccPX>hUQ<|=c#s#@kf=sZsP4BY zQG&zx-dBq1N>iQ3Xzq59S*fG>p19zIGAmGB3tgWAfdm51HF2)`73>_45G6MkeM$x$imt>fj8| zLX}GqrmR4Be(9{l`VYz8ELM*O3`T=4=ZW+L88&NK^2sjp=-B$8aC?~l-YZM9yVmqa z6k}EAd;1Y0+g380*@vGl$fm13>!5JwrHGS%1oon-@AphBrc02TUbK-(wnZLu zN048gZ>$^ei8#Q7s!*@pmwcU)16`*ZyUhrJqj-5VghBon^Fr}p`;oAmL^@$LjD^&{ 
zw%RryiEPkFxi}jB%#F_BC~|r*3HIsrANg_*DKG|`H!(*P`&u`>oU8n_HpVp@O)wV^ zm!iTpPTKH%x9^(h^~q3QrF|{|DYMqI_QfNbx4+2>g4_Nbo5r!cpy`#!2~x*IAFI$&(L)6^#?LC%;-W_^TU!GhsJ+|z!q zB69UHuJIVJq!4*sj?r30uIv;X66qzuzH6~B`6BiaXMb*9|f`MyN zH|_Aw`s06jIjMr;iO4*U&5X1k^^244eF~6*k?In2++~gZCP$`9JT|kmoL(aZ3i*!U z25(QIYq;T_q7-w~OiP+K>!8andVj1V2RB^NDwZ!Q!ZbPDqi7t-&zK^pEuN2W5rY@8 zN#JH+q&oQPh*!}I{GFjv=ZSQ0Z%?YYt~PO!Js&b%?&Cpy+-chg8Fu;Y)q4d53!V1`C-4vHnhNu5ULpg2c*@Po6+KI`#)gnUVYXc zL55*rY-Dx7YNc&5Sd{Ak&i1)}u1&gBxwef_*`awA&2w=2aK(MnESC$73p~K?Rk~_9 z``U4@wtP#sm?!-}+uw>wGTD&%<`su&NL^gPX?1^8e#*9;j)DATevcIV=lToF>`TF+ ztc(21NJqd?FgO7NR3q8af{BeO`*OB({JO0%`7?0@L=z*magkBga~#YfWE1d@4#}Kh zKEpqF6^{*c>4^F0R`?zB)ncBPP~DLZrM^tYiFpI-c5RADK7*9}NRQmaM2hONe|rJ7 za_LnJ7LKP6$}qp;AL^u*mi4yrhQAFf4yjcjbnwYJF2 zB-cs2o)~u1#LnV`DC2Kl2bM_d>VLm(Oymf4kD;D7GUXxGk}cmX(HPa;YU{g}aq)YO zR0RDZ-!eP52S})g$hV&io(C73C0M#7NzkDzGADD0v}!Y1s4DzwDPr*b!K#RXjrC?X zWXNW{QgY~MTeIUBJLrgQ45JX9V~LK< z`-wg#3*^fNl<*^e>zbRra#tBQF~&b9LRNz9w=~ywhv{-Gr6mntNRs#N2B?Kz+WI;(F77F*n#hT z)AI{vdgfw$sCg1h6N7ZQ?4K*F<$KYRMGfV%|EwGB2oC4G%5C%;eS6Z}NI=UFrI89n z9f_Y$K-N2tL*F3DKd0SCHdT1oDmN{OV#mdX1x5<((+}b!GzzL5kgZG~ruD?OlQ5TgTp>Z`^nNM&Z>qm!E#5M2vGWDPkam z!tr`p`hJ64yz34{#LR?r3;OM=x3V5|EqH225%!~IpdAeBs`IqaVINAbg z_8hKQf83diq&k-tLjXVE)5AIH>q_`!=EP+U2HR!#1=k^*;-%)WO82VO^*jmueVCY2 zT1mo%sMefob+F=IK+p33o1b)7Z4Gu$rid@;v2Py$pd9p)5NO=g5P zt0MENT6@<$2k(&~bsL^O@K@?I**E4w@-wm{kYzy&>EpL9ais#{0OQ_c$#SMS< zg{Ru1Uyf}H7WSQzao(aXg=%Y0zllH}<_-C*?ysZWhZ#-4AQ_LNL-#PMnj_tnrU zYJB*I_?t>K%(R>`lR;KFZSmDX>OV-+!ZW~`d0otXW-~HAq`#n-_uhfRcGqOcUVcD3 zX|G;fZ{`noo8&Y6dCV#yJRu=5PkpgOdZBL&X(d{`+QkVu94qq8$c^tI7j6i*0YkGi ziG* z7R9jzSM|WD_%_J>2P3*d4Bm3ahNxzL`7eZ!6 zJ{>WJd!p!}`vnnxvf1=dWN(}Lezn^H+h1`x!!q-mIsLhVEDfTkbBc%TG-g)bAZ=ru z_wwHWBfVi3I^Iqg6-KMCKK(+Kh-ZtyW+3-I*FH$UJdw2Vq7?5~NU|pO!@lqslY7FU z+I)GfMRh}v69jHgk6`^x?~J#4Lh67RFP>toGeC&no!D1uZRku&A~e}jjWYQ$hnt>n z1asD5L8jSe`EF=!@%0LQ1^VOo|Mg1R-Ja)~pffDT0~_*Dv~yx?dtOrdioSNA6^#`> zoUeHn)`=7^RJmzmu8sdZFP>oK^RQ3Sa!P|ul{491zk6VI5p{ohEYty)q(DGl@72nI 
z>zCW@&Ga3X#e6z!pQkN8#zowolJoMu_R!}KEGm>zh-1s9NpIrZ{JzV9+ zrO@{~f*}}cbHnDb7liE%@s-s#Pdh?TdH$s*BXx~)X5Vxm=5g@tag5{ocP0dW?#W_S z#u?&j(Eo2GrU`#-LjaF+_0=3~g##g6Y@UOI_PDv9FORn{lBCX_DtK|z7HxHb>De5<#R5a3Cpz9EqU`G%3=uR&r0oI z9sOaw^!vho;^1*4&9#ql2TZG&qUmPYcRUY_o#y0r(Bzl!_p$Ql&Ob~fV2u<_V&ELP3Q<7=>CHwkCCt``hvn33~ zotzx!kfTV`0Q=2aBD!Jj#Gb=-)0e{`?`8sjA757H-nrXFZqsIK(|NXwuNv868Q?B2 z)0~qp`J~#Hw8S;v1<-lXB%$9R$LWh!Rd!PFXrAZu9I_UqANQwcbG~jO(X4T<@w4LZ zII;+`s7z>Guo@{HS|yA(tTwW=YS`W!jdfM}QcIam_mL@ygKi-geY?id)G+sNgc|6| z6{M${?=3z+WWHUE4JGh*5^{z@j|eS8j4LUFrs?k-Mkvg=X4+$6fBv*|>gxn^RwCa!eTD`3k3QWU6G%5^ll=Z8 zQ}CglJ9;m-1qr?U`q^r}$wiW+@jb8w>oah@&-ffY!7g*WCXu_`Yp^E*4ki+=?; z+Lk3dEL4@h=!}7L)4vF>IKN`Xp<9&5o&qD>q=&w$a{GoAUGIMPv z{2UFFpBb5_^v-Zn&( zl4@J3MEPP?cCsQ2G&|%3lr%nQt13js9^Mi0Z0FPAvAowLdf!Ykg}uAB1bN8( zrW4glCcL;(v`q>loaXk0MzX3}(9=*`@0#vY>bCg9wwFh2PYVyW$k;$$0~Bp}E*1`5 zr@O{~`xf)2j;T@230Wdvn>6J0F@9ViA09e(47~cP#oChzkwMj%e$!ZQR-x6y7&QFL zmd!<9X}(k4@Pht(gvGn`&RK=6Tj>?y>e--gw0|yoFeR04ckyrI;6|w>PdrptDcg~* z#zNJdZmF~mcA-`j8y<8eZyl&ThmXKnpC}Bro?z#prGL<%Q=3=ULH+U@Z--_Suuf=Q z)P-Pj-$&T3?|HC8bn@F262_y}ZExShlcP)g5*41t9zkqS6whH2Q~SK8=$a{kvbVK4 z9W~DcE|h8~QmEXd4N~q3>i1oX7$5^ErAzfm>39>YQ;8nMJRh^`o03I`rjI7nZJ8$Z zt{nb&8|!S>feF-A1k4ktQ_nbA^Py#UZ~ZI8qHtPs?no7roMfU8dD!e$Ki*aTVIDW0 z|4Y9q+0n&0*oJ&JE|)h-LxKL@1_4|a%+EHiOmFRopU0YxF5ZdW9tC&CKAC?tPim>< znKNv-Mo9NH;W~-EJdE|iJ>^>Fv#LtRl4{5UluPkeR}YJRPJT}DkH(_z%&XiMlKXO4 z*KkWH9E-WBh!ydbHUs**<44+kcw@WP=FX4`7P-pXHLkB;BKa35B}cS>a`PNx830=C zm#BXOoFaxpBH|*HjZTXZLUJ>@T3|40`nR}!3l&R$lFDdj5l5ST&V?^h zx%^f&1$YlFVZqrK81;Q5trP*8B+`IH^YtMr{L;i$J@wsD|EO7nyY`k6cO^8b&8pzr zHep`rn$`-Sex;HMD7kHC>+H$f|h{5jFXN;r25FXmlD36>EJ3I_oUYo!y`6>+jclAL5?$rCYO)H2upv#6I4=GZp z<@l<1-(+OBS}9PjzHB?tR^0-E^Z%}It{yZ=U|L+HjP6F5l9Ubst_WX1U{-2H^0E7GS82c`_9sZ|J z00m!83Hb5a)u6%)|v3H<>w8e>ORvRy}Q9_7I^r}sU;Y|9D=EfY8e z^h=N7j*HAdAiQN&%b%07-D^rA5199ADCsbe$8Jpr1hTds{Fd$@Xa>}ZBY>jxld|x! 
z6xYD}q+6fD*UPP+ugVxY9z7e6ftbX!k4b}Bk#wK-?&g7S+CEZ((lH6?(gE?+0(0x% zLSkpBJGrF$uTD|^d%%>O?=eXAF)8^!s=~9q4c5D5=)-q!Kw3KjWQp(1wLEe~JZ&_C z#_`F3F@rY%E*b#@<+i2ircqLXU+?5Y6iJ&h0GY2n>l0}_zg2PYvQw+=n-{@T5!m!#s`7r;|2`#7$5B+)g;uUiSiew8d7 zjE={fWmg-i3uJymUofxD16onbT`1>dJh5Fd(xL(8U1<(Df!Bx`VIR=ZOI=y-wI z0rXJ~$g|!Tdn&q!di5yz)gY!-D(tvDQPf~e89YXwhJG&1@tA8AX4?lr=VBj)ky8yI z5gz`b1qXJv}v0pierhGb8F_akgB&qsc{Rruu> z2|B!zd~V=Qz%3OJ__HL<=o1kBjXv!?1}I;NS2^U&#v4WjchdnCar3vNm8b~pYqJzB zZ4L_idbRRNGkjOzTxJ2I1%&B%Sl3>%hO#qZVslfE65V6;=|q#G@nc|M)+3AhF<|}M z`0v52^fM#(DXmdX6lt8`)yMo(O5iVAl=K0D>I1Z%Ni`xaAj+|5764xlANpS)N@-Kv zzT_4^qG_^#&-a^6Qh)PVeZ?)XlKiIq^R30mi5)~iTytLl>%qb!2CQmt?c~emG52N@ zAdg8+I$DnxZQX+V<_EH2Gs6SO0ePM`psb^+@Ci~#y7%c~8Rr3&>KOaE2TO$cAeoQt zU%I;wr*rV^g3f&b$t(|m7XowVb9_=fu@Sq$&y5A$qA zUACpv00Hor5kc*{|75M2V6*hJMmQML0!)Tu3=C^VIRiWOGjT;IE$Q#u0lrzFr3kIHQzkhAApo*9vkx3k;<9iEz{ zl;Lm%;#{-zHYN2RgOS}rB>Iwq*|0JmQ{HKIwDXvGEN#Xfht)H_Tq1>zhjH3cHL6%n zg%`=s<%$z$UIzA&5c3f=I6V4K0+IMXd@lf2k&8ArAj5XteS09o0bpr#9KPX`@T%1w zwC)Ou61C;T65RH9Stw0t1FLScf%Fy*0 z2$F%ji_~rB29|@g;kbSLs?c+Pe&ahpC%Ov@1xPf~Ts~*kI((1uqMmK3W9X?dp~Oba zC>5JTSHO0H^QMHK`Rub}(@G+e!C20Ns@jc2#Z2hkt}*d!e{J{{p%Hi~G_3%#9a?8N z^A;4oe#=+P(h-Hqa~bSwf*Bl&evVH|bY4wuVKaATrS&i%c<O>Wgfs-p*JLY{a|F6(XvCa%G`={q{jB)Ghq!?(B@L$)jpPjqZxAfxt zrtW)wPaL-aCN8=|+Tt^y7&BLR!W8!jcP*W0Jw7ZD^bC}Y4YF|36dK{4Xm-EAIaz8O{Sz;0-1tzY69^Unj*ueFeW3)fYHp zk$06GN^%%{oV?d8DsuIv11veYin2Cw;< zKD(7xNT9rkie4Ere*k8RRs7Vq;#XecEww-%@MWHssCUxg@}H)maHT2KKP&GCvFLIi z#s1Qt;ICJ%%-2HSZNZNykHfQpaP-v);*N<_W9ionP!lZ|An_st5bah(7e0-~B;v6% zHvafuXDkG__t`n$*ZSi9zEuW)jr5}eO$>bwts#wZSJ1E;>SU%c-3z|`bC4!0K!|cZ z8K6%Kj`&mt0QDe`_Ar4^CS<<2_^ezzkdcl$EfUJ|@kHG}ne9qy_Rl=mNew_9A!}4E zy~7cT8r!+rwCDj&>%n#Z%1q$p@wYH2BH_^l_^KmhmT!`_K}2RPnE zV_${TO;GhEV00rY&QDeX?wD?W|?lmkAJo&$2*aR%LedlJYgTk0^d^3BVt}Tn-tUSE`O#EE8KqF1O-j3tuB)&AIExd`tT^s5%NnRU<_)3q30A!scu!q+vf^D-8G+)P z^KAh1Nu$*{v~8rX{COUGI&3|iOM&bcmy`6d>PP;ufCHdj`q~WpeS?WZc$3dACc)6f 
znU@a_i9k{13PGX8%FE~M>#utZ${%{2iYk{W8BiPQnhj5WqR1xiig9GX@$Db)IyH}-RkgYj3c2$ETD<*!E_V72f+M<3gz7tB9Q?%@tAcP zI|ivKtdtEs_}w1&^EgUpgpRW)kQ26%_maT8sSAr2H~TqZ!2Mf<(dK|m`!zGRL-@)= z$gw=v^U$Y5n(Oy7-Yk`xicpgEy!!3IlXj2kW#-dDc$Bml31=@OTZuN%S)-xc8lk2U zv3s{(?^NWBJ-K!6!3@@+d)L4$%pX4QDCl+Ooh(0byLO^XetkjC7z{ix01XO)pE zg?b_vz&GnG;*zdZ<9-7$%r{c$cqY_~r|eXJPMTeH^YDX4v)BiWfFi4$4x3i~aOr!6 z(Zo-AUonVuXo{N9^AagIuGz;(yrfS-&&k-HfRP%C7&IkLJe!7<>|I;>TK;X;*2p28 z6RC%AcY<{k=Gwo9s^EjG5b{q|E1&w?kFP;vZ zlxM)j^8`gH$ysbS^bm)Vt$F3?)k?M zxqiIN^?`~4B%7S<2u}g>D12^=~s+$K;2VdQ0X%fyH zH^K=+eQRvBc{+6@ClBbR4t}@z;{O;UNIFnf{|VO&wc^?v=26PEJpt|A_v&ANQVu`P zQ`^0=d++$@tj+S04ejK7o7!p$nXJ&M{;HVifg}BPsHGh97XHPix3=o5+eX1{;kecz z$IcQQ(mgDWPDNxR&=dV;=ZPZqDJZtI0}%sV8?5?!&c|vjxFiUip87cV0y7@~{xJMi zt!U8u2Ug&s$r_#ICbsLzb6G1uQ?BCeNt#`Dl3GR$`CjjtAH{}8StlPQ8^Gt2?o0!? za2$TcP0Z!Pd$(TjEvz)cV3x>HiYmZ#TM@m!EOUrgN;|e}gHGlvjj7mTO)--|`^mIl zoj%p?sIPIhOIZ_#6Tgvc2>>T%QI!mF>4)mL&P8a*?nD*0pl0gc`_FHDrJU4DRo(T~ zOt0jZfkrrf1@69xPi~XT!h?_OKaXFlv(fql1FYgB$?!xGGSYS-Ffw4gl-kDfE;cG} zvkgS#WF2o>I+YlH@@}jlcdYpoFwX_mu@AYB>o5)F|333fJ618sFgJwWokD?Ns9x!B z>r^r(g}6g1O#`?&5+6!Sbf3Io9cG)o>wUNb5)Iu-Gjv0iqu5nJtDuRpJeIQ^e9GdQ z=d~SdpDuEIx_%89wFkfu zXjwM=f_iMo^A=J=lwiT<+?5#iX!DB#+J^mfA%mR|?N7eRj$lr({s8XX>MUM#zCNZU|;qWuSQyAzL~5&`j5zJ7uDAXbI( zC6$}5Yrp$K56EldReMG6_002pvbr6hIh*)kg@Y9oge819@ETEW~00XnAT-Oq}mKi7ca&>AeoDw*;d0Fl+$qN`2+CCQXL==iC1J&Ye$5= z%t;(*4h^X2sgLMQGnqLr2h}zBjTVmuuH%+Xe1*GWv00j-2L}_tk+I`EI_%`Ff6*;0AngRJVX+=XeiVKHrv>4^&@cO8 zgN)7NyLrNW@j4BSXQfJ?z@y2k++k>mVPECws%%Ww=Yf+F7An7mt&~5{WbQi)DIG8m zW?2my()FSkl~guU4jKbPJGmWAIeCy!C9d%hdXz~~x*z^d8Y;fAOU5dcBW+n`(s3&D>6+!CuJ%N+Qi2dh_I@%9-mRKBtw)2IXX0$xQZ%1<57ykfIO; zJ?MB(+WCk2G!T(>hZh=1wi+}nSjF6J>v=U1w$2vWR0HBkZ=<6kG#_~P5nOM)5$HB? 
zL1%L_-ZNePIX%R56`K2NzyaWcE#a%LWb zCLvACptf=W)H`4sz>eB`13&FKT6K@2Ud@Skf~@XIDr%WA9Iqn*GOlP9{G23-hOSo0 z*Q;nJZBqJJUIU7OzWHL+2pHozz-}H%#a!$rD}5 zygtYX6HXR&TqZSi`wgp|Om(vE_)91#@AY|%Y*7Ec-8#4Ne`f(~a5#E@1s(z_R?Gv& zM#Wm2(?3s#qMJYu>fR7A6P0d>=+H498|LqeLi4kXkUAIlFrc^Io-U-mLchljkUZ@*hJaC@%9~ zi?6PvjfZ%0tt?mkE-Q6jvn3pJPa8OHQcYDCkrU3d7EAQncd_=dcR?3SFK zn)wPpb_trJ{n{(5)WUp>bry6if@G!m5C76npKIDj60e#PM5?28ehT4vU@pj4>aunC zK))vD-6@7gsFlnJI(p<6dtyF&)m5yl8U_qT^E##$0LZo_DdIM$p@X?|20%|`6F)6& z&@u8ZiB97=;+J@#ka^vG2QmPE3RqVo`Ut%mGB%jgH4WS(9DE&erp{LsGKw!vJoYpw zSp9?!KT!DP5!TM*Zh_g-&Q_qU+Iap6n}U3UNX}VPoB8^4DI@7~zIO8{RsZ8}(;4-s zWO!UlY>6dmnnt_dI(+Vne!9n-ssEf*8wDqLp~r$y+t?!)fubS1DBm<{j6MUtxp z_2McEc-s!2FtLFMz$78vMz<^*o3R+LoPE9X#g4RTFDJwz0}APy>xZ{OORT_59Djp2 z6iHStJ3S7nPBp|La``cngq)Xo{756Ag(CHcjRLc{$CI6YVgzyTkJ_ zr@ybbZ;UXR@yA(^k?7d4$Ol1o5g6PG0aipb^mDoGYKgsV#+$~&E<>DCXHzjrZAZ0{ zX=pLC;Da11wY$4sOkcGjwlR06LmRUUN;Y+TW;=2Uv&vC0@q?Chg3KJvhX-kf&AkbR zP^A~8umaINc=tjGwc?XhBJWNJ86Gv^(TY^oDK{OkZ`h>Y6>^T5GAaf@o?P@%7%KGP(T;O#8sW=!7G*!P!@Mr|Q)qa{7 zh8#XU{F&@qv><^WY{orE+YCbsPfy0&Q`FnYsQ-d^u02Qu9PzUCgBlITgl;cO%Eu1s#)J?27#YfVsxLXlek>3}>rM}yU zas13DX8QI?#14*H#CpX-t;Rjdx9rPma7P&ELAcU_Cvvl|9KYtsf|3HY_1@y(O*J;F z+q z13d-r^m4vUCKlgn2PdplgxWi-)0u}3v1hHPt`k*Riz)xs*M3#_m zz^NvogtU`vJ;uA$^7>HKR@Jc?BoWh7*tg4Ab=M@)i~-5UuI(-yp%687BXTm ze>)g2&+ab{R~IJ_aMc1H!7jrr{3pG|1mS5$8Tk0g`lUTmH2sz7%d6G|5SRFdE?}r( z01;U4Hqjv@A`lM^7ESv@SVDk^g{QpHvx51-f4rf-9a#~jRq0Aa0M@o&stTU?z%G&#AuJMRG`AE<@%wYQWh$FhUht(mCJbSDVr&9^ZWQGy_ z`FJacE+r?RIgW)>Z9IljVEihglOW`1oroYL7@FmSYK!)tt6Y@=mv9%ugL5!CKStvE z;?THrgsT1l>`&xzu$wmPRn~Stw?ZFP1D|xIA<=+I%BrP>zQk1^86@2rfrKiu8jAyAP&bsE>1hD}( z*$i3N9(j5%0P6u`7;fk0$`S1B5;HP+9RcauM zws$KlcWMYZ2yMtF2D4h28Y`qArazW~bbFG|*ZzCQED3Qaj{j%Xc3QyIhmPI`)Xp${ zhmaISug@~Rop6IV!*AsSKseI+u;MpGeqFi`|Iy(5p9h7Cx|aowq%~(pwak`m9W)oK-jqE3-kvIEMNib&&+z0Uq%Q4pYz1=1cw z>{_$80t~CxN9PAzd%Y1vrREmDuv|b++$S%KcAKVv9k)ZJh9d3oM zlg)ePY@+qQDZ5U4QBxRQJj~T6OJmM%O2U^jor(R}`k&1j#v|Zn#7GWsEDYFU8tvo| 
zVoES#+e;33Aa3mHE`RR72bQ{}>=(tI*II{>ZMAO%-_}WmQw9CR8%+Y90Ip5uII^7^ zv2CtYLy^sRTZ5NrooO$S%wVR~wp16(CdTyDUCUqj;-li6%PllzUHmrxx<(B)m=tCr zqxXN;aL+=*sj_>q+QZ-{Z;U9}G)NhSp5Y0@DW&GFB@KEa;YzV>s$1X69;@@3DETIw z0|22O?;eAL?&Vc*pg|AkPW;LEBs1xkF1N*#p?7O3X3v zY(bBvhI#vn)GX9IE;h(lDKG%sY!K=%Tdeu&+i}jMjvwA3dTZnPmti^SANFut1MccP zm7jukh8pguL#$4%Vg_SX4X&pmQdaru6)Xz=1vAVnjV)yq$bT7EWc#k!m{>EKuBa`q z(LQ#q#;^~JoQS=9ub?ZI7ySJ}Xnx+Nde z*x`dUSPfCNM;E1)qS~`tYc)~zQLQTzip}Cfj5NlV6O8T(9<#X>T-xF!k1D$V78=Qe z;r1DuTnK+e$?0db1v&}I-kFB|1uY`GuQyr?Md;QfGOz)C)Nr4ZvK!fTj)kOJh%Z53 zg{a}v_k_~+0}ElD`CWMl2Ww0rWxx7QMw9FYEF|kW*8fA?cgIuRzyFscTlPp983)H! zj?9EIva=#9E0IlQWMyTb}2sbS~sAzA_kjN)ssBAC6&oj4HLY zT=chrOoJ(6njkm%-qgGhs~1)p6Y(JxZ(gYNU+l2J5`>|KS33Jg@|#ljLqeV_S3V)U z#D6(A{`xVS3}MHyzxk6(By528LV@?XC@$TPLb!)w1OuBDWifdPBbT2XlXT7~)6D9q z4(2AYz=e=FA9~ES?jl(0`-Nex#kt|$H-koJlUFy_HGSFqvwJrdsI%rCG@tcl&z>#1 zA$yo@CGghPlesko-=@O5D;(0>@4HkVo#`Kn;e?X!S^7e;;{?sT^$cTIjqmBZ)ZR%L zmsp=@lZw_2Rh3!EuS(v|S1lS@5xCoTtw+aKCgPLUO2o`WY-B7i^$wrh8`Eu_l@+cW z`DWg(l-()1et=lY^*> zGVh;EP1&Iw@__QU_x*c(6rvCorqx)a<{vH&J}fUz=jd-I>fbXikOA@5ZLX)!ggNF) zQUlDbnYc5Qq5veA^og&)c92?_<^O)i4;UinW3{OJd0ykcKFLfQFn=?$I(lPbs3k>t z^Ue1+L^KAv?VP)Lu5&m3S|x4L`<-rPuG8pl!3F42ZveNZbFj>M(I(D@atxvxwJ^G zO0*c9Oc|+QnNS{6Fxd!M%sM%`QBfA$NTfQOXth}7F06SM;pH=k=TK{I;2cSob>;KY z;=MhtQhDelPg{5{|6>bexAVD`2YFa&q+2PjcmBkM z%}B6?Z0DQb0t-;Yqvw&$rP_9XhYOpBCZ~QZk2eBQM#K8#phfU9^gUW!+GQS**+ijzlks*2ab50d1s^JA06_h?G=TMspMaaOd*Ek z8_Kxdbqgzj-1md1`ayGYGyW2rc1UG&V@~1E+Y4y!?ooBu0jh61Rv$|lc&iYT2+_9HT0U?&Nsam zGLAqGm54$=4YyP^#QLs}c-sQuRcI7=zfV%FBKbPOj?5CAn&VSabz-G=Xy%51WVnD1 z!BC9?Q}&}-WKjI_=#6>77nh^NV&c(XwJKHT0~j`A@me2a1i>Jbh>YF0yh%37;huR` zj|v`IDgVfF6!tX&1<*2hkTS`cu1Gs=kL2$>yp_-PYtJ5uhaG<8>0qJ*!jJT!-GQdF zL##-*XJD_0LvT`&PM8Iy3FlN&wa_bHVN*#v%jQ?%OCrYskeVc;=B7oWutovm*6R*i z1csXk&x#dBon58~(j}a2G+Hfwis!$}F&-84P3-#2jqQlTGf;}oP;`I_UQA7FFzVK_ zT?65vgOuG$I-iE!Is~1onjlS|z#bppr+o}zM-qj$BX+f`u*+cb32L;hvNJ5J-T`yR zIYqyPq@$(eq0V+%= z^PQkx`^|(Nq{PRk&j6met&{~WRi6veO=}G=I-NMf5){<<848W3E}>i>wDzZ#W_L<0 
z*B{8t5%7(9S_d}1W=vQq^?TuAH$2+8lQC?({e`V;0mE-q@lwTfS0(lwkS9X5+BS($ zVp{o5OkhPFvdpvIso<|owcRa~#_R9`<8>sh$MO9}wcd}(4z<4^f{}zhXljJgbnX=QhMQkulJsuAI~Bj}c_^`*v3fJX5cx7-S!()>Qt>kIWbEt8J{_z9B$4XjUBnu4+jSIa z+v0jpX0q;J6h%f_e2eY5460NYQxvN76sdp;?j5(_UPJI9>G@=Y<`pDAuImq6lcbKD zC&FGCZ)jI7H*i)2H0Dbve+{2O5d^dJARv(zD6H;mW^ckji^8}!)qZbW_D?Qc|ihn(69vPfui zOs%;_1voWm$$S$~ig4^lv-#xS1mak=%`u?KUB(@bH;=&)JA$CdQ(+tTQ$ z;L(-V966nqFW_NnrfJE1rq+nE0*-3Kj8>ZIATl8l;EP_*iw}(0P!o7Om84qgF^|yW z21$6Bn34p6262`#ltA5Pu!?lH(sKUT^D8Z41?F}BW?0dX=o5Dg&po@6oN93oH>#6l zSrz}Rbl3^nLoeWBd{A{@OfICGn&f$kIQDVBPVHEITE5$*(ryGZ-tOs?D}S_?%KhN{ zepzrhrjHb2RV(+Unol`@a;T{o=BAl$()8*F?&YAPpC}i!8X}e2e;J~;#ofx-vL)6# z4OGyn^nJ7z&(GN2VzbeS_nb2}Xu2$*#ymNxC>qqu)vXvhug%p=a_wPm($ldgg!$(L zqp0YP8@)mbh%r&PO{0hpfsx$WHSJVn461of5!^|{S^Rq~4~5=Moo-|ESAWXzwWn7i z$w!G~;EaoXXlV@zSy{GNLF$U_g}wCD7K4yzVTPn*oAUnU8r1S~Py(*GxXY+{HmTPTzO^G-%J|p}YCnT#xJ~%zI847YGQUi#>Ca%$m*2 zmt2!_##;}DHHdeL1iKeU(U2ahHj;en7HM)#X3aEASzjQN=S9x$yM!e#^Ky6~UPIs0 zBvyt$0%y{Bh^00q2;bo)U$j!3Ox<#rda7v#D2e(2uy+O!Wlr zpu5W_wPo?;d9G_;hWa)SS*adQP{JRWeh2Hou>?ZJS);U55q%#u6TAlYI7{;ut#@v7 zE<*(LNV~1#$EF}b-4zG4$qzk2Vc<$38~y7cBysxk^}-?qhv-1*rJBC;A+eYV;?@I2 zcM#KEsm&hp7u(60JR(zAu<9|mYO?w2^&?FiF606|8J)H~a=LUn@ItFuNq zSEAG$@4nA^@NGWj=ee)2&cV(mHd0ues7L)NiE!{9b0~TBun6Dxpi#=CJ7XIsue8V3 zmE6_hIryY@>&o%dd9{p=?5y~cjXF-U5kZ2kVYSGaoyrdp8n{wIFbLVfyfWuV7Mzg{re6eISy6^M35++{es5)wpDy@;Ra3+XWKVKEGd!+UWSGS z^B=>*yyxm$Ni_|7Fq7nx942n<+yXpTuF9%oPLDGsoWjapTHd41(Ab=G?-ULDFu)LS z0s`P@W6MzbZNBx0*l37<-fYq$S`mKVlI4~RpVADAvkW};m{AJS)>ltMmn;h_G-QTu zq&U4GVWT|FL3R9WjECBBui*FgxzYGSzmG#Cw=*At&A+sGGo2o`XZ7IO%8i%ok9B5; zdQ-n;f?Ibt3&}Lu_+6fiB&)~E8InyzS6{DGHmHZgxAh8$Szh`lycV;w9)V7o9MI&|HEQq{rXW0J`arDEb zz3f@-Sf!omcEw)ZpvgJL>bu-v7IbxLle<)<*@EZ^;u*dKsBqd$raHZsaUA&BmRj!+3RQ zeb7195KN(IMa?q~j1S|XN4Hf^T|}K-7*slToAT(~;#!g-CP{gysh5~FN~m^YLXLl3 z3c8F3UkGGO(j#Y)cu?#!@eyG;^T|}+R+Wv+Le17^`F&6Q@R5<5eA6Q8S`ytd!;hEq zCGP4ckVepK7kIKXf~df*+HET+5SKL)t?gVd$KPc-YQMQRZ@wpHRIlBQ-CrAmvZFJ? 
ztWVQ&53)EGQ)^tB^7cA-=K5K)+|HD-n_*q*g3quGETMvJ!9hKFCP`W@7;uXXdXZQs znHIpNowl{Kw3g-Yb!iD9QjC=2mLPK*O{|uGcSj4qhDcOM-&QVPzf=}!3_20BO~Y03 z#F+*Xj_$9e@a9J13&5&ywY2r&V+dPx-dt+7BFmu z2u_D;m`PS_aDMz|l^AHw$a7WWo=!~2W6iWgw8zqh$4W<`HhC9hTu!4SKtw5VRbxpq z^n9AF5Gb!khc*0C+Z^H>ugddK`U9##CHB|?Rt6K0>Gyr;!-UAoMZ!ZuojJROUGhn8 zp#QjI3@z9)CnVyEalTz2R-?pHzFP-VoJ5f~J_zwj{?5z4bc%<7QyYzNWQ%Iz^9jJuNJms7dri-i}U%KjI0}9!*tmnmrg}YODkj1 za{+1FxosAzhj9aw%850yb?6sOVnv^N&E99_Bq#KOz>l3EQ$s=1PrAw@Q4-la+C&2( zb^U1UE0L4e=K395!Fsqphq!|UdkDQ2FQIvBItKE8H+8BbRFC~Q9_%=<-E98VdS+Q* zzj50(UUBpnyppD{Pv9U=bItTDRUQAArxMWYrW}$e>9FSW;RdH8Z(TfgTinrWJD%(K zEo-xFw(AY5qLfu(6h~&fTpZuJsj~&V*Lntl-)Fg>SU{k`;W(;P18oCVxZv}=ao%iV z=MTkA5|@DHZT0mTszkl;U<Xt zvD}M|{W#p?aGK7r_gS1t*}ce3hic3Yz*ajq{PaRMa@XvVgc2n9^ExOFPHo9R&Fjw6 z+-#nV8A5cwwq^4DQfq=5I(+w$$M|1gnTo(3*5p&2KJzCODO&`IUqzJjL;k2D_8^79 z_fS*i|0|LE*V~68EnX}m)kyx>66X^gOtb%Ae=IA_eHx{G;v%m`VQ40N>+R5qVag{u;=q!z!dv*njdxoyhwty zaJEI!{?lHM@gy-j{RCa|NfpG zjewKKeU%0OFmpIx%OU`zY=>FZAHh}o;}D6yCpOplBdY76g*^SAndYCs)%WLMTAsui z_Wx^2{rYhvL7`_c)oB{Csq9Z60vEVJiS_@vV*i72hV#C@iQjNP)yGVDnWK++`hn>m zK~%bM*ohDFqq#5qarTQN59|N?A4{CG05E6<&=;=&q;X8}{T;U-0E^mDN=+RWw`vO4 z!0EC7Yn(FAWZTVNR+gfM0SPoG*@kA5NShxKB=_C~fmRoyBS(Nnn0`FoZy-&uEP(g^ zN&zC{yz3BU53gNxuCm#D`Sl=QtwxGSgD~Kf2!^R}jn)lHS))ln#txUq+x~ zz-#(75L6csI!Pc0XI=-x>Q0cMI2#yQ9z^s>sjwsWWB`sky$u1~^Ti*)FUGgB?DXcI zrRpm(6c&e*_#_MVuZ#vxhhJZxM35|hcV=|T8YBX3;M8B)d^6kI2Hg<^lA02`rJ9_5Uw5n0%7hv#1UVPaN)}km}QMxfS zr8vdYgAe}?IQR>7R}dv>Q!GfCF3^WLt#r6@{Pq!~Ta*oG74C(X*BvtfC2tp`<(u5eC_O{ULqP(7ftdd(>G`YGF!MQ=Y`+W7{(Pr)|!Q`HS(^cMxa`?JKN1fQp*y5ac* z4wTgmI*^360km-y;j4h~9f|tp`~jz3=oY4kyo)*gMjYDTc+29ky!U)?*?GnuZA>eSDH_hiAMkbbch^f3qICs0b&*qWBfF9r;~tA118fEA$3MyL&U6aGuw(F+_%&#j~?PVES$vbU$d73Q2tiGbR*sQ z)tW&tjSc*MJ?xSH#2qG^!z(oMr1Zk5OhmXBd|-k**|e9fWaIJ>L#-pC_>Y|2w%~ne z*?7B3COhx$KlhWVWQC=YCCb%wqkJ?y_-?3%o|i;mj_7*mqqUGkZ*x}&%Y5jVk4321 zZ5N$hq@B+UbZP0}OdAvd)DtoadOVZR;z;!iV`x;>1qt?%o5BMUI(Q~$BXB9mXr@xn z{;TEV-|KvV8xGf-n*v_2^2kX_yF3}5&OIth<10wv6ng+hW$mRI4^p4LzfvT1s7c3l 
ztb<4(r9!cGS@bxvCH&*dUKrv7C*go*As9>Q{YhgLif>-SLbS7q*Lf;Vb^F2t^!bS; zSYRSZVdnEtd0{FD^fIl@lVpGPf&9mI@Lmfx?eEEfy2xYMzn^u*N($$!Y;26b99Nt~ zv;s8PRH#|M-7GeCeEh5vcA(F!8@MTW=C0KzUV*+$@O%R2zgoqGBa&1J#-k8HpqTd4 zi5UeUUKpNE6lSGg-X@x|5;Wcq*xPls%U?eKh0+rctWc`?7yp|5M~E*^y4bQUtZWq?G6eDy4j5TeyfBS;oVk)u!wYGv1Q=H=qC&|xm8nB} zQc#doB1%=*m$|_I0x`Au$5%xP*tqP-FVkwtykUh*r2srEYykq7KtS_N$YQ&WdJkJd zN1$Wg7kF#V^J=w$GGJP6O^fhU#J*ZfO3DB_Aykto_ zG&kJgE39uA7i4ggnManco~D$|ri?k#R&PCQ;=p{>!-BL&QFSahV*X&nO#cY83fZ2wpx49BDtN`^Dbq8aHBVT1fBwuy0$_q}nZZ~4N z5Y>-4II5QXrN8I+uNPgA&T6twm>-vvTpSMZhbBA;d+*oMI+~R+IRi+D;~a6Cr~Q2o+6~ zExuH4q-dW6k3wptznU0t;Xv2te0fQN>C+x zvOETKWiO0MgGgD(ysp~QX-A-*hX)z)dDTZ+oUvk*j(in#FmW*a!64aSsdfLLdJbx! zX{SegvukH*#Cv_<)!NRzo>{-8k0V3i-(tJIm?7oftz8a_w#v*X=?=>YJl|Dc7M%4u z6np|s^c9>JN6Jx2iahQjPaE9xZ>}$kTd-hHpl5MGpP;Pbg~kLRl|Dy> zh<{8og&k`9bXiHG0DApsJkRSnda+L1+*=%{Isl8bd+G7Hmw>-~4&)DaEH`_Ry72_2 zyocNklhr_TfRD)4tlH}Yh*_=-kjsjXA{?~x+SGvEO$v1ZM6R4X+y7ASrxlF$9Q&c8 z9&zal-sWHA@hYxLeks!`Rc>(Hr9y|QZobI(&m3oo(vTyaY?}DpCgOPmRiJ(f9Hrcb zg*ZS8S{d{m`JF7d7CD#}RNi>NgsDC)I1P==p54BoSBOjl3j1VJH&tpyUf_ir!CJG_NUi9=uA6Nq#VM6&;QuZEU@2pSm#SUp=Dgh zGncN72MTW*@5m&HclL`PzRVC1c#xruuJvU3x+!n@R%zo?lqRxu6mE<&8DOxnZW^2f zo5ZiFK4)z|;~Dp#)#ChZf8^AzBQ$vxN{$bPQ)JNisl~MJ9htrDRa@|IamOFR3K>1| zInC?}d9u_r;Q$*--N4v}K437z1xIpw`p`x>l!A_m{TN#A2yo&2vhyy4R-3Y;xb*$F z@3jj-p_B$vANeidZ#@g|k1+#Brrhc}6rVFf5f@M=D1!fRd9K+Ws7P{9Bf}yZ(8bmi zFmM=_*_=wS9W1p(sA0}T$GF>rspLK}`HV942a8lIm7fkQ2h$ZBn;-ChD+9%9g|sz9 zePT$}_|XB{KG#p*4OxrJ^Q1*JDHeFr_P+gRf+HaYDY1mxJiWpC#$9jA*^|orXiQw` z)4bPYjW-TDG9?j*CW>Y(?TD9#MA!?Vgn2YGjO+&v=1aqXh~t?w>#c~g7&daCVk>#a zu+8E+?&@_2XxRs-Y&!Qyb&MnY521DSLKGFcrtD(^%vn+X_KHiSN+%z zA~{z)Q?HeB;|W$u7tmqKgq$>xi#7N-*`l<M&t-VtiUwf1Rlb%wd*=B;(x)51dW^c^uS+w+gD4Zu3697Ho)m$!Z)EB}n5 z4hI^X$$l$v83<;=!L;v@Z7}p|_uH@IaqoygRgln0_` z!YC=&dY}Jed;U*2Ai))Au+*P?P8nk0v{kLApe#&3;#}Eqm_QmWZrdpWX-MU2;8$ed zgnCl<(PJmBU_bX2#xl$Jlj}C^3E_&Y`RwjyB(=<#^o}F zy0|q?ID_0ROCP2uBeTIwUOgnd09ye0-LVdS)YXf_Q#Z9ko|>aZc)dxev-ziUBn%Qr|UKIoy*COUi=}^Ov=6!|f7=dmN 
z8JwZ72R=p~45b)<9FaYB(53-m^NH=I(rqGjCf41j5@E0VQFNuG5x4tWvdGoFjJ{U1 zaYm&;iTkZGU7CbLJlW0YgCvpE->uDX-yu1%K+MnFAtqqKY2=cFl~LVis4EvyT2>&K zk2X8CvI88cQ5P1oJ1rs@cCVd7NIEGDQUZE_+=1Q9?L}Un>ySjQ>rPoBm zGlrJTrg?nN-AUF8WTs$KJ_!TpT{5-Egf6?Y39U@U9F{64DG}_b)7$Bvaok_86&k5a zNe{H|!FO!^?Q4`*kK67z0n;duf-$qL-K2`pYg2bX%D{ovXj)@=&^ng;-a4GxM2^K} zDl;Jbl;&Up_dOnTuVNAP`Gv@r(+VJ>z*%#F$0fyn4swy&mLsE(g}>y z7n@aQjC>mTy$FsJNV8uHcnm?Ac`1a~hoHf(wM8O(>Xet1LA_{}&|)9I2EjZpue)aN z?F$=(1Dw3s_Y6pqu*J=Yt?4NvICTj&v|APNSMEl5{pr4rQpbhfc`Kqc4H>0`@T+g} zWs$_z^gJ!Xs}TiJ{;iLuK}h7H6KoWpAh4yoVah2o-WU25Jd$>o??*a+fb_X5ZF;+M zEDs}eMK6th`EGnEEAC|>Q$r+45odA4zf&V5Y<&F*fv&zWT#ef*XT5@KO&t}N{5PkI zZOkd$NFEjZVFBf-jP2d@I4`Jf!$lxSGB!E`tJrPV1t;fYQqX_wx1{OU(zo}A(y+ts z$&z+@8hQuddAN-~Zlq`L^h_2q18z)XYsH^%v&W$dMN~IOOjusNe}~|X)!}NOsXrf; zBkeiYZ|sI%od&8hy+|b*XhIV(_Ux#IfJ|dbO!MXTF0Z zzyGa@s<_kp3ci;zEA7$0eU6qwo%ubb?ZS;F_m!K?tqJy<#&gm(9T^pcI!L$j+zRX* z-|sQOFChB01mRTw3x_ma#(#wjfBhPTJVb1guIzuGUJzWR-G-Rzd>{7Z;XgbaIAaj_ z`TzQ8c<;1={?I>6Sa8WB1@w|7KVSb(=gHx~0uB-HKGO&xCBn3jdT;l%DMb(Ff2A$T zwD#2&Zwfz!5Fw_XL{69s7yk{OApv5xD}tg{Iu)kSC>f@!n|=YL%{4$K|K53RNw zxt0PERTmi3Z5O!Dc8@one<5q#LXI?kP6wx;L3H((r?wDpOCq*`eP(tgWXSG&9X8D_ zX1fea1Sd096>t4`b4hj;auU1R@-pzoNg3VIe7T86ZT&t8F0jMB(X$s3Q$LW{Pe&Uo z^EEtG+%5)yNb@uk4sq8BC8S4eDiY4^fJ54z7{u$zn{X*zU|nSD(C?WbuT}~4vr!)6 zprHA0P$EaN=%)O$*CibX(Bc4p6*dWAXu1D&nx6}WQF&;g&%>#GkpQ;0m&~U;9smW( zyYs|5Q@sy({nBp;9AMm%frxK`Le`B9No)cS_anCDRA08 zdyBe!V|~$SBjNA4@#}>I&weRN`%W*t$I97nseNGdyPzd5TR8-7O_zQEl4$)AP`g3v z_y4s5*HpmiXKjGEAZdIv>8CYymF=M(8;|E_KeTl{XVk|d8u-KFHiFYxtp<)@seE6 zG2I*srs_mc<~HyAL<2EsY7M>ZN~+>D#gSU?{d=xBPhW(SXoJ}#43Nes%(z2)E~lO za;@(>hN*xGrb0~#)ze{UEzaOxd&@qVi?3i$KWC5Ut@@AO7DcuZQuS2V_^h0Ln-ES4 z8{j8n3Ki@jxpV)t1-!k=zy-jmzYctXSU>_W%5uG+Y!@S>doidFH?k3~LMMnrt}%cR zSwjwW${8}W!ds_ZS9$_=`}jl=VWnX#(wrlEtO}-t1{@L@x`?UUNdSGWmKBF^fol(J9=qU8 z9TA^K>FJ)Y9l}dk4E7Zg{vZ~Q2prC*s*PIv+}9e%^LT#tJ>qn<3Q!y~)?OsflnxC= z`&2zVIgE{X&B7W}-VacUtu?m49~v)IUGhEm%S+(Y4EKt?XCp!}A??7$Epde9 
zJKsN~Bl$P0_pocpH6;P_t9z=|K%2|`9lp_~46OP>ZZ*zv3eTrH3w01(g?OgvuqOez}D>-hEP^Wti7`I-nnoe3gEQ4@m3Nb@qnAR!qq{eEo zZqMHxXtigxJV&dD&{0T!gwCh3p;ctr6GtU$t67%OBuSL8H8(pI9wOrI*YNUuFMz;gTUi#{g%p6|dq6!sVl^o=k&j93i>%g3iJ zKTGZl=(VqNG5crikjVA=a9{{1N?lpNWM)vms;xa%oIIHq)kj+{T$4xAK|dE9bQ=H}LS=IG$CbOE+?en! zQbGePA+I&#-Ab}#;6t3t@%%*_yVcXz7fh zFe?IqR4I3pw9lo!ylCMoc#f^_^SC2g;7K)TORj&h1A58=&nzf^3p+z>Q+ zS24sSqTu?qMaM`d~8+QXQW~yXq{z;vedZKpI*dV5x|G^i6d9DNKnq{`&(HvhIvpJQmiXU3F zr#@BwHl+H&aEiUnHZ8QVuvvzvn~5kSev9Xpc{JC2vP)M2T47m7@#19I@hn7jXRIFl zHD7oNgFlo*LeH5Ob@5cw)4bB&9B3@sy+l4Bv(eoU=oqh*EX|!yCLE!rrZ?yByD_Gn zYGa8CvlsAPJM2IrXS6c5{DS$QG9Bp?-OYkG0cm%ON>u+5@MM=Ean9g^q1ys;Hf{ba z!2Y6p8?EvtXz2*sZNSJYc=-1Dp7JX=b1PqE!zN;tjaGqtS!(Koybt5?wDng)UAjIvy)f&> zg-g)!LRxlPk9bV`1$Muy>AmJJ)(`Pb5ww{H&z0DmHVUr+Sw;8CHHiSUT@Gm>62vT@ zWm9H8GnI`AD}%iKeid+i%?OXvNxKH=7V3V5(yA0$p+s`vg-0dLQ&DwUE-Byg+#TF^ zKfW^0#kzVtIz^fCHNV_*tpVbec-6c)&6P$|GemqTjfQHBh z8&*ff*{l?rdKovmb&8t*q*DX6K(Id1)~0WQ#dQ%IJe{E-uJss%fF-BMZ*jlnxFt`I zwTD>hvQ;2DqG8h^!-82cfK$Rtj)PM#LvPe~cKTGznG16YgNx9^E`#x`peg2%00-`- zwGB1>7RuiS_~>~HgJrn_dx)H)jFr@ea{Q?dq2SXt&Nsz+o87O1zH)*(!W}9R+(%2Q zchKc5Qf_(7K-Bnw&Z}E4eg#zd;ohn|vK&E|-6IQyusZX^o|*q+wkZ%^QO;hZ7gM_0 z5J04Um7jy$Bh{~YD|pBoD^E_88$ga>sz?+-uW-}|+KBFx84sZ~xqKaVm{bHb&bMTb zzm}>NMNT@{Ft!HOa>*xkyEmf3d0Y#xl{OFwDUw>=E;|>epS&-UD5)AzD0N>`Gh%M} zo~N472|%JfLU}A!Ho8Ik%I^DUK)}0MAih~aBUgSUK^A~0k5~i%sVudTymm#tkuAnJ zH|PS{Za6L|U(F*<)5r-g3m$hPUlUz!VB@~b`d})&Fcb!IWP%lOWadXdnt%E(_v4GT z5sL)4m?Nbjsi@TKM`&v_JaiOLyY6>p}x9Z1oEUCto1j7 zll`zX2xxbJ?^4MUl%Wb)>tj_-f_g}kZpH-+qoviom@4Qe=uVhtGI^9;4QU{WOa&K^ zlO2`^gKWOhB} zF1JGFwQfm&D*G%mFKv(;|?Srvjw_V^_f{KNWXoSeFC+vgY6r6f81VO|~$k zt)%4?^Dejl~9f}WJ}oqJe#Drxg;!7=o! 
zTqqr=P4iq8HpG3)f@kU(TrS>cX)1Y>Ia9`-;a;~bhguWkv4av?!6N{+R3EM zL@4yaW?$CsWg_4%-B_EOwtaFvwe?2GX%e+`McfDjUNTFuz^>MLKP&be;VSI2Hm`X>J%nHGR6eX?IY8!)(cd}2l|AGK90rGcvd^>}>V-kbGL z!Kh7(1~Ggo4c}+e?+~52O&3}J&*7j=1u6E!FoOr0M`ubjMMIN=dPBL%pvRLrQLu1` z@pW6P)G8ibjT1z%A#atLL+I-PSsx>!2sKxAQ!92koMCE3eJkidGugXV@>AuLXh*B0 zw7|3an&4N4DBPsEHD_hW5D0pvlp!(}D2iieP~T5OXbD}=zWwZl&hw3;Q0)L;#_3gaIz!1t<$DKE3}wX%sUZP0i|JN$ zQEzI>Y86gT3C8BM_JRG)IqoF(0-YCDt46?RwN7TBkEhz9YBqj|FL&G0;#kvT`7QE! zKos~=ugzkp0eu3uk7RWX0Y|&;O^!(lu{x(^mols*9qEx>&oBljNz?Bi?>e#AT<0d6 zwkUI3c=qm;0DTGgLv#ezJiDcJL}(qNe2?;8rB2KCu;lgxUc~*sJp)P|5u~Z5dT$Y1 z>BRY-CsVxmXU65WHj%dk)iuYnasT(~C%>HCm6#>0h&hJyc zHs~tYVOu(YYd_&{@?XHDHU)uaQ5-~Ej;0cniqjK}2hDjm5{BRwhs6pM{V3?j?%K$2 zNGJdUAw5Hm!bt3NMUpR0mn$P~YntOydywL7ZU$ISVO799A{Sr9FioeG5h=n|GH+aB zf`%XE1r1b(4rqkrGoaJzx!CXu_yHLGU>l|UQ@e)#tx9qoa7{gv( zaYez3v_|7C3}k#Jrm;SU=s4flnEdFc)(>b8l9tagMNDfZ{9 zCpUSqn-JHUlTu^RI-z&WS)PGJQu-j78Ut1<+>lh6UTmXt5K38@OXS6trI8J6WY@>1 z5jIWuUcog`04>fsXRXrI^uFgCLFMf1woG`;ietoH!}ubbY^3V#PMR z9~J~0DmJJQ3BQR@IUtu^@Rm|%V7)U#J9q56-((`%sEr*d27Lx#KMH-qXW8OKkANdi zT>zuNWQi5)oDuJ1qsP6WX+H;m(_yWB@)vY*P33Pc{Wif52hLGHxEM-KTqD*p`Jh0o zCHb>FiDC`E?)c{)%vw{Jie=7CUAM~;b|kD=|<(`i^5MXuc#$gZf}cb4HP!q7hPlLBsWmH z^wDqKO#JGgNTZ!BY0PaMoDXli5drmA=|>(#tuGC$HJuoXy(0Mkv626^qi4T|imT!b z!iYk=Wur6*eX*=)(MUYtSu7(LdcAR)lch`z{6xGsL{n`HxwH6YCY0r8fQRZbNqbulvyg$<0U~mjO{9u7)^R(oSIJ}!Vz=^m4vqEON1W}ET1qn2n`ooOZxF>^ z#zS&e@L?uxhla8wQ*8PC+_oc51Em-5T4YE%h&m@UI^gbO=rS zu=zRmd}YLW3W$ls3|NQ?ohp=apJBpfL$PQ=IqL_263r@99m#KyemMwPXR@($R-H|5 z7vXO`clvPKo4#UawMRZ(*Oxp(%ee+P&WB&`Bv!M9$8IP<9iZw5@SkHpQn{}Ox^0jj zJtZM9sJNHCGGZ)VTyC*%3Xt`E&h)&y-&^AbIq7U!;>??lzctBFa~rhrH-8OHBP zTiux%xiS6Hz-W8C&Y-*V+u;0%sFoH3^m_@jgt?$U{(24(XRLixeDr%F^`9Z^qXz4Q z&uRLMTz>-GwU>}+;Q#t)C|JD9d&&O$R`YAN2j1X?$N%a@+#lx#=W7qb8RwS*y$Ap1 zfK}We80~UcBPRTHYuO~k6=tQ-9X>hk900o!_8$Ma<$v8MKT-k0yId&w!`y{Q41W!} z1c_6B&*$G?BqC)Mn@WDeKivcL%|di*oGkyF^#mR|5fzA;CKmmMR{T9*xsaN}@1MPQ zd^t8?yAgqh7uA1!U+9Wg7vhunpG((f0Q?+}D@^zs&NVp@i1-itg7$wVK}${)Tv3t~ 
z3rWVfY;cLin=TFz;PZ*oy)wz(vm0e|OW)@LIRi zyOPXJ0HcTfdsqFS#6p#oHZ=3z{KjYQGdl>j=e-lKJ;~MulIyvzMgXRZIpiLBR=aEO z_s#6DF}N*&W_YTG=VT0kwl|x_US5=KqVpC_0$a18OyFzXji1;#j=84 z?aqZcZQ(i1yh32mN^^HZ`aC(O)?WzJsw1pa-V5*FegWZM#I#<^1w%6%h`odiy)mUr zP;wU8`LQmE@R2(!T_;U@QxYgH4bsCa^{l4$ zZ*BR>e)}1vL@Pqp)N>(M-+ZkTSyD2$JpMMg$zCCz(rwFjK_rVNrawI@;MGa@Vb@lv zs00R*FO-1hYas;;gt}J#T9M2q-cnEu6o5!Gt-^|SR$`~a`6Of+$_&O_73%@%D19D{ zl(DZH0QiqiV5Gq^g$1EZSGn=AsWS_sR5Zc!A4tnUF6mk7_T5G5VF;NNFUB3m+yW4WuC*3o z*3<Y399W-2b5(T7>q0eT^Ldq5W=_Me*71d zTd?m+S-d(lq7j%Kh7Yhm-*|0x$gV1$sMUtPUF``FJ~k#OC1Jf_1inE&B977GG32J1u#@%v1u0lbOwGXN+j19zvwMyDs5W$S-a%W5% zC~AjH>0&p;$W((v4=0JB%@JmErnq^I%J}6<>(-d=FwNnor6!l^>g4?XQXeVEB4_74 zliWy8ORu6=(fDPE7csLTKtqhOQ0eNl_PI?ndpqPCHcx#F5Ni)hqtx@$(>J?Lywf1R zo95geoq%)so_1S7(3 zKQe0UovHRL#;>qzRCmGjoXyzX+Sx5ndbc#6{h!e60WXZ}lsyH}H$=yAGqw9}F;Cco zBlm@ki!MdFEW-ISqseWeB+hp^1{ChVG|fY!5oJj##?@+|HcuGE?4x7uHDr?zU$M25 zGK5ffl@A0aG7>Q%@$u*0{HW2lkK|L=K7#wm$W;ux3nBh1p{gYzBFAvYk?>YFp{(Os z6B&{&uIar(pkzkkZY%Y>kgA{Xud7@qVT_)dB`xOhE#H8X z)Llhet-Qwm5<0?4^$pYy=acM|V^@zlGTs}CqfwyDOw!^n&n&c^RcHQWhxQ6pEL|4th+~C z8YxDiB*c5VxJplNg2&1W3~#jQkpt<9(~xqAK*1;tX{bOdoBuZ7+KaK4+^A9#DOIi$ zd<19z;4`Z2d&DBao#Jds%Al6 z>D5m05t^A9lGUlMD6zIh;Y*dU+QYg_`!()%daPl+t%b zI4%;(2;;O6^NiB|Mp%2ff3Lkm1VeR6YudTzj;1(qnCw%(`iKv(#T@Y?Lj8|&(mZp~ zoiW$T0H0c?v3u(FmPDP%wkl0Wn`R@m3u1BHD;GxhOxc^?JvgMH%wZ_`1L24=y5Hz4 zY+m2b^BG|nSTd_fd~xhMfci=R?4ipW($iQyjrZNBm@#Fhw*oV$$~(#f8>Ya&%JA~} zd0{E>EjSMBMN5n7(pug<00M`zs`>UC-1ns3fJbXTLPL2Le)=T=|9oNPzqwn1IZsDZ}$dJDI{pz~!H&V8bcG?yI zN}Jh&cbGRvP8H2|mF2@5NMEe!!#(<6TM9M4ZK~bd@IUjJ+B?H;|8#;KOtBf%n=(pJ zl@w;wF_!EPt3&9Dz}8L%0!1{^E0mU_aUMi7ZXG2#NqH#G55 zaY$2$ChoK`7YSlX%ryFfG~e`Z6Af;$WEJlK3>t9|a}z#|iEiW?sGH)l)t?Al9}O5a zJ}2_JVM@k1(_43-=0Tj?DRC^ewtuHFj7k#eyGJ3MWb{@4;>0B-vd0I6BT!P2mP(Q~ z2Gp}*&BUb41M|72Rlt}dlS^z^eOP#J4lV!4Df5C0w>#HV=+V|YHT!01HC|um5Y}I5 zst*9EwF}QYd}u)Jmy4{0f)2^h7B%O=%_3zLud@$#O5=Se=Gx_m20Dc%8M&Rtr9$&R z04e-)(%rH=a^;iidHjOIAmWZ!+{c^PQy(uEwx%w3KVafw@c!e9Y*rR`a0N{F>qJsY 
zh?1@5E}VO>_(SVqojLi~j`+ad^Bn!<52~I`?}KcvZ&;juqpkIIBxB<}a_k=DI~&1C zw`g5+vvjzX?5a^(urfE!jqRfzZs0hkZsEg=C-TjFs))gnxciJop+^3m^=EwfOGIb_ zhp;FwbW`bwvfHQ+JRlxf!lze|^1-=U5ZHy~9(2uF$!~JcR!6YZy2~*rxYj0Xe-hzq z>_vaP=;J?jhcT35otZQhGV*S_^i3t#5XE~x?n-ROSZAKy`7CdAbo@)Q4bFA_WKYzM z%H7U`4nI|VEVmRs96XX8|H!JHa88#am~#e(?;K$esNq@(8hGYtDEZ(l$5<#M1NOj%O9O77yDw1_i1NI5 zseSk^!D<^p(+i)_v1IQcqY{q9v>t(~%@CaUV(mWr9j|4ssn{*}27$)D73_cnc8l~} zmK8I#J`>)=zl5NLV)rnUzL>Py#TCWyAT)n1`YU*AbWj{PXuBA}-q3MGc;$K%+PQ;h9p#g zl$z)bN&`I1MkDzzYo7YdLYyE658c?Ab3brq&#*w}Yp z>dfg0f3+FwD8I9xi`vy1O+0nk)ym>!hW|#vA*#tdwetNWT7313!!_`^QrZ) zQSL=`G+V*>bXy;Tm2|B7_O+Av_g2N1D=%SLQFJH4FvL_D>w#9~(9^Z$%Tn4~U#F}K zL@8Fr&Ykut`RY4;6qTIaHZFBmy4O2JWZYZ)%3Ipxv$Ey^rhdynZ3q_A(Y!0EM#hvQbL*O%vg$)p=E3_$ z*++Fal@2|3y&5%iNPU}jrQK6BJzSTPWc>gqqo@V0W`qn+PA5jbZEv?ojlF?tS)Rw; zBK1OP9;R~{XVxbwL>SY-;|Rk4j3oF2Xd>(RYK@5gUu|a{6;->we>z9HQ;=q8r9)Cm zItHX0K`AMvVdzjAr9)I|D5Vq_Kw=OS5hWz0k?#CG<9pt7-t%4O-=D7Kni*%!?7e62 zXFtz!$%E5rz0>ihe3ZAI;)*A}Aj2ltkmKa&V4R`LH2r)M=5dnu|4FMUd{K zQuNuY_r^#AmI2vKiA`O+Z)k7}|I8Kt^2x$pN6Rdjz4}+Kno4>unVkR;RVxRYeIj){ z8yUPi$UQ6+MEXS@@`J8>?|e?4g!o8hl=mhPeQ1r(d&u-E_O!?e(6^JXm!@BS8$KPJ zZ8`ZloBuMU$l*3`_eD=LPvBRIiGstWd=))A1M7UwW{#vw!;LN4;amAaJS2fHJ%5?Qm|ITi0{7R@se09@DxGbGfER zU?~SwC3##?&*potbMj=)S_jF}R4tE8L7(|Bj)@2@ ztSoVc`bTH5O@`Q}Y(?L|hVDe+2jREa@KTPrv)_ZQGT%vlQG88*(!p11L2vSm4EA-m zgT!F!z+6?!=HhnKC9cdmo%q{VdH&PmdF@~=itqQ75yt@4Ypg={KT^m7wU$bma;}$fqo{1indPsPXws=bZ#iP&=4`_5bU;Mzm(U)L0R z&O^M~?=3#C5f}t}_UTQ5aVDM#@6z%1U>|n_{qL?V%cciDdCuHcQ&PFT1&u}n8EvKZ z_3fls7u3D8Kab=|No0>-*{sTnI-leGr0*;mj9*xv)bH%2cejji0$FY)`B``?58kBZv|3U98 z5vxp2EdfK+#|c`ywXn)H&TheZ95xudOq27ZK0!oFaJP4dZl}Gis~K!l<^?;SS#ssIZq2#k-@ip}2njePUxA_Mk~i4k)Jxb7$S677X1%iU+=Sq-*XicN}=*o~#9 zX8a^stdYtCDU?*A>HQZJ#yH(&M`v$(dd*=?r{kP~A66=L)5}KNDTwIZZqXJt)T3BM z9g7jgvTo#A(4ltBr+Ub1 z;0`DewSx9jfwS`t_If#z_laIaOwZ|W&l9utitWwH2dxc#Ws0sQyu2!GX(oSebY*P5 z%3tV>`#P}RdKLxm5f0p&h>`ETu!w~bb7tlH94pi3zE!7ByGOsbx;%q_xphI>PI7tJ z(O|PZ*NdNJ;rAZT)>o9kkag&141h#iP(nz 
znSrB9Z8>uC@~{wUZs<-Ow&3!OsQtM`5qUW++~SBmB@S152-i83)^0W4da2W!>0IrX z^f0sbT)`Ih=r9S6C95{ulGfn{10oRebgKDSrSb(H6}f469wU0G^S z01W~s#nB(S97ma1KHm9Ys;KU;b`;XC3~lb5c}9EgF=y`zS|&sOgs98;()4B05FywS zl7b^M&+CE!W$Brz{iNO^MqR+_e2Jed!fqS^ibiV+NwE z)snus%A2oi_RiIUCYrqY-wS=ZG+f!D{ocJdWj;oKes@O(C9VY;V)Q6)_S_e=>`?43 zQw9{r&Xq}@x1CJUToC4$(34NsTT)6N;~ZCg)iJZDF)E{yCirep=f; z7h&&qMR*F_oY!IdXZY+oXV&q8eDF;HNHN8s1f|@^S!z|B--DZLg|-L1-zl&cFciHkIS;vt6SF>#o899r*ltr{ z>#DEQ>m2tID8`{NZ+Ft^4R+5ubrgJdR9Wh*&h+4{(}FtfR)?d05EcTqOs+U+<-?Om)FH(#ve)y)^)((rtI zPot9$lTBJfaHp3z&}%q@w3p7PFtNxz7j-IqyoYEZXjebIa19-K*;XjJkiw}{hO^G- zbhLn%nQo@8z*G9rsxxYSR)0NFDr;8dR??GCS^XE8Y3Gc4C!`U^%BlWedD6UuexCgLRnQIR83|=Z(ciXOlCROZg%y3SAnafi~2! zV%E?-&u;DR)}Z;VpPKUgd*2j$6K-_K{0AUY_!fPNV$;+l?!=DJT|aG0l^5~oJ%!Vr zSRiNVE6AW2*@_&4uDRN|l3#x7h<0&5$0d(1Mrun74X9i6>EjI&VCQPa*GQlUVJ4Ik zg$}O?#r6qaZY>h&n~-IzWr%DXnb5kM8qnnmKUE{DMesBVsw8#Bvk8h`(|Q1-QF=o8 z`VCW&hyCl$L|LMz+Z0*eyPtfktv{$yv1HyXgjJdHW{@)Ji+T!CXvBLC=+Du2-j+Dk zPI$?`Btyds#bqDd2Zce}CwwOCQ!iAg6VyTfmCR;Pw$e_&p zkA(4V>6_bH_J!&*3sT9&Y}V<8O{Yd~*3U1>KhFqNClQ#QA0o~Z^}gw^m&pg8Hoc10 zKtW1^^d#Bg^87h{*Ht7;q#LNeP@8?^_z%1z+(fnGgNl|Cj{Ay@+N@$O<%FK-JJ$=b zrUt*lab%7FVJw--O-31lYn5ep##X0ab2-k9X2yzG#r8Si>k4|DAvgS;gHH+A{D5zM zW+)zC#j>{kc{p>dBXdFsZ})4VgStpwQ(C5Big6C8-&6WojGXlj5y#Qmm;P%{z~m!Z z*JpAJKSgHvbi7}Bi8<9ASH&bylBCf#PmCn0%J}wJpqKSV*kFdfW;$nXg14ogXZAOF zuf_D1VVQ%*F~e}?N+(CV&>B>K%N{WCRjN|hXdL2dJSWy&**JN^2du7|3$mQ=7A z5y9sM`;s#WUTa#R`}$3>)s(+ke>n>jW38h&et=qUJ=0h3LE<3Km3x&&2`P;$-WG9K zjsi_}Z{qe0WLErotCYCbVN%x7q^z5}iSqZ5ffJl5f_F!oJzfv9vR>;qUy4c~^qd}) z`*?w54d#H3v7_Yhs|@52npxXy?~;7HTOIoIw5ZBo!4gzyllWT-wm^NON+!iwAI7Dd zzj5e8`W_f<=ae5Z;*XfnlkEdks%nkfv+HHdCTtlJ0|w?7<=Hbr6+irfDk-=TniVvr zS}(q2nskoL!=^}NH0DOQRb>Izl~$ghzZ5uV8ilz`}sWwg-Ht; z{Y{srS{@f*m0sv1?B-k%2^w8rRS~nQY1{ewJJ{^4P*mBCcq+$gfS1W}q4rx+mBh{}_oh3{myW+qaSLsZ z{hBFj6w48G7nsYSSJ|u6VS%U+S~Johw<$I=7NU~hCf^c^xwsu`&ts)`c)UHyLp$-# zV{2%E0Uts~R+!B`Pqj+uh2nNBdCJb7L5qxzsrgDu@q5E#iJ>`ULH;$ zw0?Nj*hC8Gcf^{jKB95^`3t;TBd>TaM*r3R=1&FLdYxXD5+UoMk 
zIz8VRzEh1`#(Y%Zp`1QEHuH=>Wtl;L6ls?w*=#?W<4iqFS}b_tjB}94m%XYxhSgWQ zK(Fz>3bzC`X3o*Ytwp2_WykAo)8hPGUq_{x)C zU958-iOKUZFPY>|9{ht}blVLbl2O`|^K)tODJH(qIXpHAk19Y5|0<$iOCGzSblgqh zdY!%_7E~#_|Bh>iwlEPsUe#EQ`p|rBsqM$-iupoWw4rz{gAb$OSsqTy!7Zh_cqwDL zD#u3et`Xy@S@F&fJCivvcjI}{FhD*8O9wxvD0Sn)mCg5ab@b+T$IRq6yiz8ZgRSZ)IlFaqgrqbc_CmQ&>iJ>=bHO8TT! zIFRhF;0)OnkEr1#=@J?lo}KxDoimpwc)t3taaq^Gl*oJ3j~SX35GD)RrePyn=&%5n zM&iqdS-H4lHAXayr&`DP@3a!kON*>+`yKFS0)&|}JOY>Tw-RH~ zt2pz~yE&qqh~y}-<w_BON8UZ&dal@B-SeN^5FojRfO~3Q6Zy#I4ujeq{90sZS`KqmTI! z*c7z%+@|}S6;Fjtp3Nf9C&C|YChjpg3dn~jZiJyuaBGFe0*81m5X?`dSY?iHu~iD{ z1o0cQID%};SV0vAB)&#tH%T&NC_T<5l9j~mz5cV}Wm`8_3^mN=~H}c@hxgf zO_aA1-!4zTi8i#{6bYKrdv9m>lOV$YNrq%^qmi)%j(} zT<5Fs%*)Nf7|Qm#?^@-M=s%cyR7ycHYQScwyQR?r4tYXzYPi)##B_l$A{hO^sn#oJJG6o zQ|OHrt`xdpeY?Dq{rGPpx8xb!Rq`u?fyLLA{PP~SKLV5)A7Gs$uc*r6leAZu>ax5Albav>>anZ1-_0!{`o~LuIq;JdJShO70xy79 zeo3daK^!-03E~SVv%3!YeKY=#YT$3&2aq<5ih#o3T9cR2t-qBR|0SDTLMyR+-d0p={=M7()Pof! z13#8wZ&Jyd|M}TBxx<@Z?A1Lo{?}&dv4n3erTc#v`VZ?fyn(R6QusGhhP411i}>(`c4(dQ{cm@?F$K^J7>l#wzHt1Se}#5OqdD_~a__8agKtQ^aa z)!GvjUCq#luIFgqI+)(HvQdkUUI1}qU&Bwd{ENoIQ}O5mxUNOe@iTYHa5m3?h{4_j zm|w)cTlhGRd`ID3Y@tb!zkwN5GXu;8#P<4bq|o6N`Ya8)2Eet;j)Orr1syiy)E!HX z){P}UV1$kMHAl&7%(&CMes`H>a<=zm8^pLw{-jMBF&RBUhgE}*Y)5-E8OkQ!loQ;% z1z9|b^R_DkA;rh!Kij7WE+GY**f3a1}(bhU_vhDqs@4r8rJz0Bv^S=gUkSQpl7zi z#j36uwFQ8eK|(>0z?){U!?_vCCK1x7yTIMaaLG|d{tBlZt>0?{&7mcbk~Lt)OLfn- z8MH@pBm6<|Gr`uFA1-0(w@V&-wDpN-tU=Vwe|_u|npfvE(cr4XYnfw^4rGZKLe~G~xuh@V9`Rtr<;`63rAVa+m<4k_#>lOX#^*JsKXFVPgK8+M-IZ z_8B4C3%lvR4$kcdzfgKVRSR7Sz%~9KQitAMEVO=bNXBb6&W1OVH)yIQ=say!oqPs2 z`yJTDwN0j=OrY#Roj7$mtMn!_|Ewzy3ekdb^#q;=gX;(#?c*cQtw-_+up|Vxg0A}P zmj-k2xJ!=}J2Zsk4TbHmfblNRIts0Sa~2=HLic&mI8-w<;?1`TXtSh-ckt&rDd>hA zU(&Ca060Va{Sf^iIn4`b<0Z_XgSKecrfLDs=_NUA30(RXGUuS3bQCki-L@UB{Gzrp z{xE@Pc@`n&N{_n?015BDe69QNl?5u+&n+4F^1`-Llrf!rpEXnUSDL}eGmqC73fDrs zzW0puphK>VMh0+aSofK!I-%tyDYGwnX1X@+fz%tKLr3F{iz_st0uF#ir*7GLwXLZkcDcKwv~6)>1*eqYvo{`&}6u6wu7B6WrGInVPyA5KMr7G~b0 
z?$6`=hD1LgGxG{5S}}w-WVD;0rBtP8%I1C!-<>~$ez?i)Ss*;{u1~x)s7Ap*fNg2&d0Jv*GyB2}2Y4vVnEYVD)=9 zhJ@MT6bwV-0tc(#DolrBZyXO6GG+P;2P{v}e@1SiQAlVN5nus+;;YF2kZu^7tplLK zyi|IX4e!3Z!Mn!ri^}ep%gz&+>$uB*w4$Ss6t;;JPOm)}4xDZloGFPAbpWGbKhH&R zuj@NtXX9QNlzg14D3Qs67Lo4jjrnK#ej)vnu@s|l-pPxHCM7Fh0_TGIeu)8z0-xUf zE8PTZny9O@UEu$3eu7B-Qm$t;dlWhRM>PsP;(MT~078x~7c+-Fx6q1FuWy4J{EL_6 zRZ=O--N3eG^LT&dS$|3rI=JK;CLCA(Ik0IpR4#lGDmyGNf2R3e~XXYvz;{RtV3RX|)D5+X+~ zh}PuQ7fVCFJT_wurs z#yu_s8I!~(P6oF7dbj}w97o=Y?w1OF(3iIRJMX7X?cYbSGDdL<9#?b@5Z>vBEa|@t zB<9BJfH4KYA-|;nl;2+vTR8Vr?c!NeB<}g7S1+^JP-0iVnP0C>y~oUlxu0f}5EXqc zJL6mTM_)n9dk3c$DB*7xVGSvnsFe8hiL0-_vT(aYY**Q7G%_Axe40H4!>^xCPxBr6 z*adYaAczG&QvF;`L@Jk#h<1y?R;b5CSN#ng&#)vL5noWga=v%h6$9hOH4PO-qmP_k zX9KGW4tnc{z46cVzFUFH{jbVz=SQW(Hz!ET7~7d;8TI4g0=)XCC9y>c+vjp1!6o%g z>iBRy{PH@e$h{H>KNl}+tSZp_D}#qDyyZq1Cq^bj-a(XFg)p)aBXYtb0*(=>T&W9V zprElr&_g|d0CsaxI%v>C2(T;!@LDeEmWzyR%`|fngYZs@c#cZLVx<$w)}MgnSR7OL zLF_XZQdd$GLZ~<54koWoz~y4LdO?eK2K{8mB{4X}ddJHboppy+;Gm7OTRIn%s8Y~h zzkZm~>6=<=d4(1`Spfa@5MQitgB88$JV0Wj~LDA_^ixX!B(i8udG} zp_7pq2I463US@y0gS$?c$yuAr{=KmTv2%H?II#498AW;C*nJ^F%*>tfN`dAEkNEb5 zp@6LvWqd25u%oMvsDigLYnSjsqW{jdWib3rf=<@FpoDu~@$(Ws#-CfAe%N)NY`sL`>STDXvhJFI0d{2D08PMCNXn5eOnVZ;lTk3JZ%I~(8B#q;d>yH z^0*RniPAV^-Yev}uAYjQWd;Q_tk||D*i3$-@oH0nUq2LT!)J@Tux`=E9US}lJ+MWH zqecD1Qk)*Fc6y1{^x1*@$qmODU$2N;n%F75+G`=)_XUg`#Ems{7>!OZg?=gReq*b? 
zIu2T|x(I&HcM+HRf>TXWdx%r^9Xg@SLPNASsG#RQ?}mkJ+HLew+ZgQ?F&6&r%qH1n z@0ZLfA4fo?b^D?mX7^2N7=!JhhSI_P&r_VXn2}fx8$W7mY2yuzp^B+g3_MV8v)E&8 zjfIk(<6udjo)`?Ggoi&{q}*uPgM_a;@KRw57VmQq4ipDP7M1#-ehxQ!-sP#^C0rHm ztl@UuKkcn`c#Snc2W`!66jBLr=K1wLe9;ZE-&WdLGj+MOp1Q<6Y`%H2-S{#m^m*X7 z-OHoz`uDHq&L6vnhFWG^s*oG6Y^fFb?gWdk?Nxr>%K+29>&)PEE$ynE^PcN@_5NUB zxQmWEZQ9kQ%Tiq%o|EIWBq^($;L~b8tn|^S(bhj>GL%kM)TO(UVG(os(oBi`u;4So z^S;Fnnt?iNv4hsGCM&sF)D;+R3+8 zFYB!EUJCYnTG>z^fymhW-a@+av8d5coP#3^_`O;T=L5g5J_x#oh^njct|=>(=lDd?KZ6b!0-NaV-{|Eu$zdB=y;VI6yoFj6m2Kqjq(M*{ zUjPW*pEw|Dfpar}@FW__o$IO8nMLqj>60AsP{R<#Slty_T`K)!rg#cYj8F<_f8i&N zMat{x!bC#y>-qzM55ILCAadd1E~%o?(sjuNjttGkrer-lS?3@$(?{z%>~Ic_d2?~0 z;D!$U!q>&qUu7&aERAGLbS3A&ySVJ8A2jYL{`jH`c0(M$Q&2`+Nv$9x@1>iGQW@MPt3} z5}LMCzo=>*C5PD~Nkj=F-2($+cs{Kq!ZED_zUf!m|y%Ae4a zx^$Gw{coWJf$_}(G+Jl}sK}?{23(3`l=Y?EamAfEIJw5-oSI(gGwE6*LX;BJ$Zx`x zyL|L^Ek&b|?dJ~Bwq$0R=OB}|X+|iM4*u0PLm13-d_a7|b94f&n@IXEh-d9Uw*sW7 zphWBWv7E!KKOmgYD{(xDNgTbV)r-z%yH=g^n}ZZ@^h=Iz?iz(RLmBkhEx=#MXsnE1 z#6jmO7!vrdj`T>^FjPA7s)$ zqs=tpkw+%Wua@(WfCOJ^H2=!^KyIik*v!v=RR2zM%kilvgw1*)r5*9Pz{Tg(GKEEM z(9Ah+@zhq*j@HrI_nWaGX^0k>c>lT7bfwONSmt!8X2Hb0{E2)a)i5WIZI}M%*{vG_G*k%Xq%Oz1*#me18$i|x($abYm+eUNpy3@eBSvCe=H^IbdX6 zE4yeG+$0xjV0}RGK4sy&E`2gY+eKJ0$(Qj+vRzEJ#Pb*xg`YVmE5j1Bt!VweTbF8D z8-h0H#JQqPkw6}?R7aX|b!GHgCnr$L*6(Qy$rxiuo}5tCACY)Cu+^jhHm>fNmesU& zg8DYFy~q#~YRk||Mp>8cId(a(n)sqW!?hm7zCdSOlw&xs^KbXF z)@trDdGmgY-DZy6$ji=1MYbc_;h%PJT94vuaQvSdfx>-G*JF@p0E4HiX(DmfPOO^i z^s#_;7O~BqV8;Q4EvW`KAvlKlqxEyitms(9WWVd34uOTZs$;~Bb{QU}c|7~{uDx)~ zE#M2ZG#}Zq`(%TQi!U#u19(2poRP)0fKpid%tzR`9aOj=?fxoGcH3danTNkPS&79% zBD;_t9zQJd#aRIr@$5YZbKtJb-O$|#WEq&ZU%D-&_2Ueb&*;lX5)9qZ=oVwduLp?( zzWVL$IfInqERR>%$~lNxx!NR$IdLk~(;eo%v*S(hAc_s{Uo<;a3i}YYC!OOJm7+NJ z^UwfbYfq@-?YO9K#KWW$$vg%Pjz)UkBCiIyfg`bcowl@VnO0La#O;{*<&`Lg3Zc4! 
za-jO;m+15a-RBltI%T_B6<0oY%XSy0B++E>_2I7QYY6}$)*Gerq3lJz*Ahpo2!I=P zl}6KUwxg3`;ngz|z#t)^-|IgCU)IURRpC8q7m5TON?B4D|4rKIZ)R`eaH)(UHlYC zAQytZ)E4Y>W{TUFL0x6B9?-hCNmqRQqh;?(z^RWqf3+vtuf>I}&JxxZyNiq+iKcz8 zNl>qreczBv?Dk0%t-DoNj@LYAB&$Ydo3IwLFh5I6s4b|N$%ZD5z6!uITC?b^-egoov?O5v&OY1;%Tv{9Q1b_pGjboS)GeSF2&?@cA+_XhukN2vx_-<=vAqN%^ZI0 z`UI$;{IN1$RYjJDg`sdV{rb{7|*qL<@kB=I+#zatrw2ql4Ty9&mc*;YR@dmI(g zc{~0|xl!k6mI5AA!?h!YV3w~o`F934MD<>_-*K*KKEl7okp7x9vLC=HyFbZ`c+RON zp1oNA#Tn`; z!eQ(D2gmAjfrIssLx{!BwGJ^pcgw!nlobVN+FVWu@TNFVjAV5tIIt11O+@Slk_c1Y z(;(z`7~$ite9FtM>!cE>^JQr22w~p}E4rZ57V*)#`gKIqqu1D-HN2O&EHM0&!_la5 z%}86Sv07mZGS%K@SoqXgPurSDMbUiFcS#eqMbw_Q&j>Xl&ZE0NY;aQc4J>|RB4P+! zw6CjGO?vNCEQ$=br=Jyo#JlANiQiUhJ|*dwhAF?TQGSrDPpx=cOl@5pgj-+R5`-0z zFs+dQbWfp89A&%!$SA_XbE>QYH=lV2XzU@0@m|&r!fb~?T=E}%`ff=#DK9pG6DoUr zkQ0JcTR1zXW_~G5U$!}DBidmr98ZDAW}Ta5#}?+jgy6K#b!4<+M5E`?%5l>qK z?UcY6V%j4VTGk*r$$md;n^bsxXKAt7sxaV7*M7Dl_sao@_{q*9=Tl6YdavWFjzsxf zQf!xo{kDygLUy?a(d;pbg4i=8%+ivS)>Job!>_H*D(3K%_r0L^qZ5^k{+B3yEuuKV z&%xyBTlISbbIMl$Sm92e?yb8m+U-rW$-GG&lGufc0QdFS`8jG{4an-&KIsIt;v5pn z9{D6$q$`FZW?USHb6$fx?^k+)?2V=^JsQ8Y(SBy3MOLnkQEE=?2fEGEQdY;)4qOcU zu?S?u6Czb?-#}2C&J#@>S9a=!iHFUnT_&@qxd3WXA1m*{<-EnF0+r}gr0v{?rAxJ) z$X!uBo?8Lr_GCQ=+e&Khvjs|~2?pddWmMqfzEoqf70q{YGP;mAHYjo688wt=wWqVE zdxR&)@?=>w>k)_uLb&*Mecsp3pj=;Vb^=O(={jNH%u(F#X!4veN~K-AJmt0X?R(NeYqgRDy>KM7~3?b3SHy*$aarbn-Cu z1880-vTCwVL}T)rVA_Ubp3#4{0&zz}yI~814-0=OhKChj9~>$mWu=l7KCBP6kkM)o zK8X$t@LU=u4$Ulis+iMRSg@Ox>l2OQb8MxOuRJMBGJ6>|f{W`Epq8U3y&k(NRJnXO zrq#Hn;MvKCQFcStOgj2&JED%#@phB77YPkx)OJ~l*WD>@TgB!t)8>&yh^~0Pq%5MK zTtpDo+=nvKj?~j47uzMuo*VVYR&xy1l#{T_!q~Fm-6^G_ULOc94he%6mXyj{LKi>JD4)K(->oRnHze4GY-M)8%q)Zt4U z9UO%1jIaQ;(67(;;(d(|RKonNpl3TbXhV95zCY_H|47%?TA!;ul8wHw2mQ3(98788 zuh}XnxWEjA4HL=;gk~U#TDKCQzn%w4epWiaXG7mL8y#b9RMas`tcs(_yfy^&`w=zs z9gffS5*|W^EW|0m8Qd4s8aCizsf197a31Lw(506=4cFid?nAf}I6|=XsJJ0FyO3dY zy&0oNz)I;`uOu0iK!mNZwg#v7$u0uJKO=VxlefB)ME!3bdmyan<8cVO=SWf3HkZ{c z@}s7Ksw;+57-%@oKMK6IN1!n1+6nd*p{^@H!ljV^!rFX(%kuQ@C=g+Ke;koIsVPJ 
z5RJb`8U89;Z(m+0-`5r}#BEbYE^Xm&{;I>@WWk)y$X3~F_f7K0edAR^ zwr%oiH0#KI3e(1N(EEVH5#pKyotK~|_C+t6gUQe^A))g@QG^j@eW3Owy~QTDl|CF3 zmyT5@=H}WXh|I7+hS`I<-orvh6DE~mAfb+AfvV3(-OBa)j_gYBWW-i*%Fj`)f$ND! zh;qJ3+&Qp~qfI8yk>d`|RpQ!M{#dYFg_EtHLYaHV&a%Usj8&7unwrPwY3bhlE}tWi zP^Wio$Oj_~3?QsCT>DiVMRG2*RM`8guc(bj0uSC6aQYpK*^(uTW`@fuh2Ew|dcSyO z`0ZvjCJ0**<;vxbx7*aJ^h=H642Bhug8+u(x6q@@0b{lO?3C({^%=|Vxi)v zJ`yw0C_y=+0*&Yw_GVSd#9BbKe(%EajHGI1HqY3A+MtME8duAeZjJfQ*FWp(<6{#8SqwLfGkIt&B} zZ2^gU$^Pt|GQr(+HZRS@n@>+2H3z+aQDoW_K6Coac#WSyK84lEX-mZ3k?{7TuuPO@ zm_6hn;`-|hrD#RE)UCPSDQEk(c<#Ms!oSS9dJ3}?iGC+W#Ekn7bR6MfTb?HwnV*kP z4|HmB>A0qvFvrg=&_7wy@ST+;q92iXqZEDUoI@5*U+ECUbH{@@5?M_#9AR-(mSP?O zr^zIlrqym@_Dp7Y|GlgC!cXKuzm)uYh4yq^Dr^F+8z(4E*v85Fp5yYmKa~GXIs&IM zzw7aG%fs`6xzb#DN}7-v*@f+p2mgY?-)6#`j;nP#D4rqLRVrZroQ)RGc2!Df5_n{E zc14E=NQ!E4K7B&j;X;++V#Z_lSzRpc9GK`ip64#WS*(d*rrp;*(+P7+aWBzDEDUn& z=!B&uXSEjOghX+QHg~f+AGTO1@fUzoJ%S93Jp}fA^#jvs!5UWA^WZkumfAT7HOwv2rdLIW*Xj}F#nA= zzuk;;oRkSv4ma_`TS8>^2k25-Mh55xY@fyiqy-;wF0qa=Lm&Wh_liTLUo^yiUWqJ? 
zg}d1+!d`GJg54`Q(F+nkd;BhSem#jb2VouSnWK`~X~v5bjrE4R=n#C+J-;rTTV60n zx>eCFPGWW8?kAUX{Y)I)aHSAp3YQ^rajGau!mqdpb9NsJkyG3LB^fi!T zgsi$=1`pen%}_p=oVVhoH}(G;x^@$aDKncMMF6{#`6CVAFV4utajm@k}&QeW9m4r=DJcu;tV*$~jx3C=v!s>GjWI2%=YmBI1>*`ZjqR zEem5O&Ndf!9hl!Ghx`;-x8`zFu&yL!@gF=@>X&w64sE&c%eiY3IQli3k)*PCh*<%s z$m@m@&g`h&1gj44Df}gu<=2mT^W5++%zmfsm9mp&B6~?RvE3d}!%*-JY4eBr#f>is zHfG1a{gm#0=#|U-ZB5||(pBtK_DD@cseSjmL)CYzR(BqR2~CfKzMi?;m!-8k%)G!k zNT0L!Dl&bu$r(MpW%+&Bhng(PVuk|T(x{y_<}d2eAN#Tm$u zl_Swq%{2KIFLcjsB1XJUzqMH7->93vlqd|*w|@$n)d@}BBHv;yBIAcR-3QB#UrKTd zQCW+Wg8}v$$PpqXlF9s8r-R_`{1w$&XabMH@qjJf^D>da+xCIVeb&W`-Gn-~Y8je< zkN6r5r(~1E*aS)UAi819dkjSoF~3U^(@`3&XzjTH;**|@q7XP8=%MM zMZNDi} zF`iR@;r3);<_14;deWX7@?CucDT*>m+^#=`78y_g7@U`G3A(4$s2`qdcONlS+VeG^ zN1oc8sJ74Ngw@Ler|vRO){`+Ax|emZ-vOvDNwO>hKRuDw0D*q;!h@i*ZbEd# z&%Nc+oU{1DZic6_X3;UTufE?&OIYgkfkF=&(uFOA{vLh*(ktCo#9>hLH7bjrz$|$Y z?s)ivWRsZhn;JIJMxlV)63sd;RI4~sL|)m0&Av;%)$G0u4^gG{^RV&I3Qte5c0Zft zwB8Uq4SVJ%xPE$Byptr}k(U=|_$Fr0e3OlZ9idw8y?v!A5A8qM<6D|7@8Z%7>P#x$ z8*7GPjFY8=8)Gy`_6k}Ll)__-8yrS{Jbm&odOFJkKQ5g4sbg|GFqqS$mtz1ZH6u<~z3T&4RKXheak4_-;q8x_-%`&Jt<#CfDnjf82 z^pU{*!{c0IDF)N0BMK`8rBzYBkf1g=yXPx!(-Azlq-~L~6yHUCD=2u0*yqQr1!fuJWR8VDtJNI4%tkXDk#dW9h zpQakC)8*bb$Bn2@ApB{DQms zTm(KYE%n`&blz;vjuNhN@FK>*P$_CIE{pFX>B&*I6$o4^A38&^`IJz=Qn|rH%)QzJE3ze|eT#@+s`~GM zh0g>y4Gz37Ghou;O3`JkJv6G&+!?xV2%D;LRPuJT?bQ3B_sQ}NYN!Az3|C2 zI3`N{=f|VJfg7aWYr13saN49r@*uXKIR+W1fS4>Egpj%n$g`DOGQlMtlA` zp6N;$_+3#4f~ua0mhlF-GwPBmf{PZpeGBg@VVzi&bjv9DP^xb{7xBIfLnreKydVF- z728dNWaEx?cjA`kkK$9f$**|jcP!4Q&9~9N{hmaF}bibehkuCx+PyfY>`lm`1Oao=X`Ni;o&=>PBk>UUOH%SesqSV|( zzM1|{fe|J=*uK6BGr-rACm_3iI}8I8>j`GeV2Bd#-%oM=dJz61OnK>T zcH6(6Vq*REpu?(zt-b(|X#e~5S+3cq%>rCP|L1iCt!Li`w-NeV{`vkULP>{*3H@6C z=k*O-CebrO>c6(?Kkt8tMFP43yA=QD_5ZJ%O>q7ks}vKzj1B|*(NNV Date: Sun, 28 Jul 2024 09:55:01 +0800 Subject: [PATCH 086/257] [docs] add new_committer.md (#2390) Co-authored-by: tomsun28 --- home/blog/2024-07-27-new-committer.md | 39 +++++++++++++++++++ .../2024-07-27-new-committer.md | 39 
+++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 home/blog/2024-07-27-new-committer.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md diff --git a/home/blog/2024-07-27-new-committer.md b/home/blog/2024-07-27-new-committer.md new file mode 100644 index 00000000000..44128c6aabc --- /dev/null +++ b/home/blog/2024-07-27-new-committer.md @@ -0,0 +1,39 @@ +--- +title: Welcome to HertzBeat Community Committer! +author: Calvin979 +author_title: Calvin979 +author_url: https://github.com/Calvin979 +author_image_url: https://avatars.githubusercontent.com/u/131688897?v=4 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system] +--- + +![hertzBeat](/img/blog/new-committer.png) + +> It's an honor for me to become a Committer of Apache Hertzbeat + +## Personal introduction + +I have been working as a Java back-end developer for three years. At present, I participate in the development of converged message center in an Internet company. + +## First met Apache Hertzbeat + +Reading code and design in open-source frameworks is one way for me to improve myself. One day a thought came to my mind, which is to contribute to an open-source community. Then I met Hertzbeat in an article on the internet, which is completely different from Prometheus and Skywalking that I had known before. Hertzbeat fascinates me with its features: it is agentless and anyone can customize monitoring by modifying yaml. I forked Hertzbeat on GitHub and tried to run it locally for further study. + +## My first PR + +I failed to start Hertzbeat when I followed instructions in CONTRIBUTING. The exception shows that Node.js version at least 18 is required. Therefore I modified CONTRIBUTING and opened my first PR in the Hertzbeat community. + +## Keep active in community + +I'm getting familiar with Hertzbeat as I read documents and code.
In addition to perfecting documentation and fixing bugs, I completed DNS monitoring and Registry monitoring. After that, I started to develop new feature named http_sd in V1.6.1, which is supported to manage monitor automatically. + +## Reward + +At the beginning, it was only the simplest revision and translation of documents. Although it was a very simple job, the community was really welcoming, which made me deeply feel the inclusiveness of this community. Later, while developing the new feature http_sd, my conversation with author tom gave me a better understanding of the overall architecture and responsibilities of each module of Hertzbeat, which led to the completion of the new feature. + +During the reading of Hertzbeat source code, I learned a lot of code writing and design, which is helpful for my personal growth. + +## Conclusion + +It's an interesting experience for me to become a Committer of Apache Hertzbeat and it motivates me day by day. I will continue to contribute to the Apache Hertzbeat community in the future, and I hope that Apache Hertzbeat can successfully graduate from the Apache incubator. \ No newline at end of file diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md new file mode 100644 index 00000000000..562ecc8eee7 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md @@ -0,0 +1,39 @@ +--- +title: 热烈欢迎 HertzBeat 小伙伴新晋社区 Committer! 
+author: Calvin979 +author_title: Calvin979 +author_url: https://github.com/Calvin979 +author_image_url: https://avatars.githubusercontent.com/u/131688897?v=4 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system] +--- + +![hertzBeat](/img/blog/new-committer.png) + +> 非常荣幸能成为Apache Hertzbeat的Committer + +## 个人介绍 + +本人自21年参与工作至今,一直从事Java后端开发工作。目前在一家互联网公司中参与融合通信中台的开发。 + +## 初识 Apache Hertzbeat + +因工作的原因,在空闲时间里会研究学习开源框架的代码来提升自己。无意间萌生了为开源社区做贡献的想法,也正是这个时候在一篇文章中认识到了Hertzbeat。Hertzbeat与我在工作中接触到的Prometheus以及Skywalking不一样,其无需Agent以及可以通过yaml自定义监控的特性深深吸引了我。为方便进一步学习,我拉取了Hertzbeat源码并运行调试。 + +## 第一个PR + +说来也奇怪,在CONTRIBUTING文档的指引下,我尝试去启动Hertzbeat但没有成功,启动报错显示Node.js版本至少需要18以上。也正是因为如此,我修改了CONTRIBUTING文档并提交,最终促成了我的第一个PR。 + +## 持续贡献 + +随着阅读文档和源码,我越来越熟悉Hertzbeat,除了完善文档及修复bug以外,还接入了DNS和注册中心监控。在完成了注册中心监控后,便着手开始更有挑战性的http_sd新特性开发。 + +## 收获 + +回顾一路以来的贡献,即使一开始只是最简单的订正文档与文档翻译,社区也是十分欢迎,让我深刻感受到了这个社区的包容性。后来在开发http_sd新特性的时候,与作者tom的交流也让我更进一步了解Hertzbeat整体的架构以及职责划分,最终也能完成这个新特性。 + +同时,在阅读Hertzbeat源码期间,学习到了很多代码写法以及技术点的实际应用,对我个人的成长也是非常有帮助的。 + +## 结语 + +成为Apache Hertzbeat的Committer于我而言是一次很有趣的经历,无时无刻都在激励着我。今后我也会持续对Apache Hertzbeat社区贡献下去,也希望Apache Hertzbeat能顺利从孵化器毕业。 \ No newline at end of file From a9827bbcef7bae74e301a79491f1dc8df33120c2 Mon Sep 17 00:00:00 2001 From: Calvin Date: Sun, 28 Jul 2024 10:35:01 +0800 Subject: [PATCH 087/257] [refactor] move code from AccountController to AccountService (#2373) Co-authored-by: tomsun28 --- .../manager/controller/AccountController.java | 78 +++---------- .../manager/service/AccountService.java | 44 +++++++ .../service/impl/AccountServiceImpl.java | 110 ++++++++++++++++++ .../controller/AccountControllerTest.java | 19 ++- 4 files changed, 185 insertions(+), 66 deletions(-) create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/service/AccountService.java create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AccountServiceImpl.java diff --git 
a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java index 54fdd517e82..29a53fbe5da 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java @@ -19,25 +19,19 @@ import static org.apache.hertzbeat.common.constants.CommonConstants.MONITOR_LOGIN_FAILED_CODE; import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE; -import com.usthe.sureness.provider.SurenessAccount; -import com.usthe.sureness.provider.SurenessAccountProvider; -import com.usthe.sureness.provider.ducument.DocumentAccountProvider; -import com.usthe.sureness.util.JsonWebTokenUtil; -import com.usthe.sureness.util.Md5Util; -import io.jsonwebtoken.Claims; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; -import java.util.HashMap; -import java.util.List; import java.util.Map; +import javax.naming.AuthenticationException; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.entity.dto.Message; -import org.apache.hertzbeat.common.util.JsonUtil; import org.apache.hertzbeat.manager.pojo.dto.LoginDto; import org.apache.hertzbeat.manager.pojo.dto.RefreshTokenResponse; +import org.apache.hertzbeat.manager.service.AccountService; +import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; @@ -50,49 +44,21 @@ * Authentication registration TOKEN management API */ @Tag(name = "Auth Manage API") -@RestController() +@RestController @RequestMapping(value = "/api/account/auth", produces = 
{APPLICATION_JSON_VALUE}) @Slf4j public class AccountController { - /** - * Token validity time in seconds - */ - private static final long PERIOD_TIME = 3600L; - /** - * account data provider - */ - private SurenessAccountProvider accountProvider = new DocumentAccountProvider(); + @Autowired + private AccountService accountService; @PostMapping("/form") @Operation(summary = "Account password login to obtain associated user information", description = "Account password login to obtain associated user information") public ResponseEntity>> authGetToken(@Valid @RequestBody LoginDto loginDto) { - SurenessAccount account = accountProvider.loadAccount(loginDto.getIdentifier()); - if (account == null || account.getPassword() == null) { - return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, "Incorrect Account or Password")); - } else { - String password = loginDto.getCredential(); - if (account.getSalt() != null) { - password = Md5Util.md5(password + account.getSalt()); - } - if (!account.getPassword().equals(password)) { - return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, "Incorrect Account or Password")); - } - if (account.isDisabledAccount() || account.isExcessiveAttempts()) { - return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, "Expired or Illegal Account")); - } + try { + return ResponseEntity.ok(Message.success(accountService.authGetToken(loginDto))); + } catch (AuthenticationException e) { + return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, e.getMessage())); } - // Get the roles the user has - rbac - List roles = account.getOwnRoles(); - // Issue TOKEN - String issueToken = JsonWebTokenUtil.issueJwt(loginDto.getIdentifier(), PERIOD_TIME, roles); - Map customClaimMap = new HashMap<>(1); - customClaimMap.put("refresh", true); - String issueRefresh = JsonWebTokenUtil.issueJwt(loginDto.getIdentifier(), PERIOD_TIME << 5, customClaimMap); - Map resp = new HashMap<>(2); - resp.put("token", issueToken); - 
resp.put("refreshToken", issueRefresh); - resp.put("role", JsonUtil.toJson(roles)); - return ResponseEntity.ok(Message.success(resp)); } @GetMapping("/refresh/{refreshToken}") @@ -101,30 +67,12 @@ public ResponseEntity> refreshToken( @Parameter(description = "Refresh TOKEN", example = "xxx") @PathVariable("refreshToken") @NotNull final String refreshToken) { try { - Claims claims = JsonWebTokenUtil.parseJwt(refreshToken); - String userId = String.valueOf(claims.getSubject()); - boolean isRefresh = claims.get("refresh", Boolean.class); - if (userId == null || !isRefresh) { - return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, "Illegal Refresh Token")); - } - SurenessAccount account = accountProvider.loadAccount(userId); - if (account == null) { - return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, "Not Exists This Token Mapping Account")); - } - List roles = account.getOwnRoles(); - String issueToken = issueToken(userId, roles, PERIOD_TIME); - String issueRefresh = issueToken(userId, roles, PERIOD_TIME << 5); - RefreshTokenResponse response = new RefreshTokenResponse(issueToken, issueRefresh); - return ResponseEntity.ok(Message.success(response)); + return ResponseEntity.ok(Message.success(accountService.refreshToken(refreshToken))); + } catch (AuthenticationException e) { + return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, e.getMessage())); } catch (Exception e) { log.error("Exception occurred during token refresh: {}", e.getClass().getName(), e); return ResponseEntity.ok(Message.fail(MONITOR_LOGIN_FAILED_CODE, "Refresh Token Expired or Error")); } } - - private String issueToken(String userId, List roles, long expirationMillis) { - Map customClaimMap = new HashMap<>(1); - customClaimMap.put("refresh", true); - return JsonWebTokenUtil.issueJwt(userId, expirationMillis, roles, customClaimMap); - } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/AccountService.java 
b/manager/src/main/java/org/apache/hertzbeat/manager/service/AccountService.java new file mode 100644 index 00000000000..9a47f80c13b --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/AccountService.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import java.util.Map; +import javax.naming.AuthenticationException; +import org.apache.hertzbeat.manager.pojo.dto.LoginDto; +import org.apache.hertzbeat.manager.pojo.dto.RefreshTokenResponse; + +/** + * Account service + */ +public interface AccountService { + /** + * Account password login to obtain associated user information + * @param loginDto loginDto + * @return token info + * @throws AuthenticationException when authentication is failed + */ + Map authGetToken(LoginDto loginDto) throws AuthenticationException; + + /** + * Use refresh TOKEN to re-acquire TOKEN + * @param refreshToken refreshToken + * @return token and refresh token + * @throws AuthenticationException failed to refresh + */ + RefreshTokenResponse refreshToken(String refreshToken) throws AuthenticationException; +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AccountServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AccountServiceImpl.java new file mode 100644 index 00000000000..e59a18f9560 --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AccountServiceImpl.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.service.impl; + +import com.usthe.sureness.provider.SurenessAccount; +import com.usthe.sureness.provider.SurenessAccountProvider; +import com.usthe.sureness.provider.ducument.DocumentAccountProvider; +import com.usthe.sureness.util.JsonWebTokenUtil; +import com.usthe.sureness.util.Md5Util; +import io.jsonwebtoken.Claims; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.naming.AuthenticationException; +import lombok.extern.slf4j.Slf4j; +import org.apache.hertzbeat.common.util.JsonUtil; +import org.apache.hertzbeat.manager.pojo.dto.LoginDto; +import org.apache.hertzbeat.manager.pojo.dto.RefreshTokenResponse; +import org.apache.hertzbeat.manager.service.AccountService; +import org.springframework.core.Ordered; +import org.springframework.core.annotation.Order; +import org.springframework.stereotype.Service; + +/** + * Implementation of Account service + */ +@Service +@Order(value = Ordered.HIGHEST_PRECEDENCE) +@Slf4j +public class AccountServiceImpl implements AccountService { + /** + * Token validity time in seconds + */ + private static final long PERIOD_TIME = 3600L; + /** + * account data provider + */ + private final SurenessAccountProvider accountProvider = new DocumentAccountProvider(); + + @Override + public Map authGetToken(LoginDto loginDto) throws AuthenticationException { + SurenessAccount account = accountProvider.loadAccount(loginDto.getIdentifier()); + if (account == null || account.getPassword() == null) { + throw new AuthenticationException("Incorrect Account or Password"); + } else { + String password = loginDto.getCredential(); + if (account.getSalt() != null) { + password = Md5Util.md5(password + account.getSalt()); + } + if (!account.getPassword().equals(password)) { + throw new AuthenticationException("Incorrect Account or Password"); + } + if 
(account.isDisabledAccount() || account.isExcessiveAttempts()) { + throw new AuthenticationException("Expired or Illegal Account"); + } + } + // Get the roles the user has - rbac + List roles = account.getOwnRoles(); + // Issue TOKEN + String issueToken = JsonWebTokenUtil.issueJwt(loginDto.getIdentifier(), PERIOD_TIME, roles); + Map customClaimMap = new HashMap<>(1); + customClaimMap.put("refresh", true); + String issueRefresh = JsonWebTokenUtil.issueJwt(loginDto.getIdentifier(), PERIOD_TIME << 5, customClaimMap); + Map resp = new HashMap<>(2); + resp.put("token", issueToken); + resp.put("refreshToken", issueRefresh); + resp.put("role", JsonUtil.toJson(roles)); + + return resp; + } + + @Override + public RefreshTokenResponse refreshToken(String refreshToken) throws AuthenticationException { + Claims claims = JsonWebTokenUtil.parseJwt(refreshToken); + String userId = String.valueOf(claims.getSubject()); + boolean isRefresh = claims.get("refresh", Boolean.class); + if (userId == null || !isRefresh) { + throw new AuthenticationException("Illegal Refresh Token"); + } + SurenessAccount account = accountProvider.loadAccount(userId); + if (account == null) { + throw new AuthenticationException("Not Exists This Token Mapping Account"); + } + List roles = account.getOwnRoles(); + String issueToken = issueToken(userId, roles, PERIOD_TIME); + String issueRefresh = issueToken(userId, roles, PERIOD_TIME << 5); + return new RefreshTokenResponse(issueToken, issueRefresh); + } + + private String issueToken(String userId, List roles, long expirationMillis) { + Map customClaimMap = new HashMap<>(1); + customClaimMap.put("refresh", true); + return JsonWebTokenUtil.issueJwt(userId, expirationMillis, roles, customClaimMap); + } +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/AccountControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/AccountControllerTest.java index 634c32a4ef3..cb50ea0e18b 100644 --- 
a/manager/src/test/java/org/apache/hertzbeat/manager/controller/AccountControllerTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/AccountControllerTest.java @@ -20,13 +20,19 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; import com.usthe.sureness.util.JsonWebTokenUtil; +import java.util.HashMap; +import java.util.Map; +import javax.naming.AuthenticationException; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.util.JsonUtil; import org.apache.hertzbeat.manager.pojo.dto.LoginDto; +import org.apache.hertzbeat.manager.service.AccountService; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.springframework.http.MediaType; import org.springframework.test.web.servlet.MockMvc; @@ -43,6 +49,8 @@ class AccountControllerTest { @InjectMocks private AccountController accountController; + @Mock + private AccountService accountService; @BeforeEach void setUp() { @@ -59,6 +67,12 @@ void authGetToken() throws Exception { .identifier("admin") .credential("hertzbeat") .build(); + Map resp = new HashMap<>(2); + resp.put("token", "token"); + resp.put("refreshToken", "refreshToken"); + resp.put("role", "roles"); + Mockito.when(accountService.authGetToken(loginDto)).thenReturn(resp); + this.mockMvc.perform(MockMvcRequestBuilders.post("/api/account/auth/form") .contentType(MediaType.APPLICATION_JSON) .content(JsonUtil.toJson(loginDto))) @@ -67,6 +81,7 @@ void authGetToken() throws Exception { .andExpect(jsonPath("$.data.token").exists()) .andReturn(); loginDto.setCredential("wrong_credential"); + 
Mockito.when(accountService.authGetToken(loginDto)).thenThrow(new AuthenticationException()); this.mockMvc.perform(MockMvcRequestBuilders.post("/api/account/auth/form") .contentType(MediaType.APPLICATION_JSON) .content(JsonUtil.toJson(loginDto))) @@ -76,8 +91,10 @@ void authGetToken() throws Exception { @Test void refreshToken() throws Exception { + String refreshToken = "123456"; + Mockito.when(accountService.refreshToken(refreshToken)).thenThrow(new AuthenticationException()); this.mockMvc.perform(MockMvcRequestBuilders.get("/api/account/auth/refresh/{refreshToken}", - "123456")) + refreshToken)) .andExpect(jsonPath("$.code").value((int) CommonConstants.MONITOR_LOGIN_FAILED_CODE)) .andReturn(); } From 64e156be2a77905d08f74491867ee7aaa0179c2b Mon Sep 17 00:00:00 2001 From: linDong <56677297@qq.com> Date: Sun, 28 Jul 2024 16:58:24 +0800 Subject: [PATCH 088/257] [doc] add committer introduction doc (#2394) Co-authored-by: tomsun28 --- home/blog/2024-07-28-new-committer.md | 36 ++++++++++++++++++ .../2024-07-28-new-committer.md | 37 +++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 home/blog/2024-07-28-new-committer.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md diff --git a/home/blog/2024-07-28-new-committer.md b/home/blog/2024-07-28-new-committer.md new file mode 100644 index 00000000000..26bfc69bba6 --- /dev/null +++ b/home/blog/2024-07-28-new-committer.md @@ -0,0 +1,36 @@ +--- +title: Welcome to HertzBeat Community Committer! 
+author: linDong +author_title: linDong +author_url: https://github.com/Yanshuming1 +author_image_url: https://avatars.githubusercontent.com/u/30208283?v=4 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system] +--- + +![hertzBeathertzBeat](/img/blog/new-committer.png) + +> It's an honor for me to become a Committer of Apache Hertzbeat +## Personal introduction + +I graduated in 2023 and am currently working as a Java developer in an Internet company. + +## First met Apache Hertzbeat + +After graduation and working for half a year, I had the idea of getting involved in open source. However, when searching for open source projects on GitHub, I found that many communities were not very active, and the official documentation was not complete, making it quite difficult to start with the projects. Therefore, I put the matter of open source on hold for the time being. + +One day, I saw a post by Senior Xiaobao on a certain platform and was immediately intrigued. I then privately messaged Xiaobao to consult about open source matters. Senior Xiaobao was extremely enthusiastic and meticulous in explaining many aspects of open source to me and recommended the Hertzbeat project. + +## My first PR + +I downloaded the source code of Hertzbeat and compiled and tested it according to the documentation. Then, when clicking on the functions on the page, I discovered a bug in the front-end pop-up window. So, I submitted my first issue and successfully claimed this issue, thus starting my first PR. + +## Keep active in community + +After submitting the first PR, I continuously followed the issues in the Hertzbeat community and attempted to solve the existing problems. I have successively completed tasks such as specifying @people on WeChat, integrating AI, adding a PrestoDB monitor, modifying bugs, and contributing to the official website documentation. + +## Reward +After several months of contributing to open source, I have reaped a lot. 
I have learned the business logic in the code, code norms, some technical frameworks that I have never used before, and some algorithms. It can be said that I have benefited greatly. + +## Conclusion +Becoming a Committer of Apache Hertzbeat is a very meaningful thing for me. In the future, I will continue to contribute to the Apache Hertzbeat community. I also hope that Apache Hertzbeat can successfully graduate from the incubator and that the community will become better and better. \ No newline at end of file diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md new file mode 100644 index 00000000000..463a89d1a35 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md @@ -0,0 +1,37 @@ +--- +title: 热烈欢迎 HertzBeat 小伙伴新晋社区 Committer! +author: linDong +author_title: linDong +author_url: https://github.com/Yanshuming1 +author_image_url: https://avatars.githubusercontent.com/u/131688897?v=4 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system] +--- + +![hertzBeat](/img/blog/new-committer.png) + +> 非常荣幸能成为Apache Hertzbeat的Committer +## 个人介绍 + +本人是2023年毕业,目前在一家互联网公司担任java开发工程师。 + +## 初识 Apache Hertzbeat +毕业之后,工作了半年,萌生出接触开源的想法。然而,在 GitHub 上搜索开源项目时,发现诸多社区活跃度欠佳,官方文档也不完善,致使项目上手难度颇大,因此开源之事暂且搁置。 +某天,在某平台看到小宝大佬的一个帖子,我瞬间兴趣盎然,随即私聊小宝,咨询开源相关事宜。小宝大佬极其热情且细致地为我讲解了不少开源方面的情况,并向我推荐了 Hertzbeat 这个项目。 + +## 第一个PR + +我down了 Hertzbeat 的源码,并按照文档进行编译和测试,然后在页面点击功能的时候发现了一个前端弹窗的bug,所以我提交了自己的第一个issue,并且我成功认领了此issue +于是开启了我的第一个PR。 + +## 持续贡献 + +在提交第一个 PR 之后,我持续关注 Hertzbeat 社区的 issue ,且尝试去解决当中存在的问题。相继完成了微信指定艾特人、接入 AI 、添加 PrestoDB 监控器、Bug 修改以及官网文档贡献等工作。 + +## 收获 + +经过几个月对开源的贡献,我收获满满,学习到了代码中的业务逻辑、代码规范,一些此前未曾使用过的技术框架,还有一些算法,可谓是获益良多。 + +## 结语 + +成为Apache Hertzbeat的Committer于我而言是很有意义对事情,今后我也会持续对Apache Hertzbeat社区贡献下去,也希望Apache Hertzbeat能顺利从孵化器毕业,希望社区越来越好。 \ No newline at end of file From 
22459c5b6f4a1348dfffa02f925bf0f6550b6aae Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 28 Jul 2024 19:14:04 +0800 Subject: [PATCH 089/257] [improve] add some code style improve (#2376) Signed-off-by: tomsun28 Co-authored-by: tomsun28 --- .../alert/service/impl/AlertServiceImpl.java | 4 +-- .../collect/http/HttpCollectImpl.java | 4 +-- ...eater.java => PrometheusParseCreator.java} | 8 ++--- .../collect/push/PushCollectImpl.java | 2 +- .../collect/redis/RedisCommonCollectImpl.java | 4 +-- .../collect/script/ScriptCollectImpl.java | 7 ++-- .../collect/snmp/SnmpCollectImpl.java | 13 ++----- .../collector/collect/ssh/SshCollectImpl.java | 7 ++-- .../collect/icmp/IcmpCollectImplTest.java | 12 ++----- .../collect/imap/ImapCollectImplTest.java | 12 ++----- .../memcached/MemcachedCollectImplTest.java | 12 ++----- .../collect/nginx/NginxCollectImplTest.java | 6 ++-- .../collect/ntp/NtpCollectImplTest.java | 1 - .../collect/udp/UdpCollectImplTest.java | 12 +++---- .../common/support/SpringContextHolder.java | 3 +- .../common/util/TimePeriodUtilTest.java | 4 +-- .../component/alerter/DispatcherAlarm.java | 26 +++++++------- .../impl/WeChatAlertNotifyHandlerImpl.java | 3 +- .../config/AngularErrorViewResolver.java | 3 +- .../config/HeaderRequestInterceptor.java | 3 +- .../manager/controller/AccountController.java | 1 + .../controller/GeneralConfigController.java | 2 +- .../HertzbeatRuntimeHintsRegistrar.java | 3 +- .../apache/hertzbeat/manager/ManagerTest.java | 4 +-- .../push/service/impl/PushServiceImpl.java | 2 +- .../remoting/RemotingServiceTest.java | 4 +-- .../greptime/GreptimeDbDataStorage.java | 2 +- .../store/history/iotdb/IotDbDataStorage.java | 6 ++-- .../history/jpa/JpaDatabaseDataStorage.java | 36 +++++++------------ .../realtime/redis/RedisDataStorage.java | 16 ++++----- .../controller/MetricsDataControllerTest.java | 12 +++---- 31 files changed, 88 insertions(+), 146 deletions(-) rename 
collector/src/main/java/org/apache/hertzbeat/collector/collect/http/promethus/{PrometheusParseCreater.java => PrometheusParseCreator.java} (80%) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java index ffdca29f9a9..a817d61238a 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java @@ -137,9 +137,7 @@ private Alert buildAlertData(AlertReport alertReport){ StringBuilder sb = new StringBuilder(); if (alertReport.getContent() == null || alertReport.getContent().length() <= 0){ StringBuilder finalSb = sb; - annotations.forEach((k, v) -> { - finalSb.append(k).append(":").append(v).append("\n"); - }); + annotations.forEach((k, v) -> finalSb.append(k).append(":").append(v).append("\n")); } else { sb = new StringBuilder(alertReport.getContent()); } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java index 7264c8c9b57..c2c48f48ead 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java @@ -45,7 +45,7 @@ import org.apache.hertzbeat.collector.collect.AbstractCollect; import org.apache.hertzbeat.collector.collect.common.http.CommonHttpClient; import org.apache.hertzbeat.collector.collect.http.promethus.AbstractPrometheusParse; -import org.apache.hertzbeat.collector.collect.http.promethus.PrometheusParseCreater; +import org.apache.hertzbeat.collector.collect.http.promethus.PrometheusParseCreator; import org.apache.hertzbeat.collector.collect.http.promethus.exporter.ExporterParser; import 
org.apache.hertzbeat.collector.collect.http.promethus.exporter.MetricFamily; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; @@ -375,7 +375,7 @@ private void parseResponseByJsonPath(String resp, List aliasFields, Http private void parseResponseByPromQl(String resp, List aliasFields, HttpProtocol http, CollectRep.MetricsData.Builder builder) { - AbstractPrometheusParse prometheusParser = PrometheusParseCreater.getPrometheusParse(); + AbstractPrometheusParse prometheusParser = PrometheusParseCreator.getPrometheusParse(); prometheusParser.handle(resp, aliasFields, http, builder); } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/promethus/PrometheusParseCreater.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/promethus/PrometheusParseCreator.java similarity index 80% rename from collector/src/main/java/org/apache/hertzbeat/collector/collect/http/promethus/PrometheusParseCreater.java rename to collector/src/main/java/org/apache/hertzbeat/collector/collect/http/promethus/PrometheusParseCreator.java index 8078ec0688d..72b83e33c8f 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/promethus/PrometheusParseCreater.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/promethus/PrometheusParseCreator.java @@ -26,15 +26,15 @@ */ @Slf4j @Component -public class PrometheusParseCreater implements InitializingBean { - private static AbstractPrometheusParse PROMETHEUSPARSE = new PrometheusVectorParser(); +public class PrometheusParseCreator implements InitializingBean { + private static final AbstractPrometheusParse PROMETHEUS_PARSE = new PrometheusVectorParser(); private static void create() { - PROMETHEUSPARSE.setInstance(new PrometheusMatrixParser().setInstance(new PrometheusLastParser())); + PROMETHEUS_PARSE.setInstance(new PrometheusMatrixParser().setInstance(new PrometheusLastParser())); } public static AbstractPrometheusParse 
getPrometheusParse(){ - return PROMETHEUSPARSE; + return PROMETHEUS_PARSE; } @Override diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java index 86f4fe083f8..3757cf2d223 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java @@ -53,7 +53,7 @@ @Slf4j public class PushCollectImpl extends AbstractCollect { - private static Map timeMap = new ConcurrentHashMap<>(); + private static final Map timeMap = new ConcurrentHashMap<>(); // ms private static final Integer timeout = 3000; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redis/RedisCommonCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redis/RedisCommonCollectImpl.java index ef4a5bbb230..2c5a6d73f97 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redis/RedisCommonCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redis/RedisCommonCollectImpl.java @@ -316,9 +316,7 @@ private Map parseInfo(String info, Metrics metrics) { .map(this::removeCr) .map(r -> r.split(SignConstants.DOUBLE_MARK)) .filter(t -> t.length > 1) - .forEach(it -> { - result.put(it[0], it[1]); - }); + .forEach(it -> result.put(it[0], it[1])); // fix https://github.com/apache/hertzbeat/pull/913 if (result.size() < fieldTotalSize) { for (Metrics.Field field : metrics.getFields()) { diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java index aa7dc06743f..dd1c1e30a57 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java +++ 
b/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; @@ -202,11 +203,7 @@ private void parseResponseDataByNetcat(String result, List aliasFields, CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); for (String field : aliasFields) { String fieldValue = mapValue.get(field); - if (fieldValue == null) { - valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); - } else { - valueRowBuilder.addColumns(fieldValue); - } + valueRowBuilder.addColumns(Objects.requireNonNullElse(fieldValue, CommonConstants.NULL_VALUE)); } builder.addValues(valueRowBuilder.build()); } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java index 78aa80d7365..97cf4a5c8f7 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import lombok.extern.slf4j.Slf4j; @@ -173,11 +174,7 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri } else { String oid = oidsMap.get(alias); String value = oidsValueMap.get(oid); - if (value != null) { - valueRowBuilder.addColumns(value); - } else { - valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); - } + valueRowBuilder.addColumns(Objects.requireNonNullElse(value, CommonConstants.NULL_VALUE)); } } 
builder.addValues(valueRowBuilder.build()); @@ -227,11 +224,7 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri } } } - if (value != null) { - valueRowBuilder.addColumns(value); - } else { - valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); - } + valueRowBuilder.addColumns(Objects.requireNonNullElse(value, CommonConstants.NULL_VALUE)); } } builder.addValues(valueRowBuilder.build()); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java index 1a77c793715..236b2d1959f 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java @@ -29,6 +29,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -206,11 +207,7 @@ private void parseResponseDataByNetcat(String result, List aliasFields, CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); for (String field : aliasFields) { String fieldValue = mapValue.get(field); - if (fieldValue == null) { - valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); - } else { - valueRowBuilder.addColumns(fieldValue); - } + valueRowBuilder.addColumns(Objects.requireNonNullElse(fieldValue, CommonConstants.NULL_VALUE)); } builder.addValues(valueRowBuilder.build()); } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java index 90e393c2de3..5e85a4777f0 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java +++ 
b/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java @@ -76,16 +76,10 @@ void setUp() { @Test void testPreCheck() { - assertDoesNotThrow(() -> { - icmpCollect.preCheck(metrics); - }); - assertThrows(IllegalArgumentException.class, () -> { - icmpCollect.preCheck(null); - }); + assertDoesNotThrow(() -> icmpCollect.preCheck(metrics)); + assertThrows(IllegalArgumentException.class, () -> icmpCollect.preCheck(null)); metrics.setIcmp(null); - assertThrows(IllegalArgumentException.class, () -> { - icmpCollect.preCheck(null); - }); + assertThrows(IllegalArgumentException.class, () -> icmpCollect.preCheck(null)); } @Test diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java index eae79039afc..a05b311c634 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java @@ -68,16 +68,10 @@ void setUp() { @Test void preCheck() { - assertDoesNotThrow(() -> { - imapCollect.preCheck(metrics); - }); - assertThrows(NullPointerException.class, () -> { - imapCollect.preCheck(null); - }); + assertDoesNotThrow(() -> imapCollect.preCheck(metrics)); + assertThrows(NullPointerException.class, () -> imapCollect.preCheck(null)); metrics.setImap(null); - assertThrows(NullPointerException.class, () -> { - imapCollect.preCheck(null); - }); + assertThrows(NullPointerException.class, () -> imapCollect.preCheck(null)); } @Test diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java index ea886607246..8756a878086 100644 --- 
a/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java @@ -69,16 +69,10 @@ void setUp() { @Test void testPreCheck() { - assertDoesNotThrow(() -> { - memcachedCollect.preCheck(metrics); - }); - assertThrows(IllegalArgumentException.class, () -> { - memcachedCollect.preCheck(null); - }); + assertDoesNotThrow(() -> memcachedCollect.preCheck(metrics)); + assertThrows(IllegalArgumentException.class, () -> memcachedCollect.preCheck(null)); metrics.setIcmp(null); - assertThrows(IllegalArgumentException.class, () -> { - memcachedCollect.preCheck(null); - }); + assertThrows(IllegalArgumentException.class, () -> memcachedCollect.preCheck(null)); } @Test diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java index fdaf19d9b47..53b5f081c59 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java @@ -81,7 +81,7 @@ public void testNginxCollectFail() throws IOException { .build(); try (MockedStatic mockStatic = Mockito.mockStatic(CommonHttpClient.class)) { - mockStatic.when(() -> CommonHttpClient.getHttpClient()).thenReturn(client); + mockStatic.when(CommonHttpClient::getHttpClient).thenReturn(client); Mockito.when(client.execute(Mockito.any(HttpUriRequest.class), Mockito.any(HttpContext.class))) .thenReturn(mockHttpResponse); @@ -111,7 +111,7 @@ public void testNginxStatusCollect() throws IOException { .build(); try (MockedStatic mockedStatic = Mockito.mockStatic(CommonHttpClient.class)) { - mockedStatic.when(() -> CommonHttpClient.getHttpClient()).thenReturn(client); + 
mockedStatic.when(CommonHttpClient::getHttpClient).thenReturn(client); Mockito.when(client.execute(Mockito.any(HttpUriRequest.class), Mockito.any(HttpContext.class))) .thenReturn(mockHttpResponse); @@ -176,7 +176,7 @@ public void testNginxReqStatusCollect() throws IOException { .build(); try (MockedStatic mockedStatic = Mockito.mockStatic(CommonHttpClient.class)) { - mockedStatic.when(() -> CommonHttpClient.getHttpClient()).thenReturn(client); + mockedStatic.when(CommonHttpClient::getHttpClient).thenReturn(client); Mockito.when(client.execute(Mockito.any(HttpUriRequest.class), Mockito.any(HttpContext.class))) .thenReturn(mockHttpResponse); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImplTest.java index 66eb13d349f..ebe138988c5 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImplTest.java @@ -62,7 +62,6 @@ void testCollect() { packet.setReceiveTimeStamp(new TimeStamp(2000)); packet.setTransmitTime(new TimeStamp(1000)); TimeInfo timeInfo = new TimeInfo(packet, 1000, false); - ; MockedConstruction mocked = Mockito.mockConstruction(NTPUDPClient.class, (client, context) -> { diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImplTest.java index cc4b89a66a5..652318b4e08 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImplTest.java @@ -95,10 +95,8 @@ void testCollectWithSocketException() { .build(); MockedConstruction socketMockedConstruction = - Mockito.mockConstruction(DatagramSocket.class, (socket, context) -> { - 
Mockito.doThrow(new SocketTimeoutException("test exception")) - .when(socket).send(Mockito.any(DatagramPacket.class)); - }); + Mockito.mockConstruction(DatagramSocket.class, (socket, context) -> Mockito.doThrow(new SocketTimeoutException("test exception")) + .when(socket).send(Mockito.any(DatagramPacket.class))); List aliasField = new ArrayList<>(); @@ -123,10 +121,8 @@ void testCollectWithPortUnreachableException() { .build(); MockedConstruction socketMockedConstruction = - Mockito.mockConstruction(DatagramSocket.class, (socket, context) -> { - Mockito.doThrow(new PortUnreachableException("test exception")) - .when(socket).send(Mockito.any(DatagramPacket.class)); - }); + Mockito.mockConstruction(DatagramSocket.class, (socket, context) -> Mockito.doThrow(new PortUnreachableException("test exception")) + .when(socket).send(Mockito.any(DatagramPacket.class))); List aliasField = new ArrayList<>(); diff --git a/common/src/main/java/org/apache/hertzbeat/common/support/SpringContextHolder.java b/common/src/main/java/org/apache/hertzbeat/common/support/SpringContextHolder.java index 37b47994cc9..432a7aaf0d0 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/support/SpringContextHolder.java +++ b/common/src/main/java/org/apache/hertzbeat/common/support/SpringContextHolder.java @@ -21,6 +21,7 @@ import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.lang.NonNull; import org.springframework.stereotype.Component; /** @@ -34,7 +35,7 @@ public class SpringContextHolder implements ApplicationContextAware { private static ConfigurableApplicationContext configurableApplicationContext; @Override - public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException { 
set(applicationContext); if (applicationContext instanceof ConfigurableApplicationContext context) { configurableApplicationContext = context; diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java index 59a7492b86c..cd4ceb5c100 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java @@ -98,9 +98,7 @@ void testParseTokenTimeInvalidInput() { assertNull(result); // invalid format (non-numeric) - Exception exception = assertThrows(DateTimeParseException.class, () -> { - TimePeriodUtil.parseTokenTime("abc"); - }); + Exception exception = assertThrows(DateTimeParseException.class, () -> TimePeriodUtil.parseTokenTime("abc")); assertNotNull(exception.getMessage()); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java index 101a4c44034..c57d569716a 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java @@ -143,21 +143,19 @@ public void run() { } private void sendNotify(Alert alert) { - matchNoticeRulesByAlert(alert).ifPresent(noticeRules -> { - noticeRules.forEach(rule -> { - workerPool.executeNotify(() -> { - rule.getReceiverId() - .forEach(receiverId -> { - try { - sendNoticeMsg(getOneReceiverById(receiverId), - getOneTemplateById(rule.getTemplateId()), alert); - } catch (AlertNoticeException e) { - log.warn("DispatchTask sendNoticeMsg error, message: {}", e.getMessage()); - } - }); - }); + matchNoticeRulesByAlert(alert).ifPresent(noticeRules -> noticeRules.forEach(rule -> { + workerPool.executeNotify(() -> { + rule.getReceiverId() + .forEach(receiverId -> { 
+ try { + sendNoticeMsg(getOneReceiverById(receiverId), + getOneTemplateById(rule.getTemplateId()), alert); + } catch (AlertNoticeException e) { + log.warn("DispatchTask sendNoticeMsg error, message: {}", e.getMessage()); + } + }); }); - }); + })); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java index 71f4d336181..7132776b59f 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java @@ -59,8 +59,7 @@ private String getAccessToken() throws Exception { .GET() .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); - JsonParser parser = new JsonParser(); - JsonObject jsonObject = parser.parse(response.body()).getAsJsonObject(); + JsonObject jsonObject = JsonParser.parseString(response.body()).getAsJsonObject(); String accessToken = null; if (jsonObject.has(ACCESS_TOKEN)) { accessToken = jsonObject.get(ACCESS_TOKEN).getAsString(); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/config/AngularErrorViewResolver.java b/manager/src/main/java/org/apache/hertzbeat/manager/config/AngularErrorViewResolver.java index ad3f3a98553..15d9a737077 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/config/AngularErrorViewResolver.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/config/AngularErrorViewResolver.java @@ -33,6 +33,7 @@ import org.springframework.core.io.Resource; import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; +import org.springframework.lang.NonNull; import org.springframework.util.Assert; import org.springframework.util.FileCopyUtils; import org.springframework.web.servlet.ModelAndView; @@ -134,7 
+135,7 @@ public String getContentType() { } @Override - public void render(Map model, HttpServletRequest request, HttpServletResponse response) + public void render(Map model, @NonNull HttpServletRequest request, HttpServletResponse response) throws Exception { response.setContentType(getContentType()); FileCopyUtils.copy(this.resource.getInputStream(), response.getOutputStream()); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/config/HeaderRequestInterceptor.java b/manager/src/main/java/org/apache/hertzbeat/manager/config/HeaderRequestInterceptor.java index 7c8cf348c82..08a92528f3b 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/config/HeaderRequestInterceptor.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/config/HeaderRequestInterceptor.java @@ -24,6 +24,7 @@ import org.springframework.http.client.ClientHttpRequestExecution; import org.springframework.http.client.ClientHttpRequestInterceptor; import org.springframework.http.client.ClientHttpResponse; +import org.springframework.lang.NonNull; /** * Rest Template interceptor adds request header information @@ -31,7 +32,7 @@ public class HeaderRequestInterceptor implements ClientHttpRequestInterceptor { @Override - public ClientHttpResponse intercept(HttpRequest request, byte[] body, ClientHttpRequestExecution execution) + public ClientHttpResponse intercept(HttpRequest request, @NonNull byte[] body, @NonNull ClientHttpRequestExecution execution) throws IOException { // Send json by default if (request.getHeaders().getContentType() == null) { diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java index 29a53fbe5da..f01731cab0a 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AccountController.java @@ -48,6 +48,7 @@ 
@RequestMapping(value = "/api/account/auth", produces = {APPLICATION_JSON_VALUE}) @Slf4j public class AccountController { + @Autowired private AccountService accountService; diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java index c2857eeac0e..49f48d482d2 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java @@ -47,7 +47,7 @@ @Tag(name = "Alert sender Configuration API") @Slf4j public class GeneralConfigController { - private Map configServiceMap; + private final Map configServiceMap; public GeneralConfigController(List generalConfigServices) { configServiceMap = new HashMap<>(8); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/nativex/HertzbeatRuntimeHintsRegistrar.java b/manager/src/main/java/org/apache/hertzbeat/manager/nativex/HertzbeatRuntimeHintsRegistrar.java index d1d58f01a7d..44d9b3a6a33 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/nativex/HertzbeatRuntimeHintsRegistrar.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/nativex/HertzbeatRuntimeHintsRegistrar.java @@ -33,6 +33,7 @@ import org.springframework.aot.hint.RuntimeHints; import org.springframework.aot.hint.RuntimeHintsRegistrar; import org.springframework.aot.hint.TypeReference; +import org.springframework.lang.NonNull; import org.springframework.util.ClassUtils; /** @@ -44,7 +45,7 @@ public class HertzbeatRuntimeHintsRegistrar implements RuntimeHintsRegistrar { private static final String SshConstantsClassName = "org.apache.sshd.common.SshConstants"; @Override - public void registerHints(RuntimeHints hints, ClassLoader classLoader) { + public void registerHints(@NonNull RuntimeHints hints, ClassLoader classLoader) { // see: 
https://github.com/spring-cloud/spring-cloud-config/blob/main/spring-cloud-config-server/src/main/java/org/springframework/cloud/config/server/config/ConfigServerRuntimeHints.java // TODO: move over to GraalVM reachability metadata if (ClassUtils.isPresent(SshConstantsClassName, classLoader)) { diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java index 14a9a5040cd..3925f317969 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java @@ -30,7 +30,7 @@ import org.apache.hertzbeat.alert.service.impl.AlertDefineServiceImpl; import org.apache.hertzbeat.alert.service.impl.AlertServiceImpl; import org.apache.hertzbeat.collector.collect.database.JdbcSpiLoader; -import org.apache.hertzbeat.collector.collect.http.promethus.PrometheusParseCreater; +import org.apache.hertzbeat.collector.collect.http.promethus.PrometheusParseCreator; import org.apache.hertzbeat.collector.collect.strategy.CollectStrategyFactory; import org.apache.hertzbeat.collector.dispatch.CommonDispatcher; import org.apache.hertzbeat.collector.dispatch.DispatchProperties; @@ -83,7 +83,7 @@ void testAutoImport() { assertNotNull(ctx.getBean(WorkerPool.class)); assertNotNull(ctx.getBean(CollectJobService.class)); assertNotNull(ctx.getBean(JdbcSpiLoader.class)); - assertNotNull(ctx.getBean(PrometheusParseCreater.class)); + assertNotNull(ctx.getBean(PrometheusParseCreator.class)); assertNotNull(ctx.getBean(DataSizeConvert.class)); assertNotNull(ctx.getBean(CollectStrategyFactory.class)); diff --git a/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java b/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java index 5cdd981ca96..cbd22bdb5f5 100644 --- a/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java +++ 
b/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java @@ -128,7 +128,7 @@ public PushMetricsDto getPushMetricData(final Long monitorId, final Long time) { lastPushMetrics.put(monitorId, metrics); } catch (Exception e) { - log.error("no metrics found, monitor id: {}, {}, {}", monitorId, e.getMessage(), e); + log.error("no metrics found, monitor id: {}, {}", monitorId, e.getMessage(), e); return pushMetricsDto; } } diff --git a/remoting/src/test/java/org/apache/hertzbeat/remoting/RemotingServiceTest.java b/remoting/src/test/java/org/apache/hertzbeat/remoting/RemotingServiceTest.java index b37e23ff1e5..6b9831e331c 100644 --- a/remoting/src/test/java/org/apache/hertzbeat/remoting/RemotingServiceTest.java +++ b/remoting/src/test/java/org/apache/hertzbeat/remoting/RemotingServiceTest.java @@ -139,9 +139,7 @@ public void testSendMsgSync() { @Test public void testNettyHook() { this.remotingServer.registerHook(Lists.newArrayList( - (ctx, message) -> { - Assertions.assertEquals("hello world", message.getMsg()); - } + (ctx, message) -> Assertions.assertEquals("hello world", message.getMsg()) )); this.remotingServer.registerProcessor(ClusterMsg.MessageType.HEARTBEAT, (ctx, message) -> diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java index aefd45e0942..d0c499563eb 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java @@ -362,7 +362,7 @@ public Map> getHistoryIntervalMetricData(Long monitorId, Str List values = instanceValuesMap.computeIfAbsent(instanceValue, k -> new LinkedList<>()); try (Connection connection = hikariDataSource.getConnection(); Statement statement = connection.createStatement(); - 
ResultSet resultSet = statement.executeQuery(selectSql);) { + ResultSet resultSet = statement.executeQuery(selectSql)) { while (resultSet.next()) { long ts = resultSet.getLong(1); if (ts == 0) { diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java index 9626ba16fd9..9792ef5c1d5 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java @@ -107,16 +107,16 @@ private boolean initIotDbSession(IotDbProperties properties) { boolean available = checkConnection(); if (!available) { log.error("IotDB session pool init error with check connection"); - return available; + return false; } available = this.createDatabase(); if (!available) { log.error("IotDB session pool init error with create database"); - return available; + return false; } this.initTtl(properties.expireTime()); log.info("IotDB session pool init success"); - return available; + return true; } private boolean checkConnection() { diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaDatabaseDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaDatabaseDataStorage.java index 786ea2d8728..9ab515754c2 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaDatabaseDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaDatabaseDataStorage.java @@ -146,34 +146,22 @@ public void saveData(CollectRep.MetricsData metricsData) { if (CommonConstants.NULL_VALUE.equals(columnValue)) { switch (fieldType) { - case CommonConstants.TYPE_NUMBER -> { - historyBuilder.metricType(CommonConstants.TYPE_NUMBER) - .dou(null); - } - case CommonConstants.TYPE_STRING -> { - 
historyBuilder.metricType(CommonConstants.TYPE_STRING) - .str(null); - } - case CommonConstants.TYPE_TIME -> { - historyBuilder.metricType(CommonConstants.TYPE_TIME) - .int32(null); - } + case CommonConstants.TYPE_NUMBER -> historyBuilder.metricType(CommonConstants.TYPE_NUMBER) + .dou(null); + case CommonConstants.TYPE_STRING -> historyBuilder.metricType(CommonConstants.TYPE_STRING) + .str(null); + case CommonConstants.TYPE_TIME -> historyBuilder.metricType(CommonConstants.TYPE_TIME) + .int32(null); default -> historyBuilder.metricType(CommonConstants.TYPE_NUMBER); } } else { switch (fieldType) { - case CommonConstants.TYPE_NUMBER -> { - historyBuilder.metricType(CommonConstants.TYPE_NUMBER) - .dou(Double.parseDouble(columnValue)); - } - case CommonConstants.TYPE_STRING -> { - historyBuilder.metricType(CommonConstants.TYPE_STRING) - .str(formatStrValue(columnValue)); - } - case CommonConstants.TYPE_TIME -> { - historyBuilder.metricType(CommonConstants.TYPE_TIME) - .int32(Integer.parseInt(columnValue)); - } + case CommonConstants.TYPE_NUMBER -> historyBuilder.metricType(CommonConstants.TYPE_NUMBER) + .dou(Double.parseDouble(columnValue)); + case CommonConstants.TYPE_STRING -> historyBuilder.metricType(CommonConstants.TYPE_STRING) + .str(formatStrValue(columnValue)); + case CommonConstants.TYPE_TIME -> historyBuilder.metricType(CommonConstants.TYPE_TIME) + .int32(Integer.parseInt(columnValue)); default -> historyBuilder.metricType(CommonConstants.TYPE_NUMBER) .dou(Double.parseDouble(columnValue)); } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisDataStorage.java index 0f35cb01a10..8122c882ea7 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisDataStorage.java @@ -65,15 +65,13 @@ 
public void saveData(CollectRep.MetricsData metricsData) { return; } - redisCommandDelegate.operate().hset(key, hashKey, metricsData, future -> { - future.thenAccept(response -> { - if (response) { - log.debug("[warehouse] redis add new data {}:{}.", key, hashKey); - } else { - log.debug("[warehouse] redis replace data {}:{}.", key, hashKey); - } - }); - }); + redisCommandDelegate.operate().hset(key, hashKey, metricsData, future -> future.thenAccept(response -> { + if (response) { + log.debug("[warehouse] redis add new data {}:{}.", key, hashKey); + } else { + log.debug("[warehouse] redis replace data {}:{}.", key, hashKey); + } + })); } @Override diff --git a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java index 995986b5e54..7b4e3f4ba8b 100644 --- a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java +++ b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java @@ -153,13 +153,11 @@ void getMetricHistoryData() throws Exception { .andReturn(); when(historyDataReader.isServerAvailable()).thenReturn(true); - ServletException exception = assertThrows(ServletException.class, () -> { - this.mockMvc.perform(MockMvcRequestBuilders.get(getUrlFail).params(params)) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) - .andExpect(jsonPath("$.data").isEmpty()) - .andReturn(); - }); + ServletException exception = assertThrows(ServletException.class, () -> this.mockMvc.perform(MockMvcRequestBuilders.get(getUrlFail).params(params)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) + .andExpect(jsonPath("$.data").isEmpty()) + .andReturn()); assertTrue(exception.getMessage().contains("IllegalArgumentException")); final Map> instanceValuesMap = new 
HashMap<>(); From 974b27e8782e654a64386192b9bc2056de08c470 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 29 Jul 2024 01:10:24 +0800 Subject: [PATCH 090/257] [Improve] add AlertSilenceServiceImpl unit test (#2393) Signed-off-by: yuluo-yx Co-authored-by: Calvin --- .../service/AlertSilenceServiceTest.java | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java new file mode 100644 index 00000000000..93cff97beae --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.alert.service; + +import java.util.Optional; +import java.util.Set; + +import org.apache.hertzbeat.alert.dao.AlertSilenceDao; +import org.apache.hertzbeat.alert.service.impl.AlertSilenceServiceImpl; +import org.apache.hertzbeat.common.entity.alerter.AlertSilence; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AlertSilenceServiceImpl} + */ + +class AlertSilenceServiceTest { + + @Mock + private AlertSilenceDao alertSilenceDao; + + @InjectMocks + private AlertSilenceServiceImpl alertSilenceService; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + alertSilenceDao.save(AlertSilence + .builder() + .id(1L) + .type((byte) 1) + .build() + ); + + assertNotNull(alertSilenceDao.findAll()); + } + + @Test + void testValidate() { + + AlertSilence alertSilence = new AlertSilence(); + alertSilence.setType((byte) 1); + + alertSilenceService.validate(alertSilence, false); + + assertNotNull(alertSilence.getDays()); + assertEquals(7, alertSilence.getDays().size()); + } + + @Test + void testAddAlertSilence() { + + AlertSilence alertSilence = new AlertSilence(); + when(alertSilenceDao.save(any(AlertSilence.class))).thenReturn(alertSilence); + + assertDoesNotThrow(() -> alertSilenceService.addAlertSilence(alertSilence)); + verify(alertSilenceDao, times(1)).save(alertSilence); + } + + @Test + void testModifyAlertSilence() { + AlertSilence alertSilence 
= new AlertSilence(); + when(alertSilenceDao.save(any(AlertSilence.class))).thenReturn(alertSilence); + + assertDoesNotThrow(() -> alertSilenceService.modifyAlertSilence(alertSilence)); + verify(alertSilenceDao, times(1)).save(alertSilence); + } + + @Test + void testGetAlertSilence() { + AlertSilence alertSilence = new AlertSilence(); + when(alertSilenceDao.findById(anyLong())).thenReturn(Optional.of(alertSilence)); + + AlertSilence result = alertSilenceService.getAlertSilence(1L); + assertNotNull(result); + verify(alertSilenceDao, times(1)).findById(1L); + } + + @Test + void testDeleteAlertSilences() { + + alertSilenceDao.deleteAlertSilencesByIdIn(Set.of(1L)); + + verify(alertSilenceDao, times(1)).deleteAlertSilencesByIdIn(Set.of(1L)); + } + +} From 3487e4a16ba4fe9226f91a67ac4d81ef57f476d6 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 29 Jul 2024 01:24:03 +0800 Subject: [PATCH 091/257] [Improve] add AlertDefineJsonImExportServiceImpl unit test (#2391) Signed-off-by: yuluo-yx Signed-off-by: YuLuo Co-authored-by: Calvin --- .../AlertDefineJsonImExportServiceImpl.java | 4 +- .../AlertDefineJsonImExportServiceTest.java | 147 ++++++++++++++++++ 2 files changed, 149 insertions(+), 2 deletions(-) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java index 894d1f87ca9..3487b44fc1a 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java @@ -51,7 +51,7 @@ public String getFileName() { } @Override - List parseImport(InputStream is) { + public List parseImport(InputStream is) { try { return objectMapper.readValue(is, new 
TypeReference<>() { }); @@ -62,7 +62,7 @@ List parseImport(InputStream is) { } @Override - void writeOs(List exportAlertDefineList, OutputStream os) { + public void writeOs(List exportAlertDefineList, OutputStream os) { try { objectMapper.writeValue(os, exportAlertDefineList); } catch (IOException ex) { diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java new file mode 100644 index 00000000000..d8566ab980d --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.alert.service; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.List; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.alert.dto.AlertDefineDTO; +import org.apache.hertzbeat.alert.dto.ExportAlertDefineDTO; +import org.apache.hertzbeat.alert.service.impl.AlertDefineJsonImExportServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AlertDefineJsonImExportServiceImpl} + */ + +@ExtendWith(MockitoExtension.class) +class AlertDefineJsonImExportServiceImplTest { + + @Mock + private ObjectMapper objectMapper; + + @InjectMocks + private AlertDefineJsonImExportServiceImpl service; + + private static final String JSON_DATA = "[{\"alertDefine\":{\"app\":\"App1\",\"metric\":\"Metric1\",\"field\":\"Field1\",\"preset\":true,\"expr\":\"Expr1\",\"priority\":1,\"times\":1,\"tags\":[],\"enable\":true,\"recoverNotice\":true,\"template\":\"Template1\"}}]"; + + private InputStream inputStream; + private List alertDefineList; + + @BeforeEach + public void setup() { + + inputStream = new ByteArrayInputStream(JSON_DATA.getBytes()); + + AlertDefineDTO 
alertDefine = new AlertDefineDTO(); + alertDefine.setApp("App1"); + alertDefine.setMetric("Metric1"); + alertDefine.setField("Field1"); + alertDefine.setPreset(true); + alertDefine.setExpr("Expr1"); + alertDefine.setPriority((byte) 1); + alertDefine.setTimes(1); + alertDefine.setTags(List.of()); + alertDefine.setEnable(true); + alertDefine.setRecoverNotice(true); + alertDefine.setTemplate("Template1"); + + ExportAlertDefineDTO exportAlertDefine = new ExportAlertDefineDTO(); + exportAlertDefine.setAlertDefine(alertDefine); + + alertDefineList = List.of(exportAlertDefine); + } + + @Test + void testParseImport() throws IOException { + + when(objectMapper.readValue( + any(InputStream.class), + any(TypeReference.class)) + ).thenReturn(alertDefineList); + + List result = service.parseImport(inputStream); + + assertNotNull(result); + assertEquals(1, result.size()); + assertEquals(alertDefineList, result); + verify(objectMapper, times(1)).readValue(any(InputStream.class), any(TypeReference.class)); + } + + @Test + void testParseImportFailed() throws IOException { + + when(objectMapper.readValue( + any(InputStream.class), + any(TypeReference.class)) + ).thenThrow(new IOException("Test Exception")); + + RuntimeException exception = assertThrows(RuntimeException.class, () -> service.parseImport(inputStream)); + + assertEquals("import alertDefine failed", exception.getMessage()); + verify(objectMapper, times(1)).readValue(any(InputStream.class), any(TypeReference.class)); + } + + @Test + void testWriteOs() throws IOException { + + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + + service.writeOs(alertDefineList, outputStream); + + verify(objectMapper, times(1)).writeValue(any(OutputStream.class), eq(alertDefineList)); + } + + @Test + void testWriteOsFailed() throws IOException { + + doThrow(new IOException("Test Exception")).when(objectMapper).writeValue(any(OutputStream.class), any()); + + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + 
+ RuntimeException exception = assertThrows( + RuntimeException.class, + () -> service.writeOs(alertDefineList, outputStream) + ); + + assertEquals("export alertDefine failed", exception.getMessage()); + verify(objectMapper, times(1)).writeValue(any(OutputStream.class), eq(alertDefineList)); + } + +} From 90035cac5df85f4883a8f68dd6365233c6d1de89 Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Mon, 29 Jul 2024 12:26:57 +0800 Subject: [PATCH 092/257] [doc] introduction document for the new committer (#2401) --- home/blog/2024-07-29-new-committer.md | 34 +++++++++++++++++++ .../2024-07-29-new-committer.md | 34 +++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 home/blog/2024-07-29-new-committer.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md diff --git a/home/blog/2024-07-29-new-committer.md b/home/blog/2024-07-29-new-committer.md new file mode 100644 index 00000000000..9b7e63476a5 --- /dev/null +++ b/home/blog/2024-07-29-new-committer.md @@ -0,0 +1,34 @@ +--- +title: Welcome to HertzBeat Community Committer! +author: kerwin612 +author_title: Le Zhang +author_url: https://github.com/kerwin612 +author_image_url: https://avatars.githubusercontent.com/u/3371163?v=4 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system] +--- + +![hertzBeat](/img/blog/new-committer.png) + +> It's an honor for me to become a Committer of Apache HertzBeat + +## Becoming Part of a Stronger Team: My Contributions and Growth +In the open-source community, every contribution not only pushes the project forward but also signifies personal growth. Recently, in the Apache HertzBeat project, I have contributed through a series of optimizations and feature enhancements, which have not only advanced the project but also improved my skills. Today, I would like to share my contribution experience and insights into my growth. 
+ +## Starting from the Details: Optimizing Visuals and Interactions +I firmly believe that details determine success or failure. When I first joined the project, I began by optimizing the interface to enhance the user's visual and interactive experience. I refined the modal window layout of the monitoring selection menu to better align with user operation habits. I adjusted the header style and content layout of the monitoring details page to make information presentation clearer and more intuitive. Additionally, I unified the border-radius values of components and addressed issues such as missing internationalization translations, ensuring the consistency and completeness of the system interface. + +These seemingly minor changes significantly enhanced the overall aesthetics and user experience of the system. Through this process, I gained a profound understanding of the importance of interface design for user experience and honed my attention to detail. + +## Delving into Functional Modules for Enhanced Capabilities +In addition to interface optimization, I actively engaged in expanding and enhancing the functional modules of the system. I refactored repetitive code within the system, abstracted common components, and improved code reusability and maintainability. These improvements not only simplified the development process but also reduced the cost of maintenance in the long run. Furthermore, I extended the capabilities of the multi-functional input component by adding support for password types and search types, further enriching its functionality and versatility. + +During the process of implementing these features, I encountered numerous challenges. However, these challenges spurred me to continuously learn and explore new technologies and methodologies. By consulting official documentation and other resources, I gradually overcame these obstacles and successfully completed the tasks. 
This process not only enhanced my technical abilities but also deepened my understanding of the importance of team collaboration. + +## Emphasizing User Feedback for Continuous Product Optimization +I firmly believe that users are the ultimate judges of a product. As such, I continuously collect and analyze user feedback both within the company and in the community, using these insights to guide targeted optimizations and improvements. By refining search and filtering functionalities and adopting a consistent and concise set of interactive elements, I have consistently enhanced the user experience. + +In this process, I realized the importance of a user-centric approach. Only by genuinely focusing on users' needs and expectations can we create products that meet market demands. + +## Looking Ahead: Continuous Contribution and Growth +Reflecting on my past contributions, I feel a profound sense of pride and satisfaction. However, I am also acutely aware of the many areas where I still have room for improvement and need to continue learning and growing. Moving forward, I will uphold the spirit of rigor, innovation, and user-centricity, continuously exploring and practicing to contribute even more to the Apache HertzBeat project. I eagerly anticipate the opportunity to grow and progress alongside my fellow team members, jointly driving the project towards prosperity and success. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md new file mode 100644 index 00000000000..506725aaac6 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md @@ -0,0 +1,34 @@ +--- +title: 热烈欢迎 HertzBeat 小伙伴新晋社区 Committer! 
+author: kerwin612 +author_title: Le Zhang +author_url: https://github.com/kerwin612 +author_image_url: https://avatars.githubusercontent.com/u/3371163?v=4 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system] +--- + +![hertzBeat](/img/blog/new-committer.png) + +> 非常荣幸能成为Apache Hertzbeat的Committer + +## 成为更强大团队的一份子:我的贡献与成长 +在开源社区中,每一次的贡献不仅是对项目的推动,也是个人成长的见证。近期,在Apache HertzBeat项目中,我通过一系列的优化与功能增强,不仅为项目的发展贡献了力量,也实现了自我技能的提升。今天,我想借此机会分享我的贡献经历与成长感悟。 + +## 从细节入手,优化视觉与交互 +我始终认为细节决定成败,在参与项目之初,我便从界面优化入手,致力于提升用户的视觉和交互体验。我优化了监控选择菜单的模态窗口布局,使其更加符合用户的操作习惯;调整了监控详情页面的头部样式和内容布局,使得信息展示更加清晰直观。同时,我还统一了组件的边框圆角值,解决了国际化翻译缺失等问题,确保了系统界面的一致性和完整性。 + +这些看似微小的改动,实则大大提升了系统的整体美观度和用户体验。通过这个过程,我深刻体会到界面设计对于用户体验的重要性,也锻炼了自己在细节处理上的能力。 + +## 深入功能模块,实现功能增强 +除了界面优化外,我还积极参与了功能模块的扩展与增强工作。我重构了系统中的重复代码,抽象出了通用组件,提高了代码的复用性和可维护性。这些改进不仅简化了开发流程,还降低了后期维护的成本。同时,我还为多功能输入组件添加了密码类型支持和搜索类型支持,进一步丰富了组件的功能和用途。 + +在功能实现的过程中,我遇到了不少挑战。但正是这些挑战促使我不断学习和探索新的技术和方法。通过查阅官方文档等方式,我逐步攻克了难关,并成功完成了任务。这个过程不仅提升了我的技术能力,也让我更加深刻地理解了团队协作的重要性。 + +## 注重用户反馈,持续优化产品 +我始终认为用户是产品的最终评判者。因此,我在公司内部以及社区持续收集和分析用户的反馈意见,并根据这些意见进行针对性的优化和改进。通过优化搜索与筛选功能、统一使用简洁的交互元素等方式,我不断提升了用户的使用体验。 + +这个过程中,我深刻体会到了用户导向的重要性。只有真正关注用户的需求和期望才能打造出符合市场需求的产品。 + +## 展望未来,持续贡献与成长 +回顾过去一段时间的贡献经历我深感自豪和满足。但同时我也清楚地认识到自己还有很多不足之处需要不断学习和提升。未来我将继续秉持着严谨、创新、用户至上的精神不断探索和实践为Apache HertzBeat项目贡献更多的力量。同时我也期待与更多的团队成员一起共同成长和进步共同推动项目的繁荣发展。 From bc49280c3cb460768bdbb7e7658c551b956b1cf7 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 29 Jul 2024 17:05:17 +0800 Subject: [PATCH 093/257] [Improve] add AlertDefineYamlImExportServiceImpl unit test (#2392) Signed-off-by: yuluo-yx Co-authored-by: Calvin --- .../AlertDefineYamlImExportServiceImpl.java | 4 +- .../AlertDefineJsonImExportServiceTest.java | 2 +- .../AlertDefineYamlImExportServiceTest.java | 164 ++++++++++++++++++ 3 files changed, 167 insertions(+), 3 deletions(-) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java diff --git 
a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java index 11cb6581fad..b58db57b0e2 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java @@ -49,13 +49,13 @@ public String getFileName() { } @Override - List parseImport(InputStream is) { + public List parseImport(InputStream is) { Yaml yaml = new Yaml(); return yaml.load(is); } @Override - void writeOs(List exportAlertDefineList, OutputStream os) { + public void writeOs(List exportAlertDefineList, OutputStream os) { DumperOptions options = new DumperOptions(); options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); options.setIndent(2); diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java index d8566ab980d..6f690083d75 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java @@ -51,7 +51,7 @@ */ @ExtendWith(MockitoExtension.class) -class AlertDefineJsonImExportServiceImplTest { +class AlertDefineJsonImExportServiceTest { @Mock private ObjectMapper objectMapper; diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java new file mode 100644 index 00000000000..693e0316f98 --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.alert.service; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import org.apache.hertzbeat.alert.dto.AlertDefineDTO; +import org.apache.hertzbeat.alert.dto.ExportAlertDefineDTO; +import org.apache.hertzbeat.alert.service.impl.AlertDefineYamlImExportServiceImpl; +import org.apache.hertzbeat.common.util.JsonUtil; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.junit.jupiter.MockitoExtension; +import org.yaml.snakeyaml.Yaml; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static 
org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AlertDefineYamlImExportServiceImpl} + */ + +@ExtendWith(MockitoExtension.class) +class AlertDefineYamlImExportServiceTest { + + @InjectMocks + private AlertDefineYamlImExportServiceImpl service; + + private static final String YAML_DATA = + "- alertDefine:\n" + + " app: App1\n" + + " metric: Metric1\n" + + " field: Field1\n" + + " preset: true\n" + + " expr: Expr1\n" + + " priority: 1\n" + + " times: 1\n" + + " tags: []\n" + + " enable: true\n" + + " recoverNotice: true\n" + + " template: Template1\n"; + + private InputStream inputStream; + private List alertDefineList; + + @BeforeEach + public void setup() { + + inputStream = new ByteArrayInputStream(YAML_DATA.getBytes(StandardCharsets.UTF_8)); + + AlertDefineDTO alertDefine = new AlertDefineDTO(); + alertDefine.setApp("App1"); + alertDefine.setMetric("Metric1"); + alertDefine.setField("Field1"); + alertDefine.setPreset(true); + alertDefine.setExpr("Expr1"); + alertDefine.setPriority((byte) 1); + alertDefine.setTimes(1); + alertDefine.setTags(List.of()); + alertDefine.setEnable(true); + alertDefine.setRecoverNotice(true); + alertDefine.setTemplate("Template1"); + + ExportAlertDefineDTO exportAlertDefine = new ExportAlertDefineDTO(); + exportAlertDefine.setAlertDefine(alertDefine); + + alertDefineList = List.of(exportAlertDefine); + } + + @Test + void testParseImport() throws IllegalAccessException { + + List result = service.parseImport(inputStream); + + assertNotNull(result); + assertEquals(1, result.size()); + + InputStream inputStream = new ByteArrayInputStream(JsonUtil.toJson(alertDefineList) + .getBytes(StandardCharsets.UTF_8)); + Yaml yaml = new Yaml(); + + assertEquals(yaml.load(inputStream), result); + } + + @Test + void testParseImportFailed() { + + InputStream faultyInputStream = mock(InputStream.class); + try { + when(faultyInputStream.read( + any(byte[].class), 
+ anyInt(), anyInt()) + ).thenThrow(new IOException("Test Exception")); + + RuntimeException exception = assertThrows( + RuntimeException.class, + () -> service.parseImport(faultyInputStream) + ); + assertEquals("java.io.IOException: Test Exception", exception.getMessage()); + } catch (IOException e) { + + fail("Mocking IOException failed"); + } + } + + @Test + void testWriteOs() { + + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + service.writeOs(alertDefineList, outputStream); + String yamlOutput = outputStream.toString(StandardCharsets.UTF_8); + + assertTrue(yamlOutput.contains("app: App1")); + assertTrue(yamlOutput.contains("metric: Metric1")); + } + + @Test + void testWriteOsFailed() { + + OutputStream faultyOutputStream = mock(OutputStream.class); + + try { + doThrow(new IOException("Test Exception")).when(faultyOutputStream).write(any(byte[].class), anyInt(), anyInt()); + + RuntimeException exception = assertThrows(RuntimeException.class, () -> service.writeOs(alertDefineList, faultyOutputStream)); + assertEquals("java.io.IOException: Test Exception", exception.getMessage()); + } catch (IOException e) { + + fail("Mocking IOException failed"); + } + } + +} From 05bce69b848ac602e35d8a360d7484508f0ae55d Mon Sep 17 00:00:00 2001 From: Loong <16333958+loong95@users.noreply.github.com> Date: Mon, 29 Jul 2024 20:44:21 +0800 Subject: [PATCH 094/257] [collector] bugfix Fix ssh session not closing properly (#2398) Co-authored-by: tomsun28 --- .../hertzbeat/collector/collect/ssh/SshCollectImpl.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java index 236b2d1959f..fddbf7abffb 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java +++ 
b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java @@ -102,6 +102,10 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri list.add(ClientChannelEvent.CLOSED); Collection waitEvents = channel.waitFor(list, timeout); if (waitEvents.contains(ClientChannelEvent.TIMEOUT)) { + // A cancel signal needs to be sent if the execution times out, otherwise the session cannot be closed promptly + int cancelSignal = 3; + channel.getInvertedIn().write(cancelSignal); + channel.getInvertedIn().flush(); throw new SocketTimeoutException("Failed to retrieve command result in time: " + sshProtocol.getScript()); } Long responseTime = System.currentTimeMillis() - startTime; @@ -147,7 +151,10 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri } finally { if (channel != null && channel.isOpen()) { try { - channel.close(); + // Close the SSH channel with the 'false' parameter to ensure the session is not kept alive. 
+ long st = System.currentTimeMillis(); + channel.close(false).addListener(future -> + log.debug("channel is closed in {} ms", System.currentTimeMillis() - st)); } catch (Exception e) { log.error(e.getMessage(), e); } From 82219d14df1e84c9e5c6c25de53db550ad56c218 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Mon, 29 Jul 2024 22:30:47 +0800 Subject: [PATCH 095/257] [Improve] add database file server provider (#2371) Co-authored-by: tomsun28 --- .../common/entity/manager/Define.java | 73 ++++++++++++++ .../hertzbeat/manager/dao/DefineDao.java | 28 ++++++ .../manager/pojo/dto/ObjectStoreDTO.java | 5 + .../manager/service/ObjectStoreService.java | 12 +++ .../manager/service/impl/AppServiceImpl.java | 94 +++++++++++++++++-- .../impl/ObsObjectStoreServiceImpl.java | 12 +++ web-app/src/app/pojo/ObjectStore.ts | 5 + .../object-store/object-store.component.html | 1 + web-app/src/assets/i18n/en-US.json | 1 + web-app/src/assets/i18n/zh-CN.json | 1 + web-app/src/assets/i18n/zh-TW.json | 1 + 11 files changed, 226 insertions(+), 7 deletions(-) create mode 100644 common/src/main/java/org/apache/hertzbeat/common/entity/manager/Define.java create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/dao/DefineDao.java diff --git a/common/src/main/java/org/apache/hertzbeat/common/entity/manager/Define.java b/common/src/main/java/org/apache/hertzbeat/common/entity/manager/Define.java new file mode 100644 index 00000000000..a06fca5712a --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/entity/manager/Define.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.entity.manager; + +import io.swagger.v3.oas.annotations.media.Schema; +import jakarta.persistence.Entity; +import jakarta.persistence.EntityListeners; +import jakarta.persistence.Id; +import jakarta.persistence.Lob; +import jakarta.persistence.Table; +import java.time.LocalDateTime; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +import org.springframework.data.annotation.CreatedBy; +import org.springframework.data.annotation.CreatedDate; +import org.springframework.data.annotation.LastModifiedBy; +import org.springframework.data.annotation.LastModifiedDate; +import org.springframework.data.jpa.domain.support.AuditingEntityListener; + +/** + * monitor define entity + */ +@Entity +@Table(name = "hzb_define") +@Data +@Builder +@AllArgsConstructor +@NoArgsConstructor +@Schema(description = "monitor define entity") +@EntityListeners(AuditingEntityListener.class) +public class Define { + + @Id + @Schema(title = "app", example = "websocket") + private String app; + + @Lob + @Schema(title = "define content", description = "define yml content") + private String content; + + @Schema(title = "The creator of this record", example = "tom") + @CreatedBy + private String creator; + + @Schema(title = "This record was last modified by") + @LastModifiedBy + private String modifier; + + @Schema(title = "This record creation time (millisecond timestamp)") + @CreatedDate + private LocalDateTime gmtCreate; + + @Schema(title = "Record the latest modification time 
(timestamp in milliseconds)") + @LastModifiedDate + private LocalDateTime gmtUpdate; +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/dao/DefineDao.java b/manager/src/main/java/org/apache/hertzbeat/manager/dao/DefineDao.java new file mode 100644 index 00000000000..cd04776afcf --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/dao/DefineDao.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.dao; + +import org.apache.hertzbeat.common.entity.manager.Define; +import org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.JpaSpecificationExecutor; + +/** + * monitor define repository + */ +public interface DefineDao extends JpaRepository, JpaSpecificationExecutor { +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/ObjectStoreDTO.java b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/ObjectStoreDTO.java index 0a8a6fdb00b..c07d2a86827 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/ObjectStoreDTO.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/ObjectStoreDTO.java @@ -51,6 +51,11 @@ public enum Type { */ FILE, + /** + * local database + */ + DATABASE, + /** * Huawei Cloud OBS */ diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/ObjectStoreService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ObjectStoreService.java index e4e8f26b608..03017646e9f 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/ObjectStoreService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ObjectStoreService.java @@ -34,6 +34,18 @@ public interface ObjectStoreService { */ boolean upload(String filePath, InputStream is); + /** + * remove file + * @param filePath file path,eg:hertzbeat/111.json + */ + void remove(String filePath); + + /** + * whether the file exists + * @param filePath file path,eg:hertzbeat/111.json + */ + boolean isExist(String filePath); + /** * read file * @param filePath file path,eg:hertzbeat/111.json diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java index 2dc04c53e7c..8f27b29752b 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java 
+++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java @@ -43,12 +43,15 @@ import org.apache.hertzbeat.common.entity.job.Configmap; import org.apache.hertzbeat.common.entity.job.Job; import org.apache.hertzbeat.common.entity.job.Metrics; +import org.apache.hertzbeat.common.entity.manager.Define; import org.apache.hertzbeat.common.entity.manager.Monitor; import org.apache.hertzbeat.common.entity.manager.Param; import org.apache.hertzbeat.common.entity.manager.ParamDefine; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.support.SpringContextHolder; +import org.apache.hertzbeat.common.support.exception.CommonException; import org.apache.hertzbeat.common.util.CommonUtil; +import org.apache.hertzbeat.manager.dao.DefineDao; import org.apache.hertzbeat.manager.dao.MonitorDao; import org.apache.hertzbeat.manager.dao.ParamDao; import org.apache.hertzbeat.manager.pojo.dto.Hierarchy; @@ -89,6 +92,9 @@ public class AppServiceImpl implements AppService, CommandLineRunner { @Resource private ParamDao paramDao; + + @Resource + private DefineDao defineDao; @Resource private WarehouseService warehouseService; @@ -437,13 +443,7 @@ public void deleteMonitorDefine(String app) { if (monitors != null && !monitors.isEmpty()) { throw new IllegalArgumentException("Can not delete define which has monitoring instances."); } - var classpath = Objects.requireNonNull(this.getClass().getClassLoader().getResource("")).getPath(); - var defineAppPath = classpath + "define" + File.separator + "app-" + app + ".yml"; - var defineAppFile = new File(defineAppPath); - if (defineAppFile.exists() && defineAppFile.isFile()) { - defineAppFile.delete(); - } - appDefines.remove(app.toLowerCase()); + appDefineStore.delete(app); } @Override @@ -491,6 +491,8 @@ private void refreshStore(ObjectStoreDTO objectStoreConfig) { } else { if (objectStoreConfig.getType() == ObjectStoreDTO.Type.OBS) { appDefineStore = new 
ObjectStoreAppDefineStoreImpl(); + } else if (objectStoreConfig.getType() == ObjectStoreDTO.Type.DATABASE){ + appDefineStore = new DatabaseAppDefineStoreImpl(); } else { appDefineStore = new LocalFileAppDefineStoreImpl(); } @@ -519,6 +521,7 @@ private interface AppDefineStore { void save(String app, String ymlContent); + void delete(String app); } private class JarAppDefineStoreImpl implements AppDefineStore { @@ -565,6 +568,11 @@ public void save(String app, String ymlContent) { throw new UnsupportedOperationException(); } + @Override + public void delete(String app) { + throw new UnsupportedOperationException("define yml inside jars cannot be deleted"); + } + } private class LocalFileAppDefineStoreImpl implements AppDefineStore { @@ -641,6 +649,22 @@ public void save(String app, String ymlContent) { throw new RuntimeException("flush file " + defineAppPath + " error: " + e.getMessage()); } } + + @Override + public void delete(String app) { + var classpath = Objects.requireNonNull(this.getClass().getClassLoader().getResource("")).getPath(); + var defineAppPath = classpath + "define" + File.separator + "app-" + app + ".yml"; + var defineAppFile = new File(defineAppPath); + + if (!defineAppFile.exists() && appDefines.containsKey(app.toLowerCase())){ + throw new CommonException("the app define file is not in current file server provider"); + } + + if (defineAppFile.exists() && defineAppFile.isFile()) { + defineAppFile.delete(); + } + appDefines.remove(app.toLowerCase()); + } } private class ObjectStoreAppDefineStoreImpl implements AppDefineStore { @@ -682,6 +706,20 @@ public void save(String app, String ymlContent) { objectStoreService.upload(getDefineAppPath(app), IOUtils.toInputStream(ymlContent, StandardCharsets.UTF_8)); } + @Override + public void delete(String app) { + var objectStoreService = getObjectStoreService(); + String defineAppPath = getDefineAppPath(app); + boolean exist = objectStoreService.isExist(defineAppPath); + if (!exist && 
appDefines.containsKey(app.toLowerCase())){ + throw new CommonException("the app define file is not in current file server provider"); + } + if (exist){ + objectStoreService.remove(defineAppPath); + } + appDefines.remove(app.toLowerCase()); + } + private ObjectStoreService getObjectStoreService() { return SpringContextHolder.getBean(ObsObjectStoreServiceImpl.class); } @@ -692,4 +730,46 @@ private String getDefineAppPath(String app) { } + private class DatabaseAppDefineStoreImpl implements AppDefineStore { + + @Override + public boolean loadAppDefines() { + Yaml yaml = new Yaml(); + List defines = defineDao.findAll(); + for (Define define : defines) { + var app = yaml.loadAs(define.getContent(), Job.class); + if (app != null){ + appDefines.put(define.getApp().toLowerCase(), app); + } + } + // merge define yml files inside jars + return false; + } + + @Override + public String loadAppDefine(String app) { + Optional defineOptional = defineDao.findById(app); + return defineOptional.map(Define::getContent).orElse(null); + } + + @Override + public void save(String app, String ymlContent) { + Define define = new Define(); + define.setApp(app); + define.setContent(ymlContent); + defineDao.save(define); + } + + @Override + public void delete(String app) { + Optional defineOptional = defineDao.findById(app); + if (defineOptional.isEmpty() && appDefines.containsKey(app.toLowerCase())){ + throw new CommonException("the app define file is not in current file server provider"); + } + if (defineOptional.isPresent()){ + defineDao.deleteById(app); + } + appDefines.remove(app.toLowerCase()); + } + } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java index 5d70430c92a..3dc01e1cd30 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java +++ 
b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java @@ -55,6 +55,18 @@ public boolean upload(String filePath, InputStream is) { return Objects.equals(response.getStatusCode(), 200); } + @Override + public void remove(String filePath) { + var objectKey = getObjectKey(filePath); + obsClient.deleteObject(bucketName, objectKey); + } + + @Override + public boolean isExist(String filePath) { + var objectKey = getObjectKey(filePath); + return obsClient.doesObjectExist(bucketName, objectKey); + } + @Override public FileDTO download(String filePath) { var objectKey = getObjectKey(filePath); diff --git a/web-app/src/app/pojo/ObjectStore.ts b/web-app/src/app/pojo/ObjectStore.ts index 7bc31d73604..0fc00c8eb86 100644 --- a/web-app/src/app/pojo/ObjectStore.ts +++ b/web-app/src/app/pojo/ObjectStore.ts @@ -29,6 +29,11 @@ export enum ObjectStoreType { */ FILE = 'FILE', + /** + * Local database + */ + DATABASE = 'DATABASE', + /** * Huawei cloud OBS */ diff --git a/web-app/src/app/routes/setting/settings/object-store/object-store.component.html b/web-app/src/app/routes/setting/settings/object-store/object-store.component.html index 741fe310126..7b7a629ddaa 100644 --- a/web-app/src/app/routes/setting/settings/object-store/object-store.component.html +++ b/web-app/src/app/routes/setting/settings/object-store/object-store.component.html @@ -31,6 +31,7 @@ (ngModelChange)="onChange()" > + diff --git a/web-app/src/assets/i18n/en-US.json b/web-app/src/assets/i18n/en-US.json index 4e8ff763051..c798b6b6d89 100644 --- a/web-app/src/assets/i18n/en-US.json +++ b/web-app/src/assets/i18n/en-US.json @@ -575,6 +575,7 @@ "settings.object-store": "File Server Configuration", "settings.object-store.type": "File Server Provider", "settings.object-store.type.file": "Local file (default)", + "settings.object-store.type.database": "Local database", "settings.object-store.type.obs": "HUAWEI CLOUD OBS", "settings.object-store.obs.accessKey": "AccessKey", 
"settings.object-store.obs.accessKey.placeholder": "Access Key ID of HUAWEI CLOUD", diff --git a/web-app/src/assets/i18n/zh-CN.json b/web-app/src/assets/i18n/zh-CN.json index c6b650814bd..d8ea6a49071 100644 --- a/web-app/src/assets/i18n/zh-CN.json +++ b/web-app/src/assets/i18n/zh-CN.json @@ -575,6 +575,7 @@ "settings.object-store": "文件服务配置", "settings.object-store.type": "文件服务提供商", "settings.object-store.type.file": "本地文件(默认)", + "settings.object-store.type.database": "本地数据库", "settings.object-store.type.obs": "华为云OBS", "settings.object-store.obs.accessKey": "AccessKey", "settings.object-store.obs.accessKey.placeholder": "华为云的AccessKeyId", diff --git a/web-app/src/assets/i18n/zh-TW.json b/web-app/src/assets/i18n/zh-TW.json index e0fdabfef13..97d1f040d43 100644 --- a/web-app/src/assets/i18n/zh-TW.json +++ b/web-app/src/assets/i18n/zh-TW.json @@ -572,6 +572,7 @@ "settings.object-store": "文件服務配置", "settings.object-store.type": "文件服務提供商", "settings.object-store.type.file": "本地文件(默認)", + "settings.object-store.type.database": "本地數據庫", "settings.object-store.type.obs": "華為雲OBS", "settings.object-store.obs.accessKey": "AccessKey", "settings.object-store.obs.accessKey.placeholder": "華為雲的AccessKeyId", From f5e88db8411f44b32ad0919f41b89e886d99730f Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 29 Jul 2024 22:46:08 +0800 Subject: [PATCH 096/257] [improve] modify ssh client common config (#2403) Co-authored-by: tomsun28 --- .../hertzbeat/collector/collect/common/ssh/CommonSshClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshClient.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshClient.java index 639ca2480a8..882aa37605c 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshClient.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshClient.java 
@@ -43,7 +43,7 @@ public class CommonSshClient { PropertyResolverUtils.updateProperty( SSH_CLIENT, CoreModuleProperties.HEARTBEAT_INTERVAL.getName(), 2000); PropertyResolverUtils.updateProperty( - SSH_CLIENT, CoreModuleProperties.HEARTBEAT_REPLY_WAIT.getName(), 300_000); + SSH_CLIENT, CoreModuleProperties.HEARTBEAT_NO_REPLY_MAX.getName(), 30); PropertyResolverUtils.updateProperty( SSH_CLIENT, CoreModuleProperties.SOCKET_KEEPALIVE.getName(), true); // set support all KeyExchange From 6b7dbd53f4f47284f26194b8a00e178bba5899b9 Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 29 Jul 2024 23:27:50 +0800 Subject: [PATCH 097/257] [doc] fix mistake md content (#2406) Co-authored-by: tomsun28 --- .../2022-07-10-hertzbeat-v1.1.1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md index 3768024c848..0e6e391b890 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md @@ -23,7 +23,7 @@ Feature: 1. [[script] feature 升级docker的基础镜像为 openjdk:11.0.15-jre-slim #205](https://github.com/apache/hertzbeat/pull/205) 2. [[monitor] 支持前置采集指标数据作为变量赋值给下一采集流程 #206](https://github.com/apache/hertzbeat/pull/206). 3. [[collector] 使用基本的http headers头实现basic auth替换前置模式 #212](https://github.com/apache/hertzbeat/pull/212) -4. [[manager,alerter] 支持告警通知设置丁丁微信飞书自定义 webhook url #213](https://github.com/apache/hertzbeat/pull/213) +4. [[manager,alerter] 支持告警通知设置钉钉机器人微信飞书自定义 webhook url #213](https://github.com/apache/hertzbeat/pull/213) 5. [[monitor] feature 更新数值指标数据不带末尾为0的小数点 #217](https://github.com/apache/hertzbeat/pull/217) 6. [[web-app]feature:toggle [enable and cancel] button #218](https://github.com/apache/hertzbeat/pull/218) 7. 
[[manager] 更新监控define yml文件前缀名称 "app" or "param",便于自定义监控区别 #221](https://github.com/apache/hertzbeat/pull/221) From f9bce29523324a70bed4a7dcbc43994ab0a026a9 Mon Sep 17 00:00:00 2001 From: aias00 Date: Tue, 30 Jul 2024 18:20:00 +0800 Subject: [PATCH 098/257] [feature] add sms config (#2399) Co-authored-by: tomsun28 --- .../impl/SmsAlertNotifyHandlerImpl.java | 8 +- .../manager/pojo/dto/SmsAlibabaConfig.java | 35 ++++++ .../manager/pojo/dto/SmsNoticeSender.java | 41 +++++++ .../manager/pojo/dto/SmsTencentConfig.java | 48 ++++++++ .../impl/SmsGeneralConfigServiceImpl.java | 65 +++++++++++ web-app/src/app/pojo/AlibabaSmsConfig.ts | 26 +++++ web-app/src/app/pojo/SmsNoticeSender.ts | 33 ++++++ web-app/src/app/pojo/TencentSmsConfig.ts | 26 +++++ .../message-server.component.html | 107 ++++++++++++++++-- .../message-server.component.ts | 98 +++++++++++++++- web-app/src/assets/i18n/en-US.json | 8 ++ web-app/src/assets/i18n/zh-CN.json | 12 +- web-app/src/assets/i18n/zh-TW.json | 12 +- 13 files changed, 500 insertions(+), 19 deletions(-) create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsAlibabaConfig.java create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsNoticeSender.java create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsTencentConfig.java create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java create mode 100644 web-app/src/app/pojo/AlibabaSmsConfig.ts create mode 100644 web-app/src/app/pojo/SmsNoticeSender.ts create mode 100644 web-app/src/app/pojo/TencentSmsConfig.ts diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImpl.java index f0ab01c2d30..c80bbc5d90c 100644 --- 
a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImpl.java @@ -38,11 +38,11 @@ @Slf4j @ConditionalOnProperty("common.sms.tencent.app-id") final class SmsAlertNotifyHandlerImpl extends AbstractAlertNotifyHandlerImpl { - + private final TencentSmsClient tencentSmsClient; - + private final ResourceBundle bundle = ResourceBundleUtil.getBundle("alerter"); - + @Override public void send(NoticeReceiver receiver, NoticeTemplate noticeTemplate, Alert alert) { // SMS notification @@ -60,7 +60,7 @@ public void send(NoticeReceiver receiver, NoticeTemplate noticeTemplate, Alert a throw new AlertNoticeException("[Sms Notify Error] " + e.getMessage()); } } - + @Override public byte type() { return 0; diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsAlibabaConfig.java b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsAlibabaConfig.java new file mode 100644 index 00000000000..25feeaa0ebc --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsAlibabaConfig.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.pojo.dto; + +import jakarta.validation.constraints.NotBlank; +import lombok.Data; + +/** + * Alibaba Sms Sender configuration dto + */ +@Data +public class SmsAlibabaConfig { + + @NotBlank(message = "SecretId cannot be empty") + private String secretId; + + @NotBlank(message = "SecretKey cannot be empty") + private String secretKey; + +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsNoticeSender.java b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsNoticeSender.java new file mode 100644 index 00000000000..b2aecead57c --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsNoticeSender.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.pojo.dto; + +import jakarta.validation.constraints.NotBlank; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * Sms Sender configuration dto + */ +@Data +@NoArgsConstructor +@AllArgsConstructor +public class SmsNoticeSender { + + @NotBlank(message = "Type cannot be empty") + private String type; + + private SmsTencentConfig tencent; + + private SmsAlibabaConfig alibaba; + + private boolean enable = true; +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsTencentConfig.java b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsTencentConfig.java new file mode 100644 index 00000000000..f54affd5978 --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/SmsTencentConfig.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.pojo.dto; + +import jakarta.validation.constraints.NotBlank; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * Tencent Sms Sender configuration dto + */ +@Data +@NoArgsConstructor +@AllArgsConstructor +public class SmsTencentConfig { + + @NotBlank(message = "SecretId cannot be empty") + private String secretId; + + @NotBlank(message = "SecretKey cannot be empty") + private String secretKey; + + @NotBlank(message = "SignName cannot be empty") + private String signName; + + @NotBlank(message = "AppId cannot be null") + private String appId; + + @NotBlank(message = "templateId cannot be null") + private String templateId; + +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java new file mode 100644 index 00000000000..b80ef9fa4cd --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service.impl; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.lang.reflect.Type; +import org.apache.hertzbeat.manager.dao.GeneralConfigDao; +import org.apache.hertzbeat.manager.pojo.dto.SmsNoticeSender; +import org.springframework.stereotype.Service; + +/** + * SmsGeneralConfigServiceImpl class is the implementation of general sms configuration service, + * which inherits the AbstractGeneralConfigServiceImpl class. + */ + +@Service +public class SmsGeneralConfigServiceImpl extends AbstractGeneralConfigServiceImpl { + + /** + * SmsGeneralConfigServiceImpl's constructor creates an instance of this class + * through the default constructor or deserialization construction (setBeanProps). + * The parameter generalConfigDao is used for dao layer operation data, + * and objectMapper is used for object mapping. + * @param generalConfigDao dao layer operation data, needed to create an instance of this class + * @param objectMapper object mapping , needed to create an instance of this class + */ + public SmsGeneralConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper objectMapper) { + super(generalConfigDao, objectMapper); + } + + @Override + public String type() { + return "sms"; + } + + /** + * This method is used to get the TypeReference of NoticeSender type for subsequent processing. + * a TypeReference of NoticeSender type + */ + @Override + protected TypeReference getTypeReference() { + return new TypeReference<>() { + @Override + public Type getType() { + return SmsNoticeSender.class; + } + }; + } +} diff --git a/web-app/src/app/pojo/AlibabaSmsConfig.ts b/web-app/src/app/pojo/AlibabaSmsConfig.ts new file mode 100644 index 00000000000..4f072c57180 --- /dev/null +++ b/web-app/src/app/pojo/AlibabaSmsConfig.ts @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +export class AlibabaSmsConfig { + secretId!: string; + secretKey!: string; + signName!: string; + appId!: string; + templateId!: string; +} diff --git a/web-app/src/app/pojo/SmsNoticeSender.ts b/web-app/src/app/pojo/SmsNoticeSender.ts new file mode 100644 index 00000000000..ffb58105322 --- /dev/null +++ b/web-app/src/app/pojo/SmsNoticeSender.ts @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import { AlibabaSmsConfig } from './AlibabaSmsConfig'; +import { TencentSmsConfig } from './TencentSmsConfig'; + +export class SmsNoticeSender { + id!: number; + type!: string; + tencent!: TencentSmsConfig; + alibaba!: AlibabaSmsConfig; + enable!: boolean; + creator!: string; + modifier!: string; + gmtCreate!: number; + gmtUpdate!: number; +} diff --git a/web-app/src/app/pojo/TencentSmsConfig.ts b/web-app/src/app/pojo/TencentSmsConfig.ts new file mode 100644 index 00000000000..c91b24fe945 --- /dev/null +++ b/web-app/src/app/pojo/TencentSmsConfig.ts @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +export class TencentSmsConfig { + secretId!: string; + secretKey!: string; + signName!: string; + appId!: string; + templateId!: string; +} diff --git a/web-app/src/app/routes/setting/settings/message-server/message-server.component.html b/web-app/src/app/routes/setting/settings/message-server/message-server.component.html index dbadf2d1bf8..dc23399183b 100644 --- a/web-app/src/app/routes/setting/settings/message-server/message-server.component.html +++ b/web-app/src/app/routes/setting/settings/message-server/message-server.component.html @@ -39,16 +39,33 @@ - - - - {{ 'common.button.setting' | i18n }} - + + + {{ 'common.button.setting' | i18n }} - - - - + + + {{ 'alert.notice.sender.sms.type' | i18n }}: {{ 'alert.notice.sender.sms.type.' + smsNoticeSender.type | i18n }} +
+ + {{ 'alert.notice.sender.sms.tencent.secretId' | i18n }}: {{ smsNoticeSender.tencent.secretId }} +
+
+ + {{ 'alert.notice.sender.sms.tencent.secretKey' | i18n }}: {{ smsNoticeSender.tencent.secretKey }} +
+
+ + {{ 'alert.notice.sender.sms.tencent.signName' | i18n }}: {{ smsNoticeSender.tencent.signName }} +
+
+ + {{ 'alert.notice.sender.sms.tencent.appId' | i18n }}: {{ smsNoticeSender.tencent.appId }} +
+
+ + {{ 'alert.notice.sender.sms.tencent.templateId' | i18n }}: {{ smsNoticeSender.tencent.templateId }} +
@@ -112,3 +129,75 @@

+ + + +
+
+ + {{ 'alert.notice.sender.sms.type' | i18n }} + + + + + + + + + + {{ + 'alert.notice.sender.sms.tencent.secretId' | i18n + }} + + + + + + {{ + 'alert.notice.sender.sms.tencent.secretKey' | i18n + }} + + + + + + {{ + 'alert.notice.sender.sms.tencent.signName' | i18n + }} + + + + + + {{ 'alert.notice.sender.sms.tencent.appId' | i18n }} + + + + + + {{ + 'alert.notice.sender.sms.tencent.templateId' | i18n + }} + + + + + + + + {{ 'alert.notice.sender.enable' | i18n }} + + + + +
+
+
diff --git a/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts b/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts index d3392fa007d..b919d1a2b63 100644 --- a/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts +++ b/web-app/src/app/routes/setting/settings/message-server/message-server.component.ts @@ -24,6 +24,9 @@ import { ALAIN_I18N_TOKEN } from '@delon/theme'; import { NzMessageService } from 'ng-zorro-antd/message'; import { NzNotificationService } from 'ng-zorro-antd/notification'; import { finalize } from 'rxjs/operators'; +import { AlibabaSmsConfig } from 'src/app/pojo/AlibabaSmsConfig'; +import { SmsNoticeSender } from 'src/app/pojo/SmsNoticeSender'; +import { TencentSmsConfig } from 'src/app/pojo/TencentSmsConfig'; import { EmailNoticeSender } from '../../../../pojo/EmailNoticeSender'; import { GeneralConfigService } from '../../../../service/general-config.service'; @@ -45,13 +48,17 @@ export class MessageServerComponent implements OnInit { senderServerLoading: boolean = true; loading: boolean = false; isEmailServerModalVisible: boolean = false; + isSmsServerModalVisible: boolean = false; + smsType: string = 'tencent'; emailSender = new EmailNoticeSender(); + smsNoticeSender = new SmsNoticeSender(); ngOnInit(): void { - this.loadSenderServer(); + this.loadEmailSenderServer(); + this.loadSmsSenderServer(); } - loadSenderServer() { + loadEmailSenderServer() { this.senderServerLoading = true; let senderInit$ = this.noticeSenderSvc.getGeneralConfig('email').subscribe( message => { @@ -116,4 +123,91 @@ export class MessageServerComponent implements OnInit { } ); } + + loadSmsSenderServer() { + this.senderServerLoading = true; + let senderInit$ = this.noticeSenderSvc.getGeneralConfig('sms').subscribe( + message => { + this.senderServerLoading = false; + if (message.code === 0) { + if (message.data) { + this.smsNoticeSender = message.data; + this.smsType = 
message.data.type; + } else { + this.smsNoticeSender = new SmsNoticeSender(); + this.smsNoticeSender.type = 'tencent'; + this.smsNoticeSender.tencent = new TencentSmsConfig(); + } + } else { + console.warn(message.msg); + } + senderInit$.unsubscribe(); + }, + error => { + console.error(error.msg); + this.senderServerLoading = false; + senderInit$.unsubscribe(); + } + ); + } + + onConfigSmsServer() { + this.isSmsServerModalVisible = true; + } + + onCancelSmsServer() { + this.isSmsServerModalVisible = false; + } + + onSmsTypeChange(value: string) { + if (value === 'tencent') { + // tencent sms sender + this.smsType = 'tencent'; + this.smsNoticeSender.type = 'tencent'; + } else if (value === 'alibaba') { + // alibaba sms sender + this.smsType = 'alibaba'; + this.smsNoticeSender.type = 'alibaba'; + } + } + + onSaveSmsServer() { + if (this.senderForm?.invalid) { + Object.values(this.senderForm.controls).forEach(control => { + if (control.invalid) { + control.markAsDirty(); + control.updateValueAndValidity({ onlySelf: true }); + } + }); + return; + } + if (this.smsNoticeSender.type === 'tencent') { + this.smsNoticeSender.alibaba = new AlibabaSmsConfig(); + } + if (this.smsNoticeSender.type === 'alibaba') { + this.smsNoticeSender.tencent = new TencentSmsConfig(); + } + const modalOk$ = this.noticeSenderSvc + .saveGeneralConfig(this.smsNoticeSender, 'sms') + .pipe( + finalize(() => { + modalOk$.unsubscribe(); + this.senderServerLoading = false; + }) + ) + .subscribe( + message => { + if (message.code === 0) { + this.isSmsServerModalVisible = false; + this.notifySvc.success(this.i18nSvc.fanyi('common.notify.apply-success'), ''); + } else { + this.notifySvc.error(this.i18nSvc.fanyi('common.notify.apply-fail'), message.msg); + } + }, + error => { + this.isSmsServerModalVisible = false; + this.notifySvc.error(this.i18nSvc.fanyi('common.notify.apply-fail'), error.msg); + } + ); + } } diff --git a/web-app/src/assets/i18n/en-US.json b/web-app/src/assets/i18n/en-US.json index 
c798b6b6d89..4a9cf506e35 100644 --- a/web-app/src/assets/i18n/en-US.json +++ b/web-app/src/assets/i18n/en-US.json @@ -336,6 +336,14 @@ "alert.notice.sender.mail.ssl": "Enable SSL", "alert.notice.sender.mail.starttls": "Enable STARTTLS", "alert.notice.sender.mail.enable": "Enable Email Configuration", + "alert.notice.sender.sms.type": "Sms Type", + "alert.notice.sender.sms.type.tencent": "Tencent Sms", + "alert.notice.sender.sms.type.alibaba": "Alibaba Sms", + "alert.notice.sender.sms.tencent.secretId": "Tencent Sms SecretId", + "alert.notice.sender.sms.tencent.secretKey": "Tencent Sms SecretKey", + "alert.notice.sender.sms.tencent.signName": "Tencent Sms SignName", + "alert.notice.sender.sms.tencent.appId": "Tencent Sms AppId", + "alert.notice.sender.sms.tencent.templateId": "Tencent Sms TemplateId", "alert.export.switch-type": "Please select the export file format!", "alert.export.use-type": "Export rules in {{type}} file format", "dashboard.alerts.title": "Recently Alarms List", diff --git a/web-app/src/assets/i18n/zh-CN.json b/web-app/src/assets/i18n/zh-CN.json index d8ea6a49071..8218fe13370 100644 --- a/web-app/src/assets/i18n/zh-CN.json +++ b/web-app/src/assets/i18n/zh-CN.json @@ -337,6 +337,14 @@ "alert.notice.sender.mail.ssl": "是否启用SSL", "alert.notice.sender.mail.starttls": "是否启用STARTTLS", "alert.notice.sender.mail.enable": "是否启用邮箱配置", + "alert.notice.sender.sms.type": "短信类型", + "alert.notice.sender.sms.type.tencent": "腾讯短信", + "alert.notice.sender.sms.type.alibaba": "阿里短信", + "alert.notice.sender.sms.tencent.secretId": "腾讯短信SecretId", + "alert.notice.sender.sms.tencent.secretKey": "腾讯短信SecretKey", + "alert.notice.sender.sms.tencent.signName": "腾讯短信SignName", + "alert.notice.sender.sms.tencent.appId": "腾讯短信AppId", + "alert.notice.sender.sms.tencent.templateId": "腾讯短信TemplateId", "alert.export.switch-type": "请选择导出文件格式!", "alert.export.use-type": "以 {{type}} 文件格式导出阈值规则", "dashboard.alerts.title": "最近告警列表", @@ -559,8 +567,8 @@ "settings.server": "消息服务配置", 
"settings.server.email": "邮件服务器", "settings.server.email.setting": "配置邮件服务器", - "settings.server.sms": "短信服务器", - "settings.server.sms.setting": "配置短信服务器", + "settings.server.sms": "短信配置", + "settings.server.sms.setting": "配置短信参数", "settings.system-config": "系统配置", "settings.system-config.locale": "系统语言", "settings.system-config.locale.zh_CN": "简体中文(zh_CN)", diff --git a/web-app/src/assets/i18n/zh-TW.json b/web-app/src/assets/i18n/zh-TW.json index 97d1f040d43..671de56b5fb 100644 --- a/web-app/src/assets/i18n/zh-TW.json +++ b/web-app/src/assets/i18n/zh-TW.json @@ -336,6 +336,14 @@ "alert.notice.sender.mail.ssl": "是否啟用SSL", "alert.notice.sender.mail.starttls": "是否啟用STARTTLS", "alert.notice.sender.mail.enable": "啟用郵件設定", + "alert.notice.sender.sms.type": "騰訊類型", + "alert.notice.sender.sms.type.tencent": "騰訊短訊", + "alert.notice.sender.sms.type.alibaba": "阿裏短訊", + "alert.notice.sender.sms.tencent.secretId": "騰訊短訊SecretId", + "alert.notice.sender.sms.tencent.secretKey": "騰訊短訊SecretKey", + "alert.notice.sender.sms.tencent.signName": "騰訊短訊SignName", + "alert.notice.sender.sms.tencent.appId": "騰訊短訊AppId", + "alert.notice.sender.sms.tencent.templateId": "騰訊短訊TemplateId", "alert.export.switch-type": "請選擇導出文件格式!", "alert.export.use-type": "以 {{type}} 文件格式導出阈值規則", "dashboard.alerts.title": "最近告警列表", @@ -556,8 +564,8 @@ "settings.server": "消息服務配置", "settings.server.email": "郵件服務器", "settings.server.email.setting": "配置郵件服務器", - "settings.server.sms": "短信服務器", - "settings.server.sms.setting": "配置短信服務器", + "settings.server.sms": "短信配置", + "settings.server.sms.setting": "配置短信參數", "settings.system-config": "系統配置", "settings.system-config.locale": "系統語言", "settings.system-config.locale.zh_CN": "簡體中文(zh_CN)", From 28245420371f085e2b528a83ce23b06d8f8b0d3e Mon Sep 17 00:00:00 2001 From: linDong <56677297@qq.com> Date: Tue, 30 Jul 2024 18:26:25 +0800 Subject: [PATCH 099/257] [doc] Fix profile (#2408) Co-authored-by: tomsun28 --- home/blog/2024-07-28-new-committer.md | 2 +- 
.../docusaurus-plugin-content-blog/2024-07-28-new-committer.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/home/blog/2024-07-28-new-committer.md b/home/blog/2024-07-28-new-committer.md index 26bfc69bba6..cbaeab8fb90 100644 --- a/home/blog/2024-07-28-new-committer.md +++ b/home/blog/2024-07-28-new-committer.md @@ -3,7 +3,7 @@ title: Welcome to HertzBeat Community Committer! author: linDong author_title: linDong author_url: https://github.com/Yanshuming1 -author_image_url: https://avatars.githubusercontent.com/u/30208283?v=4 +author_image_url: https://avatars.githubusercontent.com/u/118667222?v=4 tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md index 463a89d1a35..be6c1adf2fb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md @@ -3,7 +3,7 @@ title: 热烈欢迎 HertzBeat 小伙伴新晋社区 Committer! 
author: linDong author_title: linDong author_url: https://github.com/Yanshuming1 -author_image_url: https://avatars.githubusercontent.com/u/131688897?v=4 +author_image_url: https://avatars.githubusercontent.com/u/118667222?v=4 tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- From 15768d8077c43bdb626b18cb32fa79e8b8568f9d Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Tue, 30 Jul 2024 21:31:09 +0800 Subject: [PATCH 100/257] [infra] support contribute with Gitpod (#2419) --- .devcontainer/devcontainer.json | 38 ++++++++++++++++++++++++++++++ .gitpod.Dockerfile | 7 ++++++ .gitpod.yml | 41 +++++++++++++++++++++++++++++++++ .licenserc.yaml | 6 +++-- README.md | 1 + README_CN.md | 1 + 6 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 .devcontainer/devcontainer.json create mode 100644 .gitpod.Dockerfile create mode 100644 .gitpod.yml diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..349fa4ad299 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,38 @@ +{ + "name": "Hertzbeat DevContainer", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu", + "features": { + "ghcr.io/devcontainers/features/java:1": { + "version": "17", + "installMaven": "true", + "installGradle": "false" + }, + "ghcr.io/devcontainers/features/node:1": { + "version": "20" + }, + "ghcr.io/devcontainers/features/git-lfs:1.1.0": {} + }, + "customizations": { + "vscode": { + "settings": {}, + // same extensions as Gitpod, should match /.gitpod.yml + "extensions": [ + "vscjava.vscode-java-pack", + "editorconfig.editorconfig", + "dbaeumer.vscode-eslint", + "stylelint.vscode-stylelint", + "DavidAnson.vscode-markdownlint", + "ms-azuretools.vscode-docker", + "cweijan.vscode-database-client2", + "GitHub.vscode-pull-request-github" + ] + } + }, + "portsAttributes": { + "4200": { + "label": "Hertzbeat Web", + "onAutoForward": "notify" + } + }, + "postCreateCommand": 
"java -version" +} diff --git a/.gitpod.Dockerfile b/.gitpod.Dockerfile new file mode 100644 index 00000000000..7bfd3167cc7 --- /dev/null +++ b/.gitpod.Dockerfile @@ -0,0 +1,7 @@ +FROM gitpod/workspace-full + +USER gitpod + +RUN bash -c ". /home/gitpod/.sdkman/bin/sdkman-init.sh && \ + sdk install java 17.0.12-amzn && \ + sdk default java 17.0.12-amzn" diff --git a/.gitpod.yml b/.gitpod.yml new file mode 100644 index 00000000000..84e8df6d67d --- /dev/null +++ b/.gitpod.yml @@ -0,0 +1,41 @@ +image: + file: .gitpod.Dockerfile +tasks: + - name: Setup + init: | + cp -r contrib/ide/vscode .vscode + mvn clean install + cd web-app + yarn install + command: | + gp sync-done setup + exit 0 + - name: Run backend + command: | + gp sync-await setup + + cd manager + mvn spring-boot:run + - name: Run frontend + command: | + gp sync-await setup + + cd web-app + yarn start + openMode: split-right + +vscode: + extensions: + - vscjava.vscode-java-pack + - editorconfig.editorconfig + - dbaeumer.vscode-eslint + - stylelint.vscode-stylelint + - DavidAnson.vscode-markdownlint + - ms-azuretools.vscode-docker + - cweijan.vscode-database-client2 + - GitHub.vscode-pull-request-github + +ports: + - port: 4200 + name: Hertzbeat + onOpen: open-browser diff --git a/.licenserc.yaml b/.licenserc.yaml index 8eaea845764..4266e3e085b 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -62,6 +62,8 @@ header: - '.stylelintrc' - '.prettierignore' - '.prettierrc.js' + - '.gitpod.Dockerfile' + - '.gitpod.yml' - 'karma.conf.js' - 'proxy.conf.js' - '.helmignore' @@ -92,12 +94,12 @@ header: - 'web-app/src/style.less' - 'web-app/src/test.ts' - 'web-app/src/typings.d.ts' - + comment: on-failure dependency: files: - pom.xml - web-app/package.json - + diff --git a/README.md b/README.md index 5379709cb70..f012545debe 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ [![Docker 
Pulls](https://img.shields.io/docker/pulls/apache/hertzbeat?style=%20for-the-badge&logo=docker&label=DockerHub%20Download)](https://hub.docker.com/r/apache/hertzbeat) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/hertzbeat)](https://artifacthub.io/packages/search?repo=hertzbeat) [![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCri75zfWX0GHqJFPENEbLow?logo=youtube&label=YouTube%20Channel)](https://www.youtube.com/channel/UCri75zfWX0GHqJFPENEbLow) +[![Contribute with Gitpod](https://img.shields.io/badge/Contribute%20with-Gitpod-908a85?logo=gitpod&color=green)](https://gitpod.io/#https://github.com/apache/hertzbeat) **Home: [hertzbeat.apache.org](https://hertzbeat.apache.org)** **Email: Mail to `dev-subscribe@hertzbeat.apache.org` to subscribe mailing lists** diff --git a/README_CN.md b/README_CN.md index 0db4924b13c..930d1285181 100644 --- a/README_CN.md +++ b/README_CN.md @@ -18,6 +18,7 @@ [![Docker Pulls](https://img.shields.io/docker/pulls/apache/hertzbeat?style=%20for-the-badge&logo=docker&label=DockerHub%20Download)](https://hub.docker.com/r/apache/hertzbeat) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/hertzbeat)](https://artifacthub.io/packages/search?repo=hertzbeat) [![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCri75zfWX0GHqJFPENEbLow?logo=youtube&label=YouTube%20Channel)](https://www.youtube.com/channel/UCri75zfWX0GHqJFPENEbLow) +[![Contribute with Gitpod](https://img.shields.io/badge/Contribute%20with-Gitpod-908a85?logo=gitpod&color=green)](https://gitpod.io/#https://github.com/apache/hertzbeat) **官网: [hertzbeat.apache.org](https://hertzbeat.apache.org)** From dce9a4b15e1217d0a149e09d31b8b10e4e786d9f Mon Sep 17 00:00:00 2001 From: YuLuo Date: Tue, 30 Jul 2024 23:37:13 +0800 Subject: [PATCH 101/257] [Improve] add AlarmSilenceReduce unit test (#2409) Signed-off-by: yuluo-yx Co-authored-by: 
tomsun28 --- .../alert/reduce/AlarmSilenceReduceTest.java | 188 ++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java new file mode 100644 index 00000000000..5f18ac52c07 --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.alert.reduce; + +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.Collections; +import java.util.HashMap; + +import org.apache.hertzbeat.alert.dao.AlertSilenceDao; +import org.apache.hertzbeat.common.cache.CacheFactory; +import org.apache.hertzbeat.common.cache.CommonCacheService; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.alerter.Alert; +import org.apache.hertzbeat.common.entity.alerter.AlertSilence; +import org.apache.hertzbeat.common.entity.manager.TagItem; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AlarmSilenceReduce} + */ + +class AlarmSilenceReduceTest { + + @Mock + private AlertSilenceDao alertSilenceDao; + + @Mock + private CommonCacheService silenceCache; + + private AlarmSilenceReduce alarmSilenceReduce; + + private MockedStatic cacheFactoryMockedStatic; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + cacheFactoryMockedStatic = mockStatic(CacheFactory.class); + cacheFactoryMockedStatic.when(CacheFactory::getAlertSilenceCache).thenReturn(silenceCache); + + // inject dao object. 
+ alarmSilenceReduce = new AlarmSilenceReduce(alertSilenceDao); + } + + @Test + void testFilterSilenceNull() { + + // when cache get result is null, exec db logic. + when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(null); + doReturn(Collections.emptyList()).when(alertSilenceDao).findAll(); + + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + .build(); + + boolean result = alarmSilenceReduce.filterSilence(alert); + + assertTrue(result); + verify(alertSilenceDao, times(1)).findAll(); + verify(silenceCache, times(1)).put(eq(CommonConstants.CACHE_ALERT_SILENCE), any()); + } + + @Test + void testFilterSilenceOnce() { + + AlertSilence alertSilence = AlertSilence.builder() + .enable(Boolean.TRUE) + .matchAll(Boolean.TRUE) + .type((byte) 0) + .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) + .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) + .times(0) + .build(); + + when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); + doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); + + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + .build(); + + boolean result = alarmSilenceReduce.filterSilence(alert); + + assertFalse(result); + verify(alertSilenceDao, times(1)).save(alertSilence); + assertEquals(1, alertSilence.getTimes()); + } + + @Test + void testFilterSilenceCyc() { + + AlertSilence alertSilence = AlertSilence.builder() + .enable(Boolean.TRUE) + .matchAll(Boolean.TRUE) + .type((byte) 1) // cyc time + .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) + .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) + .times(0) + .days(Collections.singletonList((byte) LocalDateTime.now().getDayOfWeek().getValue())) + .build(); + + 
when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); + doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); + + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + .build(); + + boolean result = alarmSilenceReduce.filterSilence(alert); + + assertFalse(result); + verify(alertSilenceDao, times(1)).save(alertSilence); + assertEquals(1, alertSilence.getTimes()); + } + + @Test + void testFilterSilenceNoMatch() { + + AlertSilence alertSilence = AlertSilence.builder() + .enable(Boolean.TRUE) + .matchAll(Boolean.TRUE) + .type((byte) 0) + .tags(Collections.singletonList(new TagItem("non-matching-tag", "value"))) + .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) + .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) + .times(0) + .build(); + + when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); + doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); + + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + .build(); + + boolean result = alarmSilenceReduce.filterSilence(alert); + + assertTrue(result); + verify(alertSilenceDao, never()).save(any()); + } + + @AfterEach + public void tearDown() { + + if (cacheFactoryMockedStatic != null) { + cacheFactoryMockedStatic.close(); + } + } + +} From c1252fbe2b6cb8b38f3ad17472a31c3d6ec87d32 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Tue, 30 Jul 2024 23:57:51 +0800 Subject: [PATCH 102/257] [Improve] add AlarmConvergeReduce unit test (#2410) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../alert/reduce/AlarmConvergeReduceTest.java | 114 ++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java diff --git 
a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java new file mode 100644 index 00000000000..2e06c3aacdd --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.alert.reduce; + +import java.util.Collections; +import java.util.HashMap; + +import org.apache.hertzbeat.alert.dao.AlertConvergeDao; +import org.apache.hertzbeat.common.cache.CacheFactory; +import org.apache.hertzbeat.common.cache.CommonCacheService; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.alerter.Alert; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AlarmConvergeReduce} + */ + +@ExtendWith(MockitoExtension.class) +class AlarmConvergeReduceTest { + + @Mock + private AlertConvergeDao alertConvergeDao; + + @Mock + private CommonCacheService convergeCache; + + private AlarmConvergeReduce alarmConvergeReduce; + + private Alert testAlert; + + private MockedStatic cacheFactoryMockedStatic; + + @BeforeEach + void setUp() { + + testAlert = Alert.builder() + .tags(new HashMap<>()) + .status(CommonConstants.ALERT_STATUS_CODE_SOLVED) + .build(); + + cacheFactoryMockedStatic = mockStatic(CacheFactory.class); + cacheFactoryMockedStatic.when(CacheFactory::getAlertConvergeCache).thenReturn(convergeCache); + + alarmConvergeReduce = new AlarmConvergeReduce(alertConvergeDao); + } + + @AfterEach + void tearDown() { + + if (cacheFactoryMockedStatic != null) { + cacheFactoryMockedStatic.close(); + } + } + + @Test + void testFilterConverge_RestoredAlert() { + + testAlert.setStatus(CommonConstants.ALERT_STATUS_CODE_RESTORED); + boolean result = alarmConvergeReduce.filterConverge(testAlert); + + assertTrue(result); + } + + @Test + void 
testFilterConverge_IgnoreTag() { + + testAlert.getTags().put(CommonConstants.IGNORE, "true"); + boolean result = alarmConvergeReduce.filterConverge(testAlert); + + assertTrue(result); + } + + @Test + void testFilterConvergeNoConverge() { + + when(convergeCache.get(CommonConstants.CACHE_ALERT_CONVERGE)).thenReturn(null); + when(alertConvergeDao.findAll()).thenReturn(Collections.emptyList()); + + boolean result = alarmConvergeReduce.filterConverge(testAlert); + + assertTrue(result); + verify(convergeCache).get(CommonConstants.CACHE_ALERT_CONVERGE); + verify(alertConvergeDao).findAll(); + verify(convergeCache).put(CommonConstants.CACHE_ALERT_CONVERGE, Collections.emptyList()); + } + +} From 861e9d377ffcd12f61763ff57d050d7f892f5a2d Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Wed, 31 Jul 2024 00:31:08 +0800 Subject: [PATCH 103/257] [refactor] move code from TagController to TagService (#2418) Co-authored-by: Calvin --- .../manager/controller/TagController.java | 37 +------------------ .../hertzbeat/manager/service/TagService.java | 10 ++--- .../manager/service/impl/TagServiceImpl.java | 35 +++++++++++++++++- .../manager/service/TagServiceTest.java | 8 ++-- 4 files changed, 43 insertions(+), 47 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/TagController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/TagController.java index cbcf52c1441..e7f793239d8 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/TagController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/TagController.java @@ -20,9 +20,7 @@ import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; -import jakarta.persistence.criteria.Predicate; import jakarta.validation.Valid; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; 
import java.util.stream.Collectors; @@ -31,8 +29,6 @@ import org.apache.hertzbeat.manager.service.TagService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; @@ -83,38 +79,7 @@ public ResponseEntity>> getTags( @Parameter(description = "Tag type", example = "0") @RequestParam(required = false) Byte type, @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list pagination", example = "8") @RequestParam(defaultValue = "8") int pageSize) { - // Get tag information - Specification specification = (root, query, criteriaBuilder) -> { - List andList = new ArrayList<>(); - if (type != null) { - Predicate predicateApp = criteriaBuilder.equal(root.get("type"), type); - andList.add(predicateApp); - } - Predicate[] andPredicates = new Predicate[andList.size()]; - Predicate andPredicate = criteriaBuilder.and(andList.toArray(andPredicates)); - - List orList = new ArrayList<>(); - if (search != null && !search.isEmpty()) { - Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + search + "%"); - orList.add(predicateName); - Predicate predicateValue = criteriaBuilder.like(root.get("tagValue"), "%" + search + "%"); - orList.add(predicateValue); - } - Predicate[] orPredicates = new Predicate[orList.size()]; - Predicate orPredicate = criteriaBuilder.or(orList.toArray(orPredicates)); - - if (andPredicates.length == 0 && orPredicates.length == 0) { - return query.where().getRestriction(); - } else if (andPredicates.length == 0) { - return orPredicate; - } else if (orPredicates.length == 0) { - return andPredicate; - } else { - return 
query.where(andPredicate, orPredicate).getRestriction(); - } - }; - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize); - Page alertPage = tagService.getTags(specification, pageRequest); + Page alertPage = tagService.getTags(search, type, pageIndex, pageSize); Message> message = Message.success(alertPage); return ResponseEntity.ok(message); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/TagService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/TagService.java index e6d6c5f8879..4e9f399cbad 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/TagService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/TagService.java @@ -23,8 +23,6 @@ import org.apache.hertzbeat.common.entity.manager.Monitor; import org.apache.hertzbeat.common.entity.manager.Tag; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; /** * tag service @@ -45,11 +43,13 @@ public interface TagService { /** * get tag page list - * @param specification Query condition - * @param pageRequest Paging condition + * @param search Tag content search + * @param type Tag type + * @param pageIndex List current page + * @param pageSize Number of list pagination * @return Tags */ - Page getTags(Specification specification, PageRequest pageRequest); + Page getTags(String search, Byte type, int pageIndex, int pageSize); /** * delete tags diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TagServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TagServiceImpl.java index 479de1c4737..74f555644a4 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TagServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TagServiceImpl.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.manager.service.impl; +import 
jakarta.persistence.criteria.Predicate; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Optional; @@ -69,7 +71,38 @@ public void modifyTag(Tag tag) { } @Override - public Page getTags(Specification specification, PageRequest pageRequest) { + public Page getTags(String search, Byte type, int pageIndex, int pageSize) { + // Get tag information + Specification specification = (root, query, criteriaBuilder) -> { + List andList = new ArrayList<>(); + if (type != null) { + Predicate predicateApp = criteriaBuilder.equal(root.get("type"), type); + andList.add(predicateApp); + } + Predicate[] andPredicates = new Predicate[andList.size()]; + Predicate andPredicate = criteriaBuilder.and(andList.toArray(andPredicates)); + + List orList = new ArrayList<>(); + if (search != null && !search.isEmpty()) { + Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + search + "%"); + orList.add(predicateName); + Predicate predicateValue = criteriaBuilder.like(root.get("tagValue"), "%" + search + "%"); + orList.add(predicateValue); + } + Predicate[] orPredicates = new Predicate[orList.size()]; + Predicate orPredicate = criteriaBuilder.or(orList.toArray(orPredicates)); + + if (andPredicates.length == 0 && orPredicates.length == 0) { + return query.where().getRestriction(); + } else if (andPredicates.length == 0) { + return orPredicate; + } else if (orPredicates.length == 0) { + return andPredicate; + } else { + return query.where(andPredicate, orPredicate).getRestriction(); + } + }; + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize); return tagDao.findAll(specification, pageRequest); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/TagServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/TagServiceTest.java index f8f7bdc73c0..b5c11f88554 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/TagServiceTest.java +++ 
b/manager/src/test/java/org/apache/hertzbeat/manager/service/TagServiceTest.java @@ -22,11 +22,10 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anySet; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.when; - import java.util.Collections; import java.util.HashSet; import java.util.Optional; @@ -78,9 +77,8 @@ void modifyTag() { @Test void getTags() { - Specification specification = mock(Specification.class); - when(tagDao.findAll(specification, PageRequest.of(1, 1))).thenReturn(Page.empty()); - assertNotNull(tagService.getTags(specification, PageRequest.of(1, 1))); + when(tagDao.findAll(any(Specification.class), any(PageRequest.class))).thenReturn(Page.empty()); + assertNotNull(tagService.getTags(null, null, 1, 10)); } @Test From c998d3b85099ced94c6817e9b047781c2b7e43eb Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Wed, 31 Jul 2024 00:36:06 +0800 Subject: [PATCH 104/257] [refactor] move code from PluginController to PluginService (#2417) Co-authored-by: Calvin --- .../manager/controller/PluginController.java | 26 ++----------------- .../manager/service/PluginService.java | 9 +++---- .../service/impl/PluginServiceImpl.java | 20 +++++++++++++- .../manager/service/PluginServiceTest.java | 8 +----- 4 files changed, 26 insertions(+), 37 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/PluginController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/PluginController.java index dc989bd4158..1fb73ada760 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/PluginController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/PluginController.java @@ -20,9 +20,7 @@ import 
static org.springframework.http.MediaType.APPLICATION_JSON_VALUE; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; -import jakarta.persistence.criteria.Predicate; import jakarta.validation.Valid; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import lombok.RequiredArgsConstructor; @@ -31,8 +29,6 @@ import org.apache.hertzbeat.common.entity.manager.PluginMetadata; import org.apache.hertzbeat.manager.service.PluginService; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; @@ -68,26 +64,8 @@ public ResponseEntity>> getPlugins( @Parameter(description = "plugin name search", example = "status") @RequestParam(required = false) String search, @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list pagination", example = "8") @RequestParam(defaultValue = "8") int pageSize) { - // Get tag information - Specification specification = (root, query, criteriaBuilder) -> { - List andList = new ArrayList<>(); - if (search != null && !search.isEmpty()) { - Predicate predicateApp = criteriaBuilder.like(root.get("name"), "%" + search + "%"); - andList.add(predicateApp); - } - Predicate[] andPredicates = new Predicate[andList.size()]; - Predicate andPredicate = criteriaBuilder.and(andList.toArray(andPredicates)); - - if (andPredicates.length == 0) { - return query.where().getRestriction(); - } else { - return andPredicate; - } - }; - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize); - Page alertPage = pluginService.getPlugins(specification, pageRequest); - Message> message = Message.success(alertPage); - return 
ResponseEntity.ok(message); + Page alertPage = pluginService.getPlugins(search, pageIndex, pageSize); + return ResponseEntity.ok(Message.success(alertPage)); } @DeleteMapping() diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/PluginService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/PluginService.java index 2fe3e5041c5..23d01a30be2 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/PluginService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/PluginService.java @@ -22,8 +22,6 @@ import org.apache.hertzbeat.common.entity.dto.PluginUpload; import org.apache.hertzbeat.common.entity.manager.PluginMetadata; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; /** * plugin service @@ -47,11 +45,12 @@ public interface PluginService { /** * get plugin page list * - * @param specification Query condition - * @param pageRequest Paging condition + * @param search plugin name search + * @param pageIndex List current page + * @param pageSize Number of list pagination * @return Plugins */ - Page getPlugins(Specification specification, PageRequest pageRequest); + Page getPlugins(String search, int pageIndex, int pageSize); /** * execute plugin diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java index 1aeeb49b4d7..69b79047916 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java @@ -17,6 +17,7 @@ package org.apache.hertzbeat.manager.service.impl; +import jakarta.persistence.criteria.Predicate; import java.io.File; import java.io.IOException; import java.net.MalformedURLException; @@ -221,7 +222,24 @@ public boolean 
pluginIsEnable(Class clazz) { } @Override - public Page getPlugins(Specification specification, PageRequest pageRequest) { + public Page getPlugins(String search, int pageIndex, int pageSize) { + // Get tag information + Specification specification = (root, query, criteriaBuilder) -> { + List andList = new ArrayList<>(); + if (search != null && !search.isEmpty()) { + Predicate predicateApp = criteriaBuilder.like(root.get("name"), "%" + search + "%"); + andList.add(predicateApp); + } + Predicate[] andPredicates = new Predicate[andList.size()]; + Predicate andPredicate = criteriaBuilder.and(andList.toArray(andPredicates)); + + if (andPredicates.length == 0) { + return query.where().getRestriction(); + } else { + return andPredicate; + } + }; + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize); return metadataDao.findAll(specification, pageRequest); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/PluginServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/PluginServiceTest.java index 4187b99f8c3..ed94f69c6c6 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/PluginServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/PluginServiceTest.java @@ -24,12 +24,10 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; - import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -123,13 +121,9 @@ void testDeletePlugins() { @Test void testGetPlugins() { - Specification spec = mock(Specification.class); - PageRequest pageRequest = PageRequest.of(0, 10); Page page = new PageImpl<>(Collections.singletonList(new PluginMetadata())); - 
when(metadataDao.findAll(any(Specification.class), any(PageRequest.class))).thenReturn(page); - - Page result = pluginService.getPlugins(spec, pageRequest); + Page result = pluginService.getPlugins(null, 0, 10); assertFalse(result.isEmpty()); verify(metadataDao, times(1)).findAll(any(Specification.class), any(PageRequest.class)); } From c7f4390638cbebcc8170a0625e67ec284b8b1b30 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Wed, 31 Jul 2024 00:41:40 +0800 Subject: [PATCH 105/257] [refactor] move code from NoticeConfigController to NoticeConfigService (#2416) Co-authored-by: Calvin --- .../controller/NoticeConfigController.java | 42 +++---------------- .../manager/service/NoticeConfigService.java | 13 +++--- .../service/impl/NoticeConfigServiceImpl.java | 31 ++++++++++++-- .../service/NoticeConfigServiceTest.java | 15 +++---- 4 files changed, 46 insertions(+), 55 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java index d1eb09ac35f..eb1c38f45a8 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java @@ -22,7 +22,6 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.persistence.criteria.Predicate; import java.util.List; import java.util.Optional; import javax.validation.Valid; @@ -32,7 +31,6 @@ import org.apache.hertzbeat.common.entity.manager.NoticeTemplate; import org.apache.hertzbeat.manager.service.NoticeConfigService; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; import 
org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; @@ -86,17 +84,8 @@ public ResponseEntity> deleteNoticeReceiver( description = "Get a list of message notification recipients based on query filter items") public ResponseEntity>> getReceivers( @Parameter(description = "en: Recipient name,support fuzzy query", example = "tom") @RequestParam(required = false) final String name) { - Specification specification = (root, query, criteriaBuilder) -> { - Predicate predicate = criteriaBuilder.conjunction(); - if (name != null && !name.isEmpty()) { - Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); - predicate = criteriaBuilder.and(predicateName); - } - return predicate; - }; - List receivers = noticeConfigService.getNoticeReceivers(specification); - Message> message = Message.success(receivers); - return ResponseEntity.ok(message); + List receivers = noticeConfigService.getNoticeReceivers(name); + return ResponseEntity.ok(Message.success(receivers)); } @PostMapping(path = "/rule") @@ -131,17 +120,8 @@ public ResponseEntity> deleteNoticeRule( description = "Get a list of message notification policies based on query filter items") public ResponseEntity>> getRules( @Parameter(description = "en: Recipient name", example = "rule1") @RequestParam(required = false) final String name) { - Specification specification = (root, query, criteriaBuilder) -> { - Predicate predicate = criteriaBuilder.conjunction(); - if (name != null && !name.isEmpty()) { - Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); - predicate = criteriaBuilder.and(predicateName); - } - return predicate; - }; - List receiverPage = noticeConfigService.getNoticeRules(specification); - Message> message = Message.success(receiverPage); - return ResponseEntity.ok(message); + List receiverPage = noticeConfigService.getNoticeRules(name); + return 
ResponseEntity.ok(Message.success(receiverPage)); } @@ -177,18 +157,8 @@ public ResponseEntity> deleteNoticeTemplate( description = "Get a list of message notification templates based on query filter items") public ResponseEntity>> getTemplates( @Parameter(description = "Template name,support fuzzy query", example = "rule1") @RequestParam(required = false) final String name) { - - Specification specification = (root, query, criteriaBuilder) -> { - Predicate predicate = criteriaBuilder.conjunction(); - if (name != null && !"".equals(name)) { - Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); - predicate = criteriaBuilder.and(predicateName); - } - return predicate; - }; - List templatePage = noticeConfigService.getNoticeTemplates(specification); - Message> message = Message.success(templatePage); - return ResponseEntity.ok(message); + List templatePage = noticeConfigService.getNoticeTemplates(name); + return ResponseEntity.ok(Message.success(templatePage)); } @PostMapping(path = "/receiver/send-test-msg") diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java index a304cace573..42fde07d0c8 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java @@ -23,7 +23,6 @@ import org.apache.hertzbeat.common.entity.manager.NoticeReceiver; import org.apache.hertzbeat.common.entity.manager.NoticeRule; import org.apache.hertzbeat.common.entity.manager.NoticeTemplate; -import org.springframework.data.jpa.domain.Specification; /** * Message notification configuration interface @@ -32,24 +31,24 @@ public interface NoticeConfigService { /** * Dynamic conditional query - * @param specification Query conditions + * @param name Recipient name,support fuzzy query * @return Search result */ - List 
getNoticeReceivers(Specification specification); + List getNoticeReceivers(String name); /** * Dynamic conditional query - * @param specification Query conditions + * @param name Template name,support fuzzy query * @return Search result */ - List getNoticeTemplates(Specification specification); + List getNoticeTemplates(String name); /** * Dynamic conditional query - * @param specification Query conditions + * @param name Recipient name * @return Search result */ - List getNoticeRules(Specification specification); + List getNoticeRules(String name); /** * Add a notification recipient diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java index 20c98298b46..efb3a8a6e13 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java @@ -17,6 +17,7 @@ package org.apache.hertzbeat.manager.service.impl; +import jakarta.persistence.criteria.Predicate; import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; @@ -83,19 +84,43 @@ public class NoticeConfigServiceImpl implements NoticeConfigService, CommandLine @Override - public List getNoticeReceivers(Specification specification) { + public List getNoticeReceivers(String name) { + Specification specification = (root, query, criteriaBuilder) -> { + Predicate predicate = criteriaBuilder.conjunction(); + if (name != null && !name.isEmpty()) { + Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); + predicate = criteriaBuilder.and(predicateName); + } + return predicate; + }; return noticeReceiverDao.findAll(specification); } @Override - public List getNoticeTemplates(Specification specification) { + public List getNoticeTemplates(String name) { + Specification specification = 
(root, query, criteriaBuilder) -> { + Predicate predicate = criteriaBuilder.conjunction(); + if (name != null && !"".equals(name)) { + Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); + predicate = criteriaBuilder.and(predicateName); + } + return predicate; + }; List defaultTemplates = new LinkedList<>(PRESET_TEMPLATE.values()); defaultTemplates.addAll(noticeTemplateDao.findAll(specification)); return defaultTemplates; } @Override - public List getNoticeRules(Specification specification) { + public List getNoticeRules(String name) { + Specification specification = (root, query, criteriaBuilder) -> { + Predicate predicate = criteriaBuilder.conjunction(); + if (name != null && !name.isEmpty()) { + Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); + predicate = criteriaBuilder.and(predicateName); + } + return predicate; + }; return noticeRuleDao.findAll(specification); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java index c595b724237..a7c6ac19e10 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java @@ -69,23 +69,20 @@ void setUp() { @Test void getNoticeReceivers() { - final Specification specification = mock(Specification.class); - noticeConfigService.getNoticeReceivers(specification); - verify(noticeReceiverDao, times(1)).findAll(specification); + noticeConfigService.getNoticeReceivers(null); + verify(noticeReceiverDao, times(1)).findAll(any(Specification.class)); } @Test void getNoticeTemplates() { - final Specification specification = mock(Specification.class); - noticeConfigService.getNoticeTemplates(specification); - verify(noticeTemplateDao, times(1)).findAll(specification); + 
noticeConfigService.getNoticeTemplates(null); + verify(noticeTemplateDao, times(1)).findAll(any(Specification.class)); } @Test void getNoticeRules() { - final Specification specification = mock(Specification.class); - noticeConfigService.getNoticeRules(specification); - verify(noticeRuleDao, times(1)).findAll(specification); + noticeConfigService.getNoticeRules(null); + verify(noticeRuleDao, times(1)).findAll(any(Specification.class)); } @Test From ae4a5b1c6393878802b22b11de0121a03730934c Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Wed, 31 Jul 2024 00:48:10 +0800 Subject: [PATCH 106/257] [refactor] move code from GeneralConfigController to ConfigService (#2414) Co-authored-by: Calvin --- .../controller/GeneralConfigController.java | 42 ++----- .../manager/service/ConfigService.java | 47 ++++++++ .../manager/service/GeneralConfigService.java | 2 +- .../service/impl/ConfigServiceImpl.java | 81 +++++++++++++ .../manager/service/ConfigServiceTest.java | 107 ++++++++++++++++++ 5 files changed, 243 insertions(+), 36 deletions(-) create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/service/ConfigService.java create mode 100644 manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ConfigServiceImpl.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java index 49f48d482d2..8747bc8b9d7 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/GeneralConfigController.java @@ -21,15 +21,12 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; +import 
jakarta.annotation.Resource; import jakarta.validation.constraints.NotNull; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.entity.dto.Message; import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; -import org.apache.hertzbeat.manager.service.GeneralConfigService; -import org.apache.hertzbeat.manager.service.impl.TemplateConfigServiceImpl; +import org.apache.hertzbeat.manager.service.ConfigService; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; @@ -47,14 +44,9 @@ @Tag(name = "Alert sender Configuration API") @Slf4j public class GeneralConfigController { - private final Map configServiceMap; + @Resource + private ConfigService configService; - public GeneralConfigController(List generalConfigServices) { - configServiceMap = new HashMap<>(8); - if (generalConfigServices != null) { - generalConfigServices.forEach(config -> configServiceMap.put(config.type(), config)); - } - } @PostMapping(path = "/{type}") @Operation(summary = "Save or update common config", description = "Save or update common config") @@ -62,11 +54,7 @@ public ResponseEntity> saveOrUpdateConfig( @Parameter(description = "Config Type", example = "email") @PathVariable("type") @NotNull final String type, @RequestBody Object config) { - GeneralConfigService configService = configServiceMap.get(type); - if (configService == null) { - throw new IllegalArgumentException("Not supported this config type: " + type); - } - configService.saveConfig(config); + configService.saveConfig(type, config); return ResponseEntity.ok(Message.success("Update config success")); } @@ -75,11 +63,7 @@ public ResponseEntity> saveOrUpdateConfig( public ResponseEntity> getConfig( @Parameter(description = "Config Type", example = "email") @PathVariable("type") @NotNull final String type) { - 
GeneralConfigService configService = configServiceMap.get(type); - if (configService == null) { - throw new IllegalArgumentException("Not supported this config type: " + type); - } - return ResponseEntity.ok(Message.success(configService.getConfig())); + return ResponseEntity.ok(Message.success(configService.getConfig(type))); } @PutMapping(path = "/template/{app}") @@ -87,19 +71,7 @@ public ResponseEntity> getConfig( public ResponseEntity> updateTemplateAppConfig( @PathVariable("app") @NotNull final String app, @RequestBody TemplateConfig.AppTemplate template) { - GeneralConfigService configService = configServiceMap.get("template"); - if (configService == null || !(configService instanceof TemplateConfigServiceImpl)) { - throw new IllegalArgumentException("Not supported this config type: template"); - } - TemplateConfig config = ((TemplateConfigServiceImpl) configService).getConfig(); - if (config == null) { - config = new TemplateConfig(); - } - if (config.getApps() == null) { - config.setApps(new HashMap<>(8)); - } - config.getApps().put(app, template); - configService.saveConfig(config); + configService.updateTemplateAppConfig(app, template); return ResponseEntity.ok(Message.success()); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/ConfigService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ConfigService.java new file mode 100644 index 00000000000..418c7c5e256 --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ConfigService.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; + +/** + * Provides operations for the GeneralConfigService + */ +public interface ConfigService { + + /** + * save config + * @param type config type + * @param config need save configuration + */ + void saveConfig(String type, Object config); + + /** + * get config + * @param type config type + * @return config + */ + Object getConfig(String type); + + /** + * Update the app template config + * @param app monitoring type + * @param template template config + */ + void updateTemplateAppConfig(String app, TemplateConfig.AppTemplate template); +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/GeneralConfigService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/GeneralConfigService.java index 0f381bf6821..3d6c956d8f0 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/GeneralConfigService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/GeneralConfigService.java @@ -18,7 +18,7 @@ package org.apache.hertzbeat.manager.service; /** - *

ConfigService interface provides CRUD operations for configurations.

+ *

GeneralConfigService interface provides CRUD operations for configurations.

* @param configuration type. * @version 1.0 */ diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ConfigServiceImpl.java new file mode 100644 index 00000000000..345e2b3b4c9 --- /dev/null +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ConfigServiceImpl.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service.impl; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; +import org.apache.hertzbeat.manager.service.ConfigService; +import org.apache.hertzbeat.manager.service.GeneralConfigService; +import org.springframework.stereotype.Component; + + +/** + * GeneralConfigService proxy class + */ +@Component +public class ConfigServiceImpl implements ConfigService { + + private static final String TEMPLATE_CONFIG_TYPE = "template"; + + private final Map configServiceMap; + + public ConfigServiceImpl(List generalConfigServices){ + configServiceMap = new ConcurrentHashMap<>(8); + if (generalConfigServices != null) { + generalConfigServices.forEach(config -> configServiceMap.put(config.type(), config)); + } + } + + @Override + public void saveConfig(String type, Object config) { + GeneralConfigService configService = configServiceMap.get(type); + if (configService == null) { + throw new IllegalArgumentException("Not supported this config type: " + type); + } + configService.saveConfig(config); + } + + @Override + public Object getConfig(String type) { + GeneralConfigService configService = configServiceMap.get(type); + if (configService == null) { + throw new IllegalArgumentException("Not supported this config type: " + type); + } + return configService.getConfig(); + } + + @Override + public void updateTemplateAppConfig(String app, TemplateConfig.AppTemplate template){ + GeneralConfigService configService = configServiceMap.get(TEMPLATE_CONFIG_TYPE); + if (!(configService instanceof TemplateConfigServiceImpl)) { + throw new IllegalArgumentException("Not supported this config type: template"); + } + TemplateConfig config = ((TemplateConfigServiceImpl) configService).getConfig(); + if (config == null) { + config = new TemplateConfig(); + } + if (config.getApps() == null) { + config.setApps(new 
HashMap<>(8)); + } + config.getApps().put(app, template); + configService.saveConfig(config); + } +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java new file mode 100644 index 00000000000..d0b16d4cca6 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + + +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import java.util.ArrayList; +import java.util.List; +import org.apache.hertzbeat.manager.pojo.dto.EmailNoticeSender; +import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreDTO; +import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; +import org.apache.hertzbeat.manager.service.impl.ConfigServiceImpl; +import org.apache.hertzbeat.manager.service.impl.MailGeneralConfigServiceImpl; +import org.apache.hertzbeat.manager.service.impl.ObjectStoreConfigServiceImpl; +import org.apache.hertzbeat.manager.service.impl.TemplateConfigServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + + +/** + * Test case for {@link ConfigService} + */ +@ExtendWith(MockitoExtension.class) +public class ConfigServiceTest { + + @InjectMocks + private ConfigServiceImpl configService; + @Mock + private ObjectStoreConfigServiceImpl objectStoreConfigService; + @Mock + private TemplateConfigServiceImpl templateConfigService; + @Mock + private MailGeneralConfigServiceImpl mailGeneralConfigService; + + @BeforeEach + public void setUp() { + List generalConfigServices = new ArrayList<>(); + when(objectStoreConfigService.type()).thenReturn("oss"); + when(templateConfigService.type()).thenReturn("template"); + when(mailGeneralConfigService.type()).thenReturn("mail"); + generalConfigServices.add(objectStoreConfigService); + generalConfigServices.add(templateConfigService); + generalConfigServices.add(mailGeneralConfigService); + configService = new 
ConfigServiceImpl(generalConfigServices); + } + + @Test + public void testSaveConfig() { + configService.saveConfig("oss", new ObjectStoreDTO<>()); + verify(objectStoreConfigService, times(1)).saveConfig(any(ObjectStoreDTO.class)); + + configService.saveConfig("mail", new EmailNoticeSender()); + verify(mailGeneralConfigService, times(1)).saveConfig(any(EmailNoticeSender.class)); + } + + @Test + public void testGetConfig() { + ObjectStoreDTO ossConfig = new ObjectStoreDTO<>(); + when(objectStoreConfigService.getConfig()).thenReturn(ossConfig); + assertNotNull(configService.getConfig("oss")); + + EmailNoticeSender emailNoticeSender = new EmailNoticeSender(); + when(mailGeneralConfigService.getConfig()).thenReturn(emailNoticeSender); + configService.getConfig("mail"); + verify(mailGeneralConfigService, times(1)).getConfig(); + } + + @Test + public void testUpdateTemplateAppConfig(){ + TemplateConfig templateConfig = new TemplateConfig(); + when(templateConfigService.getConfig()).thenReturn(templateConfig); + configService.updateTemplateAppConfig("custom", new TemplateConfig.AppTemplate()); + + verify(templateConfigService, times(1)).getConfig(); + verify(templateConfigService, times(1)).saveConfig(templateConfig); + } + + @Test + public void testException(){ + assertThrows(IllegalArgumentException.class, () -> configService.saveConfig("test", new ObjectStoreDTO<>())); + assertThrows(IllegalArgumentException.class, () -> configService.getConfig("test2"), "Not supported this config type: test2"); + } +} From 2784c8fa6b04538d0640507695231c85033f84cd Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Wed, 31 Jul 2024 19:28:31 +0800 Subject: [PATCH 107/257] Optimize the execution efficiency of Gitpod tasks (#2422) --- .gitpod.Dockerfile | 7 ------- .gitpod.yml | 46 +++++++++++++++++++++++++++++----------------- 2 files changed, 29 insertions(+), 24 deletions(-) delete mode 100644 .gitpod.Dockerfile diff --git a/.gitpod.Dockerfile b/.gitpod.Dockerfile deleted file mode 
100644 index 7bfd3167cc7..00000000000 --- a/.gitpod.Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM gitpod/workspace-full - -USER gitpod - -RUN bash -c ". /home/gitpod/.sdkman/bin/sdkman-init.sh && \ - sdk install java 17.0.12-amzn && \ - sdk default java 17.0.12-amzn" diff --git a/.gitpod.yml b/.gitpod.yml index 84e8df6d67d..c13fbe042c8 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -1,27 +1,33 @@ -image: - file: .gitpod.Dockerfile tasks: - - name: Setup - init: | - cp -r contrib/ide/vscode .vscode - mvn clean install - cd web-app - yarn install - command: | - gp sync-done setup - exit 0 + - name: Run backend + before: cd manager command: | - gp sync-await setup - - cd manager + gp sync-await setup-backend mvn spring-boot:run + - name: Run frontend + before: cd web-app + command: | + gp sync-await setup-frontend + yarn start --public-host "`gp url 4200`" + openMode: split-right + + - name: Setup backend + init: | + sdk install java 17.0.11.fx-zulu < /dev/null + sdk default java 17.0.11.fx-zulu < /dev/null + mvn clean install -DskipTests command: | - gp sync-await setup + gp sync-done setup-backend + exit 0 - cd web-app - yarn start + - name: Setup frontend + init: | + cd web-app && yarn install + command: | + gp sync-done setup-frontend + exit 0 openMode: split-right vscode: @@ -39,3 +45,9 @@ ports: - port: 4200 name: Hertzbeat onOpen: open-browser + + - port: 1157 + onOpen: ignore + + - port: 1158 + onOpen: ignore From a5c444a49ab1f3457112e10c588a1139da63be59 Mon Sep 17 00:00:00 2001 From: Limbo Date: Wed, 31 Jul 2024 19:32:25 +0800 Subject: [PATCH 108/257] [chore] update web-app readme (#2423) Co-authored-by: tomsun28 --- web-app/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web-app/README.md b/web-app/README.md index 8a0e7b0a75b..8a75b725ea6 100644 --- a/web-app/README.md +++ b/web-app/README.md @@ -11,7 +11,7 @@ 2. Install yarn if not existed `npm install -g yarn` 3. 
Execute `yarn install` or `yarn install --registry=https://registry.npmmirror.com` in `web-app` 4. Install angular-cli : `yarn global add @angular/cli@15` or `yarn global add @angular/cli@15 --registry=https://registry.npmmirror.com` -5. Start After Backend Server Available : `ng serve --open` +5. Start After Backend Server Available : `yarn start` ### Build HertzBeat Install Package From 181ca5aa5e6ba67cfe7bab84c0c2222d08c6269f Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Thu, 1 Aug 2024 00:01:11 +0800 Subject: [PATCH 109/257] [refactor] move code from MonitorsController to MonitorService (#2415) Co-authored-by: Calvin --- .../controller/MonitorsController.java | 91 ++----------------- .../manager/service/MonitorService.java | 16 +++- .../service/impl/MonitorServiceImpl.java | 72 ++++++++++++++- .../manager/service/MonitorServiceTest.java | 8 +- 4 files changed, 92 insertions(+), 95 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/MonitorsController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/MonitorsController.java index 46e1129b3d7..6bbe0a270fb 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/MonitorsController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/MonitorsController.java @@ -21,12 +21,7 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.persistence.criteria.CriteriaBuilder; -import jakarta.persistence.criteria.JoinType; -import jakarta.persistence.criteria.ListJoin; -import jakarta.persistence.criteria.Predicate; import jakarta.servlet.http.HttpServletResponse; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import org.apache.hertzbeat.common.entity.dto.Message; @@ -34,11 +29,7 @@ import org.apache.hertzbeat.manager.service.MonitorService; import 
org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.domain.Sort; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; -import org.springframework.util.StringUtils; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; @@ -56,10 +47,6 @@ @RequestMapping(path = "/api/monitors", produces = {APPLICATION_JSON_VALUE}) public class MonitorsController { - private static final byte ALL_MONITOR_STATUS = 9; - - private static final int TAG_LENGTH = 2; - @Autowired private MonitorService monitorService; @@ -77,69 +64,8 @@ public ResponseEntity>> getMonitors( @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list pagination ", example = "8") @RequestParam(defaultValue = "8") int pageSize, @Parameter(description = "Monitor tag ", example = "env:prod") @RequestParam(required = false) final String tag) { - Specification specification = (root, query, criteriaBuilder) -> { - List andList = new ArrayList<>(); - if (ids != null && !ids.isEmpty()) { - CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); - for (long id : ids) { - inPredicate.value(id); - } - andList.add(inPredicate); - } - if (StringUtils.hasText(app)) { - Predicate predicateApp = criteriaBuilder.equal(root.get("app"), app); - andList.add(predicateApp); - } - if (status != null && status >= 0 && status < ALL_MONITOR_STATUS) { - Predicate predicateStatus = criteriaBuilder.equal(root.get("status"), status); - andList.add(predicateStatus); - } - - if (StringUtils.hasText(tag)) { - String[] tagArr = tag.split(":"); - String tagName = tagArr[0]; - ListJoin tagJoin = root - .join(root.getModel() - 
.getList("tags", org.apache.hertzbeat.common.entity.manager.Tag.class), JoinType.LEFT); - if (tagArr.length == TAG_LENGTH) { - String tagValue = tagArr[1]; - andList.add(criteriaBuilder.equal(tagJoin.get("name"), tagName)); - andList.add(criteriaBuilder.equal(tagJoin.get("tagValue"), tagValue)); - } else { - andList.add(criteriaBuilder.equal(tagJoin.get("name"), tag)); - } - } - Predicate[] andPredicates = new Predicate[andList.size()]; - Predicate andPredicate = criteriaBuilder.and(andList.toArray(andPredicates)); - - List orList = new ArrayList<>(); - if (StringUtils.hasText(host)) { - Predicate predicateHost = criteriaBuilder.like(root.get("host"), "%" + host + "%"); - orList.add(predicateHost); - } - if (StringUtils.hasText(name)) { - Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); - orList.add(predicateName); - } - Predicate[] orPredicates = new Predicate[orList.size()]; - Predicate orPredicate = criteriaBuilder.or(orList.toArray(orPredicates)); - - if (andPredicates.length == 0 && orPredicates.length == 0) { - return query.where().getRestriction(); - } else if (andPredicates.length == 0) { - return orPredicate; - } else if (orPredicates.length == 0) { - return andPredicate; - } else { - return query.where(andPredicate, orPredicate).getRestriction(); - } - }; - // Pagination is a must - Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); - Page monitorPage = monitorService.getMonitors(specification, pageRequest); - Message> message = Message.success(monitorPage); - return ResponseEntity.ok(message); + Page monitorPage = monitorService.getMonitors(ids, app, name, host, status, sort, order, pageIndex, pageSize, tag); + return ResponseEntity.ok(Message.success(monitorPage)); } @GetMapping(path = "/{app}") @@ -147,9 +73,7 @@ public ResponseEntity>> getMonitors( description = "Filter all acquired monitoring information lists of 
the specified monitoring type according to the query") public ResponseEntity>> getAppMonitors( @Parameter(description = "en: Monitoring type", example = "linux") @PathVariable(required = false) final String app) { - List monitors = monitorService.getAppMonitors(app); - Message> message = Message.success(monitors); - return ResponseEntity.ok(message); + return ResponseEntity.ok(Message.success(monitorService.getAppMonitors(app))); } @@ -162,8 +86,7 @@ public ResponseEntity> deleteMonitors( if (ids != null && !ids.isEmpty()) { monitorService.deleteMonitors(new HashSet<>(ids)); } - Message message = Message.success(); - return ResponseEntity.ok(message); + return ResponseEntity.ok(Message.success()); } @DeleteMapping("manage") @@ -175,8 +98,7 @@ public ResponseEntity> cancelManageMonitors( if (ids != null && !ids.isEmpty()) { monitorService.cancelManageMonitors(new HashSet<>(ids)); } - Message message = Message.success(); - return ResponseEntity.ok(message); + return ResponseEntity.ok(Message.success()); } @GetMapping("manage") @@ -188,8 +110,7 @@ public ResponseEntity> enableManageMonitors( if (ids != null && !ids.isEmpty()) { monitorService.enableManageMonitors(new HashSet<>(ids)); } - Message message = Message.success(); - return ResponseEntity.ok(message); + return ResponseEntity.ok(Message.success()); } @GetMapping("/export") diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/MonitorService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/MonitorService.java index 1657b7a4da8..0a3007e868b 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/MonitorService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/MonitorService.java @@ -28,8 +28,6 @@ import org.apache.hertzbeat.manager.pojo.dto.MonitorDto; import org.apache.hertzbeat.manager.support.exception.MonitorDetectException; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import 
org.springframework.data.jpa.domain.Specification; import org.springframework.web.multipart.MultipartFile; /** @@ -96,11 +94,19 @@ public interface MonitorService { /** * Dynamic conditional query - * @param specification Query conditions - * @param pageRequest Pagination parameters + * @param monitorIds Monitor ID List + * @param app Monitor Type + * @param name Monitor Name support fuzzy query + * @param host Monitor Host support fuzzy query + * @param status Monitor Status 0:no monitor,1:usable,2:disabled,9:all status + * @param sort Sort Field + * @param order Sort mode eg:asc desc + * @param pageIndex List current page + * @param pageSize Number of list pagination + * @param tag Monitor tag * @return Search Result */ - Page getMonitors(Specification specification, PageRequest pageRequest); + Page getMonitors(List monitorIds, String app, String name, String host, Byte status, String sort, String order, int pageIndex, int pageSize, String tag); /** * Unmanaged monitoring items in batches according to the monitoring ID list diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java index 2684ea1910c..4cd7c888dac 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java @@ -18,10 +18,15 @@ package org.apache.hertzbeat.manager.service.impl; import com.fasterxml.jackson.core.type.TypeReference; +import jakarta.persistence.criteria.CriteriaBuilder; +import jakarta.persistence.criteria.JoinType; +import jakarta.persistence.criteria.ListJoin; +import jakarta.persistence.criteria.Predicate; import jakarta.servlet.http.HttpServletResponse; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.time.LocalDateTime; +import java.util.ArrayList; import java.util.HashMap; import 
java.util.HashSet; import java.util.LinkedList; @@ -71,6 +76,7 @@ import org.springframework.context.ApplicationContext; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; +import org.springframework.data.domain.Sort; import org.springframework.data.jpa.domain.Specification; import org.springframework.http.HttpHeaders; import org.springframework.stereotype.Service; @@ -95,6 +101,10 @@ public class MonitorServiceImpl implements MonitorService { public static final String PATTERN_HTTP = "(?i)http://"; public static final String PATTERN_HTTPS = "(?i)https://"; + private static final byte ALL_MONITOR_STATUS = 9; + + private static final int TAG_LENGTH = 2; + @Autowired private AppService appService; @@ -642,7 +652,67 @@ public MonitorDto getMonitorDto(long id) throws RuntimeException { } @Override - public Page getMonitors(Specification specification, PageRequest pageRequest) { + public Page getMonitors(List monitorIds, String app, String name, String host, Byte status, String sort, String order, int pageIndex, int pageSize, String tag) { + Specification specification = (root, query, criteriaBuilder) -> { + List andList = new ArrayList<>(); + if (monitorIds != null && !monitorIds.isEmpty()) { + CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); + for (long id : monitorIds) { + inPredicate.value(id); + } + andList.add(inPredicate); + } + if (StringUtils.hasText(app)) { + Predicate predicateApp = criteriaBuilder.equal(root.get("app"), app); + andList.add(predicateApp); + } + if (status != null && status >= 0 && status < ALL_MONITOR_STATUS) { + Predicate predicateStatus = criteriaBuilder.equal(root.get("status"), status); + andList.add(predicateStatus); + } + + if (StringUtils.hasText(tag)) { + String[] tagArr = tag.split(":"); + String tagName = tagArr[0]; + ListJoin tagJoin = root + .join(root.getModel() + .getList("tags", org.apache.hertzbeat.common.entity.manager.Tag.class), JoinType.LEFT); + if 
(tagArr.length == TAG_LENGTH) { + String tagValue = tagArr[1]; + andList.add(criteriaBuilder.equal(tagJoin.get("name"), tagName)); + andList.add(criteriaBuilder.equal(tagJoin.get("tagValue"), tagValue)); + } else { + andList.add(criteriaBuilder.equal(tagJoin.get("name"), tag)); + } + } + Predicate[] andPredicates = new Predicate[andList.size()]; + Predicate andPredicate = criteriaBuilder.and(andList.toArray(andPredicates)); + + List orList = new ArrayList<>(); + if (StringUtils.hasText(host)) { + Predicate predicateHost = criteriaBuilder.like(root.get("host"), "%" + host + "%"); + orList.add(predicateHost); + } + if (StringUtils.hasText(name)) { + Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); + orList.add(predicateName); + } + Predicate[] orPredicates = new Predicate[orList.size()]; + Predicate orPredicate = criteriaBuilder.or(orList.toArray(orPredicates)); + + if (andPredicates.length == 0 && orPredicates.length == 0) { + return query.where().getRestriction(); + } else if (andPredicates.length == 0) { + return orPredicate; + } else if (orPredicates.length == 0) { + return andPredicate; + } else { + return query.where(andPredicate, orPredicate).getRestriction(); + } + }; + // Pagination is a must + Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); return monitorDao.findAll(specification, pageRequest); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java index 5956619773f..fcb91ae6cbd 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java @@ -21,8 +21,9 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -652,9 +653,8 @@ void getMonitorDto() { @Test void getMonitors() { - Specification specification = mock(Specification.class); - when(monitorDao.findAll(specification, PageRequest.of(1, 1))).thenReturn(Page.empty()); - assertNotNull(monitorService.getMonitors(specification, PageRequest.of(1, 1))); + doReturn(Page.empty()).when(monitorDao).findAll(any(Specification.class), any(PageRequest.class)); + assertNotNull(monitorService.getMonitors(null, null, null, null, null, "gmtCreate", "desc", 1, 1, null)); } @Test From 51b1a9bdc79d80e0216fb428d645f269ce762a19 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Thu, 1 Aug 2024 10:08:11 +0800 Subject: [PATCH 110/257] [bugfix] fixed jpa expired data clean error (#2329) Co-authored-by: tomsun28 --- .../java/org/apache/hertzbeat/warehouse/dao/HistoryDao.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/dao/HistoryDao.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/dao/HistoryDao.java index d00bec8e4ff..7af37dfa939 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/dao/HistoryDao.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/dao/HistoryDao.java @@ -46,7 +46,7 @@ public interface HistoryDao extends JpaRepository, JpaSpecificati */ @Modifying @Transactional(rollbackFor = Exception.class) - @Query(value = "delete from hzb_history limit :delNum", nativeQuery = true) + @Query(value = "DELETE FROM hzb_history WHERE id IN ( SELECT t2.id from (SELECT t1.id FROM hzb_history t1 
LIMIT ?1) as t2)", nativeQuery = true) int deleteOlderHistoriesRecord(@Param(value = "delNum") int delNum); /** From 3a7d5829ad002fcc037d8f15fe3b7115e9346a6b Mon Sep 17 00:00:00 2001 From: YuLuo Date: Thu, 1 Aug 2024 10:16:26 +0800 Subject: [PATCH 111/257] [Improve] add AlarmCommonReduce unit test (#2412) Signed-off-by: yuluo-yx Signed-off-by: YuLuo Co-authored-by: tomsun28 --- .../alert/reduce/AlarmCommonReduceTest.java | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java new file mode 100644 index 00000000000..9b7f77deef1 --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.alert.reduce; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hertzbeat.alert.dao.AlertMonitorDao; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.alerter.Alert; +import org.apache.hertzbeat.common.queue.CommonDataQueue; +import org.apache.hertzbeat.common.entity.manager.Tag; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AlarmCommonReduce} + */ + +@ExtendWith(MockitoExtension.class) +class AlarmCommonReduceTest { + + @Mock + private AlarmSilenceReduce alarmSilenceReduce; + + @Mock + private AlarmConvergeReduce alarmConvergeReduce; + + @Mock + private CommonDataQueue dataQueue; + + @Mock + private AlertMonitorDao alertMonitorDao; + + private AlarmCommonReduce alarmCommonReduce; + + private Alert testAlert; + + @BeforeEach + void setUp() { + + testAlert = Alert.builder().build(); + alarmCommonReduce = new AlarmCommonReduce( + alarmSilenceReduce, + alarmConvergeReduce, + dataQueue, + alertMonitorDao + ); + } + + @Test + void testReduceAndSendAlarmNoMonitorId() { + + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); + when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(true); + + alarmCommonReduce.reduceAndSendAlarm(testAlert); + + verify(dataQueue).sendAlertsData(testAlert); + verify(alertMonitorDao, 
never()).findMonitorIdBindTags(anyLong()); + } + @Test + void testReduceAndSendAlarmWithMonitorId() { + + Map tags = new HashMap<>(); + tags.put(CommonConstants.TAG_MONITOR_ID, "123"); + testAlert.setTags(tags); + + doReturn(Collections.singletonList( + Tag.builder() + .name("newTag") + .tagValue("tagValue") + .build()) + ).when(alertMonitorDao).findMonitorIdBindTags(123L); + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); + when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(true); + + alarmCommonReduce.reduceAndSendAlarm(testAlert); + + assertTrue(testAlert.getTags().containsKey("newTag")); + assertEquals("tagValue", testAlert.getTags().get("newTag")); + verify(dataQueue).sendAlertsData(testAlert); + } + + @Test + void testReduceAndSendAlarmConvergeFilterFail() { + + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(false); + + alarmCommonReduce.reduceAndSendAlarm(testAlert); + + verify(dataQueue, never()).sendAlertsData(testAlert); + verify(alarmSilenceReduce, never()).filterSilence(any(Alert.class)); + } + + @Test + void testReduceAndSendAlarmSilenceFilterFail() { + + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); + when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(false); + + alarmCommonReduce.reduceAndSendAlarm(testAlert); + + verify(dataQueue, never()).sendAlertsData(testAlert); + } + +} From 3d99835487a7b9fcd0532b241f35af1463175bfb Mon Sep 17 00:00:00 2001 From: YuLuo Date: Thu, 1 Aug 2024 10:31:18 +0800 Subject: [PATCH 112/257] [Improve] add AlertConvergesController & AlertConvergeController unit test (#2424) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../AlertConvergeControllerTest.java | 125 ++++++++++++++++++ .../AlertConvergesControllerTest.java | 122 +++++++++++++++++ 2 files changed, 247 insertions(+) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java create mode 100644 
alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java new file mode 100644 index 00000000000..6ccf349559c --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.alert.controller; + +import org.apache.hertzbeat.alert.service.AlertConvergeService; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.alerter.AlertConverge; +import org.apache.hertzbeat.common.util.JsonUtil; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +/** + * test case for {@link AlertConvergeController} + */ + +@ExtendWith(MockitoExtension.class) +public class AlertConvergeControllerTest { + + private MockMvc mockMvc; + + @Mock + private AlertConvergeService alertConvergeService; + + private AlertConverge alertConverge; + + @InjectMocks + private AlertConvergeController alertConvergeController; + + @BeforeEach + void setUp() { + + this.mockMvc = standaloneSetup(alertConvergeController).build(); + + alertConverge = AlertConverge.builder() + .name("test") + .creator("admin") + .modifier("admin") + .id(1L) + .build(); + } + + @Test + void testAddNewAlertConverge() throws 
Exception { + + doNothing().when(alertConvergeService).validate(any(AlertConverge.class), eq(false)); + doNothing().when(alertConvergeService).addAlertConverge(any(AlertConverge.class)); + + mockMvc.perform(post("/api/alert/converge") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertConverge)) + ).andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Add success")); + } + + @Test + void testModifyAlertConverge() throws Exception { + + doNothing().when(alertConvergeService).validate(any(AlertConverge.class), eq(true)); + doNothing().when(alertConvergeService).modifyAlertConverge(any(AlertConverge.class)); + + mockMvc.perform(put("/api/alert/converge") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertConverge)) + ).andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Modify success")); + } + + @Test + void testGetAlertConvergeExists() throws Exception { + + when(alertConvergeService.getAlertConverge(1L)).thenReturn(alertConverge); + + mockMvc.perform(get("/api/alert/converge/{id}", 1L) + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.data.id").value(alertConverge.getId())); + } + + @Test + void testGetAlertConvergeNotExists() throws Exception { + + when(alertConvergeService.getAlertConverge(1L)).thenReturn(null); + + mockMvc.perform(get("/api/alert/converge/{id}", 1L) + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.MONITOR_NOT_EXIST_CODE)) + .andExpect(jsonPath("$.msg").value("AlertConverge not exist.")); + } + +} diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java new file 
mode 100644 index 00000000000..28088f2d467 --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.alert.controller; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; + +import org.apache.hertzbeat.alert.service.AlertConvergeService; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.alerter.AlertConverge; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.PageImpl; +import org.springframework.data.domain.PageRequest; +import org.springframework.data.domain.Sort; +import org.springframework.test.web.servlet.MockMvc; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import 
static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; + +/** + * test case for {@link AlertConvergesController} + */ + +@ExtendWith(MockitoExtension.class) +class AlertConvergesControllerTest { + + private MockMvc mockMvc; + + @Mock + private AlertConvergeService alertConvergeService; + + @InjectMocks + private AlertConvergesController alertConvergesController; + + private List alertConvergeList; + + @BeforeEach + void setUp() { + + this.mockMvc = standaloneSetup(alertConvergesController).build(); + + AlertConverge alertConverge1 = AlertConverge.builder() + .name("Converge1") + .id(1L) + .build(); + + AlertConverge alertConverge2 = AlertConverge.builder() + .name("Converge2") + .id(2L) + .build(); + + alertConvergeList = Arrays.asList(alertConverge1, alertConverge2); + } + + @Test + void testGetAlertConverges() throws Exception { + + Page alertConvergePage = new PageImpl<>( + alertConvergeList, + PageRequest.of(0, 8, Sort.by("id").descending()), + alertConvergeList.size() + ); + + when(alertConvergeService.getAlertConverges(any(), any(PageRequest.class))).thenReturn(alertConvergePage); + + mockMvc.perform(get("/api/alert/converges") + .param("pageIndex", "0") + .param("pageSize", "8") + .param("sort", "id") + .param("order", "desc") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.data.content[0].id").value(1)) + .andExpect(jsonPath("$.data.content[0].name").value("Converge1")) + .andExpect(jsonPath("$.data.content[1].id").value(2)) + 
.andExpect(jsonPath("$.data.content[1].name").value("Converge2")); + } + + @Test + void testDeleteAlertDefines() throws Exception { + + doNothing().when(alertConvergeService).deleteAlertConverges(eq(new HashSet<>(Arrays.asList(1L, 2L)))); + + mockMvc.perform(delete("/api/alert/converges") + .param("ids", "1,2") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } +} + From 2a3f4dfa80f6c1f221eae732d64ba6a678b97b3f Mon Sep 17 00:00:00 2001 From: Jast Date: Fri, 2 Aug 2024 10:05:10 +0800 Subject: [PATCH 113/257] [Improve] script command blacklist (#2438) --- .../common/ssh/CommonSshBlacklist.java | 114 ++++++++++++++++++ .../collector/collect/ssh/SshCollectImpl.java | 8 ++ 2 files changed, 122 insertions(+) create mode 100644 collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshBlacklist.java diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshBlacklist.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshBlacklist.java new file mode 100644 index 00000000000..edbb08649a5 --- /dev/null +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/ssh/CommonSshBlacklist.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.collector.collect.common.ssh; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +/** + * Command blacklist + */ +public class CommonSshBlacklist { + + private static final Set BLACKLIST; + + static { + Set tempSet = new HashSet<>(); + initializeDefaultBlacklist(tempSet); + BLACKLIST = Collections.unmodifiableSet(tempSet); + } + + private CommonSshBlacklist() { + // Prevent instantiation + } + + private static void initializeDefaultBlacklist(Set blacklist) { + // Adding default dangerous commands to blacklist + blacklist.add("rm "); + blacklist.add("mv "); + blacklist.add("cp "); + blacklist.add("ln "); + blacklist.add("dd "); + blacklist.add("tar "); + blacklist.add("zip "); + blacklist.add("bzip2 "); + blacklist.add("bunzip2 "); + blacklist.add("xz "); + blacklist.add("unxz "); + blacklist.add("kill "); + blacklist.add("killall "); + blacklist.add("reboot"); + blacklist.add("shutdown"); + blacklist.add("poweroff"); + blacklist.add("init 0"); + blacklist.add("init 6"); + blacklist.add("telinit 0"); + blacklist.add("telinit 6"); + blacklist.add("systemctl halt"); + blacklist.add("systemctl suspend"); + blacklist.add("systemctl hibernate"); + blacklist.add("service reboot"); + blacklist.add("service shutdown"); + blacklist.add("crontab -e"); + blacklist.add("visudo"); + blacklist.add("useradd"); + blacklist.add("userdel"); + blacklist.add("usermod"); + blacklist.add("groupadd"); + blacklist.add("groupdel"); + blacklist.add("groupmod"); + blacklist.add("passwd"); + blacklist.add("su 
"); + blacklist.add("sudo "); + blacklist.add("mount "); + blacklist.add("parted"); + blacklist.add("mkpart"); + blacklist.add("partprobe"); + blacklist.add("iptables"); + blacklist.add("firewalld"); + blacklist.add("nft"); + blacklist.add("nc "); + blacklist.add("netcat"); + blacklist.add("ssh "); + blacklist.add("scp "); + blacklist.add("rsync"); + blacklist.add("ftp "); + blacklist.add("sftp "); + blacklist.add("telnet "); + blacklist.add("chmod "); + blacklist.add("chattr "); + blacklist.add("dd "); + blacklist.add("mknod"); + blacklist.add("losetup"); + blacklist.add("cryptsetup"); + } + + public static boolean isCommandBlacklisted(String command) { + if (command == null || command.trim().isEmpty()) { + throw new IllegalArgumentException("Command cannot be null or empty"); + } + String trimmedCommand = command.trim(); + return BLACKLIST.stream().anyMatch(trimmedCommand::contains); + } + + public static Set getBlacklist() { + return BLACKLIST; + } + +} diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java index fddbf7abffb..dfed8e7a6c3 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java @@ -38,6 +38,7 @@ import org.apache.hertzbeat.collector.collect.common.cache.CacheIdentifier; import org.apache.hertzbeat.collector.collect.common.cache.ConnectionCommonCache; import org.apache.hertzbeat.collector.collect.common.cache.SshConnect; +import org.apache.hertzbeat.collector.collect.common.ssh.CommonSshBlacklist; import org.apache.hertzbeat.collector.collect.common.ssh.CommonSshClient; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; @@ -85,6 +86,7 @@ public void preCheck(Metrics metrics) throws IllegalArgumentException { 
@Override public void collect(CollectRep.MetricsData.Builder builder, long monitorId, String app, Metrics metrics) { + long startTime = System.currentTimeMillis(); SshProtocol sshProtocol = metrics.getSsh(); boolean reuseConnection = Boolean.parseBoolean(sshProtocol.getReuseConnection()); @@ -93,6 +95,12 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri ClientSession clientSession = null; try { clientSession = getConnectSession(sshProtocol, timeout, reuseConnection); + if (CommonSshBlacklist.isCommandBlacklisted(sshProtocol.getScript())) { + builder.setCode(CollectRep.Code.FAIL); + builder.setMsg("The command is blacklisted: " + sshProtocol.getScript()); + log.warn("The command is blacklisted: {}", sshProtocol.getScript()); + return; + } channel = clientSession.createExecChannel(sshProtocol.getScript()); ByteArrayOutputStream response = new ByteArrayOutputStream(); channel.setOut(response); From f6e8e64e23687c80192a622ff2864a3797245c33 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Fri, 2 Aug 2024 10:34:55 +0800 Subject: [PATCH 114/257] [Improve] add AlertSilenceController unit test (#2425) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../AlertSilenceControllerTest.java | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java new file mode 100644 index 00000000000..b31415f275a --- /dev/null +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.alert.controller; + +import org.apache.hertzbeat.alert.service.AlertSilenceService; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.alerter.AlertSilence; +import org.apache.hertzbeat.common.util.JsonUtil; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +/** + * tes case for {@link 
AlertSilenceController} + */ + +@ExtendWith(MockitoExtension.class) +class AlertSilenceControllerTest { + + private MockMvc mockMvc; + + @Mock + private AlertSilenceService alertSilenceService; + + private AlertSilence alertSilence; + + @InjectMocks + private AlertSilenceController alertSilenceController; + + @BeforeEach + void setUp() { + + this.mockMvc = standaloneSetup(alertSilenceController).build(); + + alertSilence = AlertSilence.builder() + .id(1L) + .name("Test Silence") + .type((byte) 1) + .build(); + } + + @Test + void testAddNewAlertSilence() throws Exception { + + doNothing().when(alertSilenceService).validate(any(AlertSilence.class), eq(false)); + doNothing().when(alertSilenceService).addAlertSilence(any(AlertSilence.class)); + + mockMvc.perform(post("/api/alert/silence") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertSilence))) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + void testModifyAlertSilence() throws Exception { + + doNothing().when(alertSilenceService).validate(any(AlertSilence.class), eq(true)); + doNothing().when(alertSilenceService).modifyAlertSilence(any(AlertSilence.class)); + + mockMvc.perform(put("/api/alert/silence") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertSilence))) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + void testGetAlertSilence() throws Exception { + + when(alertSilenceService.getAlertSilence(1L)).thenReturn(alertSilence); + + mockMvc.perform(get("/api/alert/silence/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.data.id").value(1)) + .andExpect(jsonPath("$.data.name").value("Test Silence")); + } + + @Test + void testGetAlertSilenceNotExists() throws Exception { + + when(alertSilenceService.getAlertSilence(1L)).thenReturn(null); + + 
mockMvc.perform(get("/api/alert/silence/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.MONITOR_NOT_EXIST_CODE)) + .andExpect(jsonPath("$.msg").value("AlertSilence not exist.")); + } + +} From 7446d3ebbf2a79289ae378f00bac6681689222cd Mon Sep 17 00:00:00 2001 From: aias00 Date: Fri, 2 Aug 2024 12:04:18 +0800 Subject: [PATCH 115/257] [feature] add oceanbase template (#2439) --- .../main/resources/define/app-oceanbase.yml | 361 ++++++++++++++++++ 1 file changed, 361 insertions(+) create mode 100644 manager/src/main/resources/define/app-oceanbase.yml diff --git a/manager/src/main/resources/define/app-oceanbase.yml b/manager/src/main/resources/define/app-oceanbase.yml new file mode 100644 index 00000000000..dd5cb6bf2d8 --- /dev/null +++ b/manager/src/main/resources/define/app-oceanbase.yml @@ -0,0 +1,361 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: db +# The monitoring type +app: OceanBase +# The monitoring i18n name +name: + zh-CN: OceanBase数据库 + en-US: OceanBase DB +# The description and help of this monitoring type +help: + zh-CN: HertzBeat 使用 JDBC 协议 通过配置 SQL 对 OceanBase 数据库的通用性能指标(系统信息、性能状态、Innodb、缓存、事物、用户线程、慢SQL等)进行采集监控,支持版本为 OceanBase 4.0+。
您可以点击“新建 OceanBase 数据库”并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: HertzBeat uses JDBC Protocol to configure SQL for collecting general metrics of OceanBase database (system information, performance status, Innodb, cache, transactions, user threads, slow SQL, etc.). Supported version is OceanBase 4.0+.
You can click "New OceanBase Database" and configure it, or select "More Operations" to import the existing configuration. + zh-TW: HertzBeat 使用 JDBC 協議 通過配置 SQL 對 OceanBase 數據庫的通用性能指標(系統信息、性能狀態、Innodb、緩存、事物、用戶線程、慢SQL等)進行采集監控,支持版本爲 OceanBase 4.0+。
您可以點擊“新建 OceanBase 數據庫”並進行配置,或者選擇“更多操作”,導入已有配置。 +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/OceanBase + en-US: https://hertzbeat.apache.org/docs/help/OceanBase +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 2881 + # field-param field key + - field: timeout + # name-param field display i18n name + name: + zh-CN: 查询超时时间(ms) + en-US: Query Timeout(ms) + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[400,200000]' + # required-true or false + required: false + # hide param-true or false + hide: true + # default value + defaultValue: 6000 + # field-param field key + - field: database + # name-param field display i18n name + name: + zh-CN: 数据库名称 + en-US: Database Name + # type-param field type(most mapping the html input tag) + type: text + # required-true or false + required: false + # field-param field key + - field: username + # name-param field display i18n name + name: + zh-CN: 用户名 + en-US: Username + # type-param field type(most mapping the html input tag) + type: text + # when type is text, use limit to limit string length + limit: 50 + # required-true or false + required: false + # field-param field key + - field: password + # name-param field display i18n name + name: + zh-CN: 密码 + en-US: Password + # type-param field type(most mapping the html input tag) + type: 
password + # required-true or false + required: false + # field-param field key + - field: url + # name-param field display i18n name + name: + zh-CN: URL + en-US: URL + # type-param field type(most mapping the html input tag) + type: text + # required-true or false + required: false + # hide param-true or false + hide: true + +# collect metrics config list +metrics: + # metrics - basic + - name: basic + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + i18n: + zh-CN: 基础 信息 + en-US: Basic Info + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: version + type: 1 + label: true + i18n: + zh-CN: 版本 + en-US: Version + - field: datadir + type: 1 + i18n: + zh-CN: 存储目录 + en-US: DataDir + - field: max_connections + type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - version + - version_compile_os + - version_compile_machine + - datadir + - max_connections + # (optional)mapping and conversion expressions, use these and aliasField above to calculate metrics value + # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime + calculates: + - datadir=datadir + - max_connections=max_connections + - version=version+"_"+version_compile_os+"_"+version_compile_machine + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: jdbc + # the config content when protocol is jdbc + jdbc: + # OceanBase host: ipv4 ipv6 host + host: ^_^host^_^ + # OceanBase port + port: ^_^port^_^ + # database platform name + platform: mysql + # username + username: 
^_^username^_^ + # password + password: ^_^password^_^ + # database name + database: ^_^database^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + # SQL Query Method:oneRow, multiRow, columns + queryType: columns + # sql + sql: show global variables where Variable_name like 'version%' or Variable_name = 'max_connections' or Variable_name = 'datadir' ; + # JDBC url + url: ^_^url^_^ + # metrics - tenant + - name: tenant + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + i18n: + zh-CN: 租户 信息 + en-US: Tenant Info + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: tenant_id + type: 1 + label: true + i18n: + zh-CN: 租户id + en-US: TenantId + - field: tenant_name + type: 1 + i18n: + zh-CN: 租户名称 + en-US: tenantName + - field: status + type: 1 + i18n: + zh-CN: 状态 + en-US: status + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - tenant_id + - tenant_name + - status + # (optional)mapping and conversion expressions, use these and aliasField above to calculate metrics value + # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime + calculates: + - tenant_id=tenant_id + - tenant_name=tenant_name + - status=status + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: jdbc + # the config content when protocol is jdbc + jdbc: + # OceanBase host: ipv4 ipv6 host + host: ^_^host^_^ + # OceanBase port + port: ^_^port^_^ + # database platform name + platform: mysql + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # database name + database: ^_^database^_^ + # 
timeout unit:ms + timeout: ^_^timeout^_^ + # SQL Query Method:oneRow, multiRow, columns + queryType: multiRow + # sql + sql: select tenant_id, tenant_name, info, status from oceanbase.__all_tenant; + # JDBC url + url: ^_^url^_^ + + - name: sql + priority: 1 + i18n: + zh-CN: Sql 信息 + en-US: Sql Info + fields: + - field: con_id + type: 1 + label: true + i18n: + zh-CN: 租户id + en-US: TenantId + - field: sql_select_count + type: 0 + i18n: + zh-CN: select 语句执行次数 + en-US: sql select count + - field: sql_insert_count + type: 0 + i18n: + zh-CN: insert 语句执行次数 + en-US: sql insert count + - field: sql_update_count + type: 0 + i18n: + zh-CN: update 语句执行次数 + en-US: sql update count + - field: sql_delete_count + type: 0 + i18n: + zh-CN: delete 语句执行次数 + en-US: sql delete count +# - field: com_commit +# type: 0 +# i18n: +# zh-CN: 事务提交次数 +# en-US: trans commit count +# - field: com_rollback +# type: 0 +# i18n: +# zh-CN: 事务回滚次数 +# en-US: trans rollback count + aliasFields: + - con_id + - sql_select_count + - sql_insert_count + - sql_update_count + - sql_delete_count +# - com_commit +# - com_rollback + calculates: + - con_id=con_id + - sql_select_count=sql_select_count + - sql_insert_count=sql_insert_count + - sql_update_count=sql_update_count + - sql_delete_count=sql_delete_count + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + platform: mysql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + timeout: ^_^timeout^_^ + queryType: multiRow + sql: | + SELECT + con_id, + MAX(CASE WHEN name = 'sql select count' THEN value END) AS "sql_select_count", + MAX(CASE WHEN name = 'sql insert count' THEN value END) AS "sql_insert_count", + MAX(CASE WHEN name = 'sql update count' THEN value END) AS "sql_update_count", + MAX(CASE WHEN name = 'sql delete count' THEN value END) AS "sql_delete_count" + FROM + oceanbase.gv$sysstat + WHERE + CLASS = 8 + AND name IN ('sql select count', 'sql insert count', 'sql update count', 'sql delete count') + GROUP BY + 
con_id + ORDER BY + con_id; + url: ^_^url^_^ + + + - name: process_state + priority: 2 + i18n: + zh-CN: 进程状态 信息 + en-US: Process State Info + fields: + - field: state + type: 1 + label: true + i18n: + zh-CN: 进程状态 + en-US: State + - field: num + type: 0 + i18n: + zh-CN: 该状态进程数量 + en-US: Num + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + platform: mysql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + timeout: ^_^timeout^_^ + queryType: multiRow + sql: select state, count(*) as num from information_schema.PROCESSLIST where state != '' group by state; + url: ^_^url^_^ From c85e182119813af89e69446654e3814b1c4c6f09 Mon Sep 17 00:00:00 2001 From: aias00 Date: Fri, 2 Aug 2024 17:12:40 +0800 Subject: [PATCH 116/257] [feature] add greenplum template (#2442) --- .../main/resources/define/app-greenplum.yml | 528 ++++++++++++++++++ 1 file changed, 528 insertions(+) create mode 100644 manager/src/main/resources/define/app-greenplum.yml diff --git a/manager/src/main/resources/define/app-greenplum.yml b/manager/src/main/resources/define/app-greenplum.yml new file mode 100644 index 00000000000..0eb25f844ce --- /dev/null +++ b/manager/src/main/resources/define/app-greenplum.yml @@ -0,0 +1,528 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: db +# The monitoring type eg: linux windows tomcat mysql aws... +app: greenplum +# The monitoring i18n name +name: + zh-CN: GreenPlum 数据库 + en-US: GreenPlum DB +# The description and help of this monitoring type +help: + zh-CN: HertzBeat 使用 JDBC 协议 通过配置 SQL 对 GreenPlum 数据库的通用性能指标 (basic、state、activity etc) 进行采集监控,支持版本为 GreenPlum 6.23.0+。
您可以点击“新建 GreenPlum 数据库”并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: HertzBeat uses JDBC Protocol to configure SQL for collecting general metrics of GreenPlum database (basic、state、activity etc). Supported version is GreenPlum 6.23.0+.
You can click "New GreenPlum Database" and configure it, or select "More Action" to import the existing configuration. + zh-TW: HertzBeat 使用 JDBC 協議 通過配置 SQL 對 GreenPlum 數據庫的通用性能指標 (basic、state、activity etc)進行采集監控,支持版本爲 GreenPlum 6.23.0+。
您可以點擊“新建 GreenPlum 數據庫”並進行配置,或者選擇“更多操作”,導入已有配置。 +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/greenplum + en-US: https://hertzbeat.apache.org/docs/help/greenplum +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 5432 + - field: timeout + name: + zh-CN: 查询超时时间(ms) + en-US: Query Timeout(ms) + type: number + range: '[400,200000]' + required: false + hide: true + defaultValue: 6000 + - field: database + name: + zh-CN: 数据库名称 + en-US: Database Name + type: text + defaultValue: postgres + required: false + - field: username + name: + zh-CN: 用户名 + en-US: Username + type: text + limit: 50 + required: false + - field: password + name: + zh-CN: 密码 + en-US: Password + type: password + required: false + - field: url + name: + zh-CN: URL + en-US: URL + type: text + required: false + hide: true + +# collect metrics config list +metrics: + # metrics - basic + - name: basic + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: server_version + type: 1 + label: true + - 
field: port + type: 1 + - field: server_encoding + type: 1 + - field: data_directory + type: 1 + - field: max_connections + type: 0 + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: jdbc + # the config content when protocol is jdbc + jdbc: + # host: ipv4 ipv6 host + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + # database platform name + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + # SQL Query Method:oneRow, multiRow, columns + queryType: columns + # sql + sql: select name, setting as value from pg_settings where name = 'max_connections' or name = 'server_version' or name = 'server_encoding' or name = 'port' or name = 'data_directory'; + # JDBC url + url: ^_^url^_^ + + - name: state + priority: 1 + fields: + - field: db_name + type: 1 + label: true + - field: conflicts + type: 0 + unit: times + - field: deadlocks + type: 0 + unit: times + - field: blks_read + type: 0 + unit: blocks per second + - field: blks_hit + type: 0 + unit: blocks per second + - field: blk_read_time + type: 0 + unit: ms + - field: blk_write_time + type: 0 + unit: ms + - field: stats_reset + type: 1 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: SELECT COALESCE(datname,'shared-object') as db_name, conflicts, deadlocks, blks_read, blks_hit, blk_read_time, blk_write_time, stats_reset from pg_stat_database where (datname != 'template1' and datname != 'template0') or datname is null; + url: ^_^url^_^ + + - name: activity + priority: 2 + fields: + - field: running + type: 0 + unit: sbc + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: SELECT 
count(*) as running FROM pg_stat_activity WHERE NOT pid=pg_backend_pid(); + url: ^_^url^_^ + + - name: resource_config + priority: 1 + fields: + - field: work_mem + type: 0 + unit: MB + - field: shared_buffers + type: 0 + unit: MB + - field: autovacuum + type: 1 + - field: max_connections + type: 0 + - field: effective_cache_size + type: 0 + unit: MB + - field: wal_buffers + type: 0 + unit: MB + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: columns + sql: show all; + url: ^_^url^_^ + + - name: connection + priority: 1 + fields: + - field: active + type: 0 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: select count(1) as active from pg_stat_activity; + url: ^_^url^_^ + + - name: connection_state + priority: 1 + fields: + - field: state + type: 1 + label: true + - field: num + type: 0 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select COALESCE(state, 'other') as state, count(*) as num from pg_stat_activity group by state; + url: ^_^url^_^ + + - name: connection_db + priority: 1 + fields: + - field: db_name + type: 1 + label: true + - field: active + type: 0 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select count(*) as active, COALESCE(datname, 'other') as db_name from pg_stat_activity group by datname; + url: ^_^url^_^ + + - name: tuple + priority: 1 + fields: + - field: fetched + type: 0 + - 
field: returned + type: 0 + - field: inserted + type: 0 + - field: updated + type: 0 + - field: deleted + type: 0 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select sum(tup_fetched) as fetched, sum(tup_updated) as updated, sum(tup_deleted) as deleted, sum(tup_inserted) as inserted, sum(tup_returned) as returned from pg_stat_database; + url: ^_^url^_^ + + - name: temp_file + priority: 1 + fields: + - field: db_name + type: 1 + label: true + - field: num + type: 0 + - field: size + type: 0 + unit: B + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select COALESCE(datname, 'other') as db_name, sum(temp_files) as num, sum(temp_bytes) as size from pg_stat_database group by datname; + url: ^_^url^_^ + + - name: lock + priority: 1 + fields: + - field: db_name + type: 1 + label: true + - field: conflicts + type: 0 + unit: times + - field: deadlocks + type: 0 + unit: times + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: SELECT COALESCE(datname,'shared-object') as db_name, conflicts, deadlocks from pg_stat_database where (datname != 'template1' and datname != 'template0') or datname is null; + url: ^_^url^_^ + + - name: slow_sql + priority: 1 + fields: + - field: sql_text + type: 1 + label: true + - field: calls + type: 0 + - field: rows + type: 0 + - field: avg_time + type: 0 + unit: ms + - field: total_time + type: 0 + unit: ms + aliasFields: + - query + - calls + - rows + - total_exec_time + - mean_exec_time + calculates: + - sql_text=query + - 
avg_time=mean_exec_time + - total_time=total_exec_time + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select * from pg_stat_statements; + url: ^_^url^_^ + + - name: transaction + priority: 2 + fields: + - field: db_name + type: 1 + label: true + - field: commits + type: 0 + unit: times + - field: rollbacks + type: 0 + unit: times + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select COALESCE(datname, 'other') as db_name, sum(xact_commit) as commits, sum(xact_rollback) as rollbacks from pg_stat_database group by datname; + url: ^_^url^_^ + + - name: conflicts + priority: 2 + fields: + - field: db_name + type: 1 + label: true + - field: tablespace + type: 0 + - field: lock + type: 0 + - field: snapshot + type: 0 + - field: bufferpin + type: 0 + - field: deadlock + type: 0 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select datname as db_name, confl_tablespace as tablespace, confl_lock as lock, confl_snapshot as snapshot, confl_bufferpin as bufferpin, confl_deadlock as deadlock from pg_stat_database_conflicts; + url: ^_^url^_^ + + - name: cache_hit_ratio + priority: 2 + fields: + - field: db_name + type: 1 + label: true + - field: ratio + type: 0 + unit: '%' + aliasFields: + - blks_hit + - blks_read + - db_name + calculates: + - ratio=(blks_hit + 1) / (blks_read + blks_hit + 1) * 100 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + 
database: ^_^database^_^ + queryType: multiRow + sql: select datname as db_name, blks_hit, blks_read from pg_stat_database; + url: ^_^url^_^ + + - name: checkpoint + priority: 2 + fields: + - field: checkpoint_sync_time + type: 0 + unit: ms + - field: checkpoint_write_time + type: 0 + unit: ms + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: select checkpoint_sync_time, checkpoint_write_time from pg_stat_bgwriter; + url: ^_^url^_^ + + - name: buffer + priority: 2 + fields: + - field: allocated + type: 0 + - field: fsync_calls_by_backend + type: 0 + - field: written_directly_by_backend + type: 0 + - field: written_by_background_writer + type: 0 + - field: written_during_checkpoints + type: 0 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: select buffers_alloc as allocated, buffers_backend_fsync as fsync_calls_by_backend, buffers_backend as written_directly_by_backend, buffers_clean as written_by_background_writer, buffers_checkpoint as written_during_checkpoints from pg_stat_bgwriter; + url: ^_^url^_^ From b0438da537206c862b8fbb3a12735b0ce7a756ec Mon Sep 17 00:00:00 2001 From: YuLuo Date: Fri, 2 Aug 2024 18:01:53 +0800 Subject: [PATCH 117/257] [Improve] add KafkaMetricsDataSerializer & KafkaMetricsDataDeserializer unit test (#2431) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../serialize/KafkaMetricsDataSerializer.java | 9 ++ .../KafkaMetricsDataDeserializerTest.java | 102 ++++++++++++++++++ .../KafkaMetricsDataSerializerTest.java | 100 +++++++++++++++++ 3 files changed, 211 insertions(+) create mode 100644 common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java create mode 100644 
common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java diff --git a/common/src/main/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializer.java b/common/src/main/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializer.java index 015dbf7ae96..bcd2fe6d66f 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializer.java +++ b/common/src/main/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializer.java @@ -18,6 +18,7 @@ package org.apache.hertzbeat.common.serialize; import java.util.Map; +import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.serialization.Serializer; @@ -25,6 +26,8 @@ /** * kafka metrics data serializer */ + +@Slf4j public class KafkaMetricsDataSerializer implements Serializer { @Override @@ -34,6 +37,12 @@ public void configure(Map configs, boolean isKey) { @Override public byte[] serialize(String s, CollectRep.MetricsData metricsData) { + + if (metricsData == null) { + log.error("metricsData is null"); + return null; + } + return metricsData.toByteArray(); } diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java new file mode 100644 index 00000000000..f3da7ce171f --- /dev/null +++ b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.serialize; + +import java.util.Map; + +import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.apache.kafka.common.header.Headers; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +/** + * test case for {@link KafkaMetricsDataDeserializer} + */ + +class KafkaMetricsDataDeserializerTest { + + private KafkaMetricsDataDeserializer deserializer; + + @Mock + private Map configs; + + @Mock + private Headers headers; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + deserializer = new KafkaMetricsDataDeserializer(); + } + + @Test + void testConfigure() { + + deserializer.configure(configs, false); + } + + @Test + void testDeserializeWithBytes() { + + CollectRep.MetricsData expectedMetricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] bytes = expectedMetricsData.toByteArray(); + + CollectRep.MetricsData actualMetricsData = deserializer.deserialize("", bytes); + + assertEquals(expectedMetricsData, actualMetricsData); + } + + @Test + void testDeserializeWithInvalidBytes() { + + byte[] invalidBytes = "invalid data".getBytes(); + + 
assertThrows(RuntimeException.class, () -> deserializer.deserialize("", invalidBytes)); + } + + @Test + void testDeserializeWithHeaders() { + + CollectRep.MetricsData expectedMetricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] bytes = expectedMetricsData.toByteArray(); + + CollectRep.MetricsData actualMetricsData = deserializer.deserialize("topic", headers, bytes); + + assertEquals(expectedMetricsData, actualMetricsData); + } + + @Test + void testClose() { + + deserializer.close(); + } + +} diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java new file mode 100644 index 00000000000..63d7a1dd1b5 --- /dev/null +++ b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.common.serialize; + +import java.util.Map; +import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.apache.kafka.common.header.Headers; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * test case for {@link KafkaMetricsDataSerializer} + */ + +class KafkaMetricsDataSerializerTest { + + private KafkaMetricsDataSerializer serializer; + + @Mock + private Map configs; + + @Mock + private Headers headers; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + serializer = new KafkaMetricsDataSerializer(); + } + + @Test + void testConfigure() { + + serializer.configure(configs, false); + } + + @Test + void testSerializeWithMetricsData() { + + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] bytes = serializer.serialize("", metricsData); + + assertNotNull(bytes); + assertArrayEquals(metricsData.toByteArray(), bytes); + } + + @Test + void testSerializeWithNullMetricsData() { + + byte[] bytes = serializer.serialize("", null); + + assertNull(bytes); + } + + @Test + void testSerializeWithHeaders() { + + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] expectedBytes = metricsData.toByteArray(); + byte[] bytes = serializer.serialize("topic", headers, metricsData); + + assertArrayEquals(expectedBytes, bytes); + } + + @Test + void testClose() { + + serializer.close(); + } + +} From aee969a18a00d8294e54331d042659e57d624fb3 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Fri, 2 Aug 2024 23:15:45 
+0800 Subject: [PATCH 118/257] [refactor] move code from AlertDefinesController to AlertDefineService (#2429) Co-authored-by: Calvin --- .../controller/AlertDefinesController.java | 53 +------------------ .../alert/service/AlertDefineService.java | 11 ++-- .../service/impl/AlertDefineServiceImpl.java | 49 ++++++++++++++++- .../AlertDefinesControllerTest.java | 26 ++++----- .../alert/service/AlertDefineServiceTest.java | 9 ++-- 5 files changed, 76 insertions(+), 72 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefinesController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefinesController.java index 2150f06800f..7579c05df3d 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefinesController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefinesController.java @@ -21,10 +21,7 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.persistence.criteria.CriteriaBuilder; -import jakarta.persistence.criteria.Predicate; import jakarta.servlet.http.HttpServletResponse; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import org.apache.hertzbeat.alert.service.AlertDefineService; @@ -32,11 +29,7 @@ import org.apache.hertzbeat.common.entity.dto.Message; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.domain.Sort; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; -import org.springframework.util.StringUtils; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; @@ -67,51 +60,7 @@ public 
ResponseEntity>> getAlertDefines( @Parameter(description = "Sort mode: asc: ascending, desc: descending", example = "desc") @RequestParam(defaultValue = "desc") String order, @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list pages", example = "8") @RequestParam(defaultValue = "8") int pageSize) { - - Specification specification = (root, query, criteriaBuilder) -> { - List andList = new ArrayList<>(); - if (ids != null && !ids.isEmpty()) { - CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); - for (long id : ids) { - inPredicate.value(id); - } - andList.add(inPredicate); - } - if (StringUtils.hasText(search)) { - Predicate predicate = criteriaBuilder.or( - criteriaBuilder.like( - criteriaBuilder.lower(root.get("app")), - "%" + search.toLowerCase() + "%" - ), - criteriaBuilder.like( - criteriaBuilder.lower(root.get("metric")), - "%" + search.toLowerCase() + "%" - ), - criteriaBuilder.like( - criteriaBuilder.lower(root.get("field")), - "%" + search.toLowerCase() + "%" - ), - criteriaBuilder.like( - criteriaBuilder.lower(root.get("expr")), - "%" + search.toLowerCase() + "%" - ), - criteriaBuilder.like( - criteriaBuilder.lower(root.get("template")), - "%" + search.toLowerCase() + "%" - ) - ); - andList.add(predicate); - } - if (priority != null) { - Predicate predicate = criteriaBuilder.equal(root.get("priority"), priority); - andList.add(predicate); - } - Predicate[] predicates = new Predicate[andList.size()]; - return criteriaBuilder.and(andList.toArray(predicates)); - }; - Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); - Page alertDefinePage = alertDefineService.getAlertDefines(specification, pageRequest); + Page alertDefinePage = alertDefineService.getAlertDefines(ids, search, priority, sort, order, pageIndex, pageSize); return 
ResponseEntity.ok(Message.success(alertDefinePage)); } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertDefineService.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertDefineService.java index 74319167a81..dabad05a38e 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertDefineService.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertDefineService.java @@ -113,11 +113,16 @@ public interface AlertDefineService { /** * Dynamic conditional query - * @param specification Query conditions - * @param pageRequest Paging parameters + * @param defineIds Alarm Definition ID List + * @param search Search-Target Expr Template + * @param priority Alarm Definition Severity + * @param sort Sort field + * @param order Sort mode: asc: ascending, desc: descending + * @param pageIndex List current page + * @param pageSize Number of list pages * @return The query results */ - Page getAlertDefines(Specification specification, PageRequest pageRequest); + Page getAlertDefines(List defineIds, String search, Byte priority, String sort, String order, int pageIndex, int pageSize); /** * Query the associated monitoring list information based on the alarm definition ID diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java index b29add88a95..624df433407 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java @@ -17,9 +17,12 @@ package org.apache.hertzbeat.alert.service.impl; +import jakarta.persistence.criteria.CriteriaBuilder; +import jakarta.persistence.criteria.Predicate; import jakarta.servlet.http.HttpServletResponse; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import 
java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -41,6 +44,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; +import org.springframework.data.domain.Sort; import org.springframework.data.jpa.domain.Specification; import org.springframework.http.HttpHeaders; import org.springframework.stereotype.Service; @@ -147,7 +151,50 @@ public AlertDefine getMonitorBindAlertAvaDefine(long monitorId, String app, Stri } @Override - public Page getAlertDefines(Specification specification, PageRequest pageRequest) { + public Page getAlertDefines(List defineIds, String search, Byte priority, String sort, String order, int pageIndex, int pageSize) { + Specification specification = (root, query, criteriaBuilder) -> { + List andList = new ArrayList<>(); + if (defineIds != null && !defineIds.isEmpty()) { + CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); + for (long id : defineIds) { + inPredicate.value(id); + } + andList.add(inPredicate); + } + if (StringUtils.hasText(search)) { + Predicate predicate = criteriaBuilder.or( + criteriaBuilder.like( + criteriaBuilder.lower(root.get("app")), + "%" + search.toLowerCase() + "%" + ), + criteriaBuilder.like( + criteriaBuilder.lower(root.get("metric")), + "%" + search.toLowerCase() + "%" + ), + criteriaBuilder.like( + criteriaBuilder.lower(root.get("field")), + "%" + search.toLowerCase() + "%" + ), + criteriaBuilder.like( + criteriaBuilder.lower(root.get("expr")), + "%" + search.toLowerCase() + "%" + ), + criteriaBuilder.like( + criteriaBuilder.lower(root.get("template")), + "%" + search.toLowerCase() + "%" + ) + ); + andList.add(predicate); + } + if (priority != null) { + Predicate predicate = criteriaBuilder.equal(root.get("priority"), priority); + andList.add(predicate); + } + Predicate[] predicates = new Predicate[andList.size()]; + return 
criteriaBuilder.and(andList.toArray(predicates)); + }; + Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); return alertDefineDao.findAll(specification, pageRequest); } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java index 62ce3fb82c2..7f9ef2c25f6 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java @@ -98,20 +98,20 @@ void getAlertDefines() throws Exception { // Test the correctness of the mock // Although objects cannot be mocked, stubs can be stored using class files -// Mockito.when(alertDefineService.getAlertDefines(Mockito.any(Specification.class), Mockito.argThat(new ArgumentMatcher() { -// @Override -// public boolean matches(PageRequest pageRequestMidden) { -// // There are three methods in the source code that need to be compared, namely getPageNumber(), getPageSize(), getSort() -// if(pageRequestMidden.getPageSize() == pageRequest.getPageSize() && -// pageRequestMidden.getPageNumber() == pageRequest.getPageNumber() && -// pageRequestMidden.getSort().equals(pageRequest.getSort())) { -// return true; -// } -// return false; -// } -// }))).thenReturn(new PageImpl(new ArrayList())); + // Mockito.when(alertDefineService.getAlertDefines(Mockito.any(Specification.class), Mockito.argThat(new ArgumentMatcher() { + // @Override + // public boolean matches(PageRequest pageRequestMidden) { + // // There are three methods in the source code that need to be compared, namely getPageNumber(), getPageSize(), getSort() + // if(pageRequestMidden.getPageSize() == pageRequest.getPageSize() && + // pageRequestMidden.getPageNumber() == pageRequest.getPageNumber() && + // 
pageRequestMidden.getSort().equals(pageRequest.getSort())) { + // return true; + // } + // return false; + // } + // }))).thenReturn(new PageImpl(new ArrayList())); AlertDefine define = AlertDefine.builder().id(9L).app("linux").metric("disk").field("usage").expr("x").times(1).tags(new LinkedList<>()).build(); - Mockito.when(alertDefineService.getAlertDefines(Mockito.any(), Mockito.any())).thenReturn(new PageImpl<>(Collections.singletonList(define))); + Mockito.when(alertDefineService.getAlertDefines(null, null, null, "id", "desc", 1, 10)).thenReturn(new PageImpl<>(Collections.singletonList(define))); mockMvc.perform(MockMvcRequestBuilders.get( "/api/alert/defines") diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineServiceTest.java index b3b317db728..549f980327c 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineServiceTest.java @@ -20,10 +20,13 @@ import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.anySet; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.Collections; @@ -176,9 +179,9 @@ void testGetMonitorBindAlertDefines() { @Test void getAlertDefines() { - Specification specification = mock(Specification.class); - when(alertDefineDao.findAll(specification, PageRequest.of(1, 1))).thenReturn(Page.empty()); - assertNotNull(alertDefineService.getAlertDefines(specification, 
PageRequest.of(1, 1))); + when(alertDefineDao.findAll(any(Specification.class), any(PageRequest.class))).thenReturn(Page.empty()); + assertNotNull(alertDefineService.getAlertDefines(null, null, null, "id", "desc", 1, 10)); + verify(alertDefineDao, times(1)).findAll(any(Specification.class), any(PageRequest.class)); } @Test From 598ed2ea168338d4b4322d6fbb3c3808b14c3818 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 3 Aug 2024 18:10:07 +0800 Subject: [PATCH 119/257] [ci] add e2e logs to debug e2e test & delete unnecessary files (#2447) Signed-off-by: yuluo-yx --- .github/workflows/backend-build-test.yml | 13 ++++++++++- e2e/Dockerfile | 23 ------------------- e2e/{compose.yaml => docker-compose.yml} | 12 ++++++++-- e2e/script/entrypoint.sh | 21 ----------------- e2e/script/start.sh | 29 ------------------------ 5 files changed, 22 insertions(+), 76 deletions(-) delete mode 100644 e2e/Dockerfile rename e2e/{compose.yaml => docker-compose.yml} (80%) delete mode 100644 e2e/script/entrypoint.sh delete mode 100644 e2e/script/start.sh diff --git a/.github/workflows/backend-build-test.yml b/.github/workflows/backend-build-test.yml index 1124b17fd9d..ecf9cfa4ad5 100644 --- a/.github/workflows/backend-build-test.yml +++ b/.github/workflows/backend-build-test.yml @@ -53,6 +53,7 @@ jobs: uses: codecov/codecov-action@v4.0.1 with: token: ${{ secrets.CODECOV_TOKEN }} + - name: Build Image env: IMAGE_PUSH: false @@ -63,9 +64,19 @@ jobs: docker buildx use myBuilder ./script/docker/server/build.sh + - name: Run E2E run: | sudo curl -L https://github.com/docker/compose/releases/download/v2.23.0/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose sudo chmod u+x /usr/local/bin/docker-compose - cd e2e && chmod +x ./script/*.sh && ./script/start.sh + cd e2e + sudo docker-compose version + sudo docker-compose up --exit-code-from testing --remove-orphans + + # upload application logs + - name: Upload logs + uses: actions/upload-artifact@v3 + with: + name: hz-logs-${{ github.run_id 
}} + path: e2e/logs/ diff --git a/e2e/Dockerfile b/e2e/Dockerfile deleted file mode 100644 index babd3488156..00000000000 --- a/e2e/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -FROM ghcr.io/linuxsuren/api-testing:v0.0.17 - -WORKDIR /workspace -COPY . . - -CMD [ "/workspace/script/entrypoint.sh" ] diff --git a/e2e/compose.yaml b/e2e/docker-compose.yml similarity index 80% rename from e2e/compose.yaml rename to e2e/docker-compose.yml index 7e522fbb169..b9e1b12ce90 100644 --- a/e2e/compose.yaml +++ b/e2e/docker-compose.yml @@ -18,17 +18,25 @@ version: '3.8' services: testing: - build: - context: . 
+ image: ghcr.io/linuxsuren/api-testing:v0.0.17 environment: SERVER: http://hertzbeat:1157 + container_name: e2e-testing + volumes: + - ./data/:/work/data/ + - ./testsuite.yaml:/work/testsuite.yaml + command: atest run -p /work/testsuite.yaml --report md depends_on: hertzbeat: condition: service_healthy links: - hertzbeat + hertzbeat: image: apache/hertzbeat + container_name: e2e-hertzbeat + volumes: + - ./logs/:/opt/hertzbeat/logs/ ports: - "1157:1157" environment: diff --git a/e2e/script/entrypoint.sh b/e2e/script/entrypoint.sh deleted file mode 100644 index 6ddca683b7c..00000000000 --- a/e2e/script/entrypoint.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -atest run -p testsuite.yaml --report md diff --git a/e2e/script/start.sh b/e2e/script/start.sh deleted file mode 100644 index cb724733921..00000000000 --- a/e2e/script/start.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Derived from Api Testing https://github.com/LinuxSuRen/api-testing/blob/master/e2e/start.sh - -file=$1 -if [ "$file" == "" ] -then - file=compose.yaml -fi - -docker-compose version -docker-compose -f "$file" down -docker-compose -f "$file" up --build testing --exit-code-from testing --remove-orphans From 7672502c3d6d21f10167ca228426dd0a6c76d3ec Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 3 Aug 2024 18:19:30 +0800 Subject: [PATCH 120/257] [Improve] add AlertSerializer & AlertDeserializer unit test (#2430) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../collector/util/TimeExpressionUtil.java | 2 +- .../serialize/AlertDeserializerTest.java | 98 ++++++++++++++++ .../common/serialize/AlertSerializerTest.java | 108 ++++++++++++++++++ 3 files changed, 207 insertions(+), 1 deletion(-) create mode 100644 common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java create mode 100644 common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/util/TimeExpressionUtil.java b/collector/src/main/java/org/apache/hertzbeat/collector/util/TimeExpressionUtil.java index 69f16ca3c6c..a34d9afa24c 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/util/TimeExpressionUtil.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/util/TimeExpressionUtil.java @@ 
-36,7 +36,7 @@ /** * time expression deal util */ -public class TimeExpressionUtil { +public final class TimeExpressionUtil { private TimeExpressionUtil() { } diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java new file mode 100644 index 00000000000..c90729ff5ed --- /dev/null +++ b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.common.serialize; + +import java.util.Map; + +import org.apache.hertzbeat.common.entity.alerter.Alert; +import org.apache.kafka.common.header.Headers; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * test case for {@link AlertDeserializer} + */ + +class AlertDeserializerTest { + + private AlertDeserializer alertDeserializer; + + @Mock + private Map configs; + + @Mock + private Headers headers; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + alertDeserializer = new AlertDeserializer(); + } + + @Test + void testConfigure() { + + alertDeserializer.configure(configs, false); + } + + @Test + void testDeserializeWithBytes() { + + String json = "{\"target\":\"test\",\"content\":\"test\"}"; + byte[] bytes = json.getBytes(); + Alert expectedAlert = Alert.builder() + .content("test") + .target("test") + .build(); + + Alert actualAlert = alertDeserializer.deserialize("", bytes); + + assertEquals(expectedAlert.getContent(), actualAlert.getContent()); + assertEquals(expectedAlert.getTarget(), actualAlert.getTarget()); + } + + @Test + void testDeserializeWithHeaders() { + + String topic = "alerts"; + byte[] data = "{\"target\":\"test\",\"content\":\"test\"}".getBytes(); + + Alert expectedAlert = Alert.builder() + .content("test") + .target("test") + .build(); + + Alert actualAlert = alertDeserializer.deserialize(topic, headers, data); + + assertEquals(expectedAlert.getContent(), actualAlert.getContent()); + assertEquals(expectedAlert.getTarget(), actualAlert.getTarget()); + } + + @Test + void testClose() { + + alertDeserializer.close(); + } + +} diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java new file mode 100644 
index 00000000000..48ebb9bfbb4 --- /dev/null +++ b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.serialize; + +import java.util.Arrays; +import java.util.Map; + +import org.apache.hertzbeat.common.entity.alerter.Alert; +import org.apache.kafka.common.header.Headers; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * test case for {@link AlertSerializer} + */ + +class AlertSerializerTest { + + private AlertSerializer alertSerializer; + + @Mock + private Map configs; + + @Mock + private Headers headers; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + alertSerializer = new AlertSerializer(); + } + + @Test + void testConfigure() { + + alertSerializer.configure(configs, false); + } + + @Test + void 
testSerializeWithAlert() { + + Alert alert = Alert.builder() + .content("test") + .target("test") + .build(); + byte[] expectedJson = ("{\"id\":null,\"target\":\"test\",\"alertDefineId\":null,\"priority\":0,\"content\":" + + "\"test\",\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes" + + "\":null,\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}").getBytes(); + + byte[] bytes = alertSerializer.serialize("", alert); + + assertNotNull(bytes); + assertEquals(Arrays.toString(expectedJson), Arrays.toString(bytes)); + } + + @Test + void testSerializeWithNullAlert() { + + byte[] bytes = alertSerializer.serialize("", null); + assertNull(bytes); + } + + @Test + void testSerializeWithHeaders() { + + Alert alert = Alert.builder() + .content("test") + .target("test") + .build(); + byte[] expectedBytes = ("{\"id\":null,\"target\":\"test\",\"alertDefineId\":null,\"priority\":0,\"content\":" + + "\"test\",\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes" + + "\":null,\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}").getBytes(); + + byte[] bytes = alertSerializer.serialize("alerts", headers, alert); + + assertArrayEquals(expectedBytes, bytes); + } + + @Test + void testClose() { + + alertSerializer.close(); + } + +} From 822825b614ce09a21e5f205052c5832ce381fbe5 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 3 Aug 2024 23:20:24 +0800 Subject: [PATCH 121/257] [improve] add TimeZoneListener test (#2432) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../listener/TimeZoneListenerTest.java | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/component/listener/TimeZoneListenerTest.java diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/listener/TimeZoneListenerTest.java 
b/manager/src/test/java/org/apache/hertzbeat/manager/component/listener/TimeZoneListenerTest.java new file mode 100644 index 00000000000..1d4d7fa59aa --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/listener/TimeZoneListenerTest.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.component.listener; + +import java.text.SimpleDateFormat; +import java.util.TimeZone; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.common.support.event.SystemConfigChangeEvent; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link TimeZoneListener} + */ + +@ExtendWith(MockitoExtension.class) +class TimeZoneListenerTest { + + @Mock + private ObjectMapper objectMapper; + + @InjectMocks + private TimeZoneListener timeZoneListener; + + @Mock + private SystemConfigChangeEvent event; + + @BeforeEach + void setUp() { + + ReflectionTestUtils.setField(timeZoneListener, "objectMapper", objectMapper); + } + + @Test + void testOnEvent() { + + when(objectMapper.setTimeZone(any(TimeZone.class))).thenReturn(objectMapper); + when(objectMapper.setDateFormat(any(SimpleDateFormat.class))).thenReturn(objectMapper); + + Object eventSource = new Object(); + when(event.getSource()).thenReturn(eventSource); + + timeZoneListener.onEvent(event); + + SimpleDateFormat expectedDateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSX"); + expectedDateFormat.setTimeZone(TimeZone.getDefault()); + + verify(objectMapper).setTimeZone(TimeZone.getDefault()); + verify(objectMapper).setDateFormat(expectedDateFormat); + } + +} From de657f914403044e5ebb1d7db7b8db70682bb09b Mon Sep 17 00:00:00 2001 From: aias00 Date: Sat, 3 Aug 2024 23:26:36 +0800 Subject: [PATCH 122/257] [improve] add i18n for postgresql kingbase greenplum (#2448) Co-authored-by: tomsun28 --- .../main/resources/define/app-greenplum.yml | 222 
++++++++++++++++++ .../main/resources/define/app-kingbase.yml | 222 ++++++++++++++++++ .../main/resources/define/app-postgresql.yml | 222 ++++++++++++++++++ 3 files changed, 666 insertions(+) diff --git a/manager/src/main/resources/define/app-greenplum.yml b/manager/src/main/resources/define/app-greenplum.yml index 0eb25f844ce..a590bb9417a 100644 --- a/manager/src/main/resources/define/app-greenplum.yml +++ b/manager/src/main/resources/define/app-greenplum.yml @@ -96,6 +96,9 @@ params: metrics: # metrics - basic - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic Info # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -105,14 +108,29 @@ metrics: - field: server_version type: 1 label: true + i18n: + zh-CN: 服务器版本 + en-US: Server Version - field: port type: 1 + i18n: + zh-CN: 端口 + en-US: Port - field: server_encoding type: 1 + i18n: + zh-CN: 服务器编码 + en-US: Server Encoding - field: data_directory type: 1 + i18n: + zh-CN: 数据目录 + en-US: Data Directory - field: max_connections type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: jdbc # the config content when protocol is jdbc @@ -134,31 +152,58 @@ metrics: url: ^_^url^_^ - name: state + i18n: + zh-CN: 状态信息 + en-US: State Info priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: conflicts type: 0 unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts - field: deadlocks type: 0 unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks - field: blks_read type: 0 unit: blocks per second + i18n: + zh-CN: 读取块 + en-US: Blocks Read - field: blks_hit type: 0 unit: blocks per second + i18n: + zh-CN: 命中块 + en-US: Blocks Hit - field: blk_read_time type: 0 unit: ms + i18n: + 
zh-CN: 读取时间 + en-US: Read Time - field: blk_write_time type: 0 unit: ms + i18n: + zh-CN: 写入时间 + en-US: Write Time - field: stats_reset type: 1 + i18n: + zh-CN: 统计重置 + en-US: Stats Reset protocol: jdbc jdbc: host: ^_^host^_^ @@ -173,11 +218,17 @@ metrics: url: ^_^url^_^ - name: activity + i18n: + zh-CN: 活动信息 + en-US: Activity Info priority: 2 fields: - field: running type: 0 unit: sbc + i18n: + zh-CN: 运行中 + en-US: Running protocol: jdbc jdbc: host: ^_^host^_^ @@ -192,24 +243,45 @@ metrics: url: ^_^url^_^ - name: resource_config + i18n: + zh-CN: 资源配置 + en-US: Resource Config priority: 1 fields: - field: work_mem type: 0 unit: MB + i18n: + zh-CN: 工作内存 + en-US: Work Memory - field: shared_buffers type: 0 unit: MB + i18n: + zh-CN: 共享缓冲区 + en-US: Shared Buffers - field: autovacuum type: 1 + i18n: + zh-CN: 自动清理 + en-US: Auto Vacuum - field: max_connections type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections - field: effective_cache_size type: 0 unit: MB + i18n: + zh-CN: 有效缓存大小 + en-US: Effective Cache Size - field: wal_buffers type: 0 unit: MB + i18n: + zh-CN: WAL缓冲区 + en-US: WAL Buffers protocol: jdbc jdbc: host: ^_^host^_^ @@ -224,10 +296,16 @@ metrics: url: ^_^url^_^ - name: connection + i18n: + zh-CN: 连接信息 + en-US: Connection Info priority: 1 fields: - field: active type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection protocol: jdbc jdbc: host: ^_^host^_^ @@ -242,13 +320,22 @@ metrics: url: ^_^url^_^ - name: connection_state + i18n: + zh-CN: 连接状态 + en-US: Connection State priority: 1 fields: - field: state type: 1 label: true + i18n: + zh-CN: 状态 + en-US: State - field: num type: 0 + i18n: + zh-CN: 数量 + en-US: Num protocol: jdbc jdbc: host: ^_^host^_^ @@ -263,13 +350,22 @@ metrics: url: ^_^url^_^ - name: connection_db + i18n: + zh-CN: 连接数据库 + en-US: Connection Db priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: active type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection protocol: jdbc jdbc: 
host: ^_^host^_^ @@ -284,18 +380,36 @@ metrics: url: ^_^url^_^ - name: tuple + i18n: + zh-CN: 元组信息 + en-US: Tuple Info priority: 1 fields: - field: fetched type: 0 + i18n: + zh-CN: 获取次数 + en-US: Fetched - field: returned type: 0 + i18n: + zh-CN: 返回次数 + en-US: Returned - field: inserted type: 0 + i18n: + zh-CN: 插入次数 + en-US: Inserted - field: updated type: 0 + i18n: + zh-CN: 更新次数 + en-US: Updated - field: deleted type: 0 + i18n: + zh-CN: 删除次数 + en-US: Deleted protocol: jdbc jdbc: host: ^_^host^_^ @@ -310,16 +424,28 @@ metrics: url: ^_^url^_^ - name: temp_file + i18n: + zh-CN: 临时文件 + en-US: Temp File priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: num type: 0 + i18n: + zh-CN: 次数 + en-US: Num - field: size type: 0 unit: B + i18n: + zh-CN: 大小 + en-US: Size protocol: jdbc jdbc: host: ^_^host^_^ @@ -334,17 +460,29 @@ metrics: url: ^_^url^_^ - name: lock + i18n: + zh-CN: 锁信息 + en-US: Lock Info priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: conflicts type: 0 unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts - field: deadlocks type: 0 unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks protocol: jdbc jdbc: host: ^_^host^_^ @@ -359,21 +497,39 @@ metrics: url: ^_^url^_^ - name: slow_sql + i18n: + zh-CN: 慢查询 + en-US: Slow Sql priority: 1 fields: - field: sql_text type: 1 label: true + i18n: + zh-CN: SQL语句 + en-US: SQL Text - field: calls type: 0 + i18n: + zh-CN: 调用次数 + en-US: Calls - field: rows type: 0 + i18n: + zh-CN: 行数 + en-US: Rows - field: avg_time type: 0 unit: ms + i18n: + zh-CN: 平均时间 + en-US: Avg Time - field: total_time type: 0 unit: ms + i18n: + zh-CN: 总时间 + en-US: Total Time aliasFields: - query - calls @@ -398,17 +554,29 @@ metrics: url: ^_^url^_^ - name: transaction + i18n: + zh-CN: 事务信息 + en-US: Transaction Info priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: commits 
type: 0 unit: times + i18n: + zh-CN: 提交次数 + en-US: Commits - field: rollbacks type: 0 unit: times + i18n: + zh-CN: 回滚次数 + en-US: Rollbacks protocol: jdbc jdbc: host: ^_^host^_^ @@ -423,21 +591,42 @@ metrics: url: ^_^url^_^ - name: conflicts + i18n: + zh-CN: 冲突信息 + en-US: Conflicts Info priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: tablespace type: 0 + i18n: + zh-CN: 表空间 + en-US: Tablespace - field: lock type: 0 + i18n: + zh-CN: 锁 + en-US: Lock - field: snapshot type: 0 + i18n: + zh-CN: 快照 + en-US: Snapshot - field: bufferpin type: 0 + i18n: + zh-CN: 缓冲区 + en-US: Bufferpin - field: deadlock type: 0 + i18n: + zh-CN: 死锁 + en-US: Deadlock protocol: jdbc jdbc: host: ^_^host^_^ @@ -452,14 +641,23 @@ metrics: url: ^_^url^_^ - name: cache_hit_ratio + i18n: + zh-CN: 缓存命中率 + en-US: Cache Hit Ratio priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: ratio type: 0 unit: '%' + i18n: + zh-CN: 命中率 + en-US: Hit Ratio aliasFields: - blks_hit - blks_read @@ -480,14 +678,23 @@ metrics: url: ^_^url^_^ - name: checkpoint + i18n: + zh-CN: Checkpoint信息 + en-US: Checkpoint Info priority: 2 fields: - field: checkpoint_sync_time type: 0 unit: ms + i18n: + zh-CN: Checkpoint同步时间 + en-US: Checkpoint Sync Time - field: checkpoint_write_time type: 0 unit: ms + i18n: + zh-CN: Checkpoint写入时间 + en-US: Checkpoint Write Time protocol: jdbc jdbc: host: ^_^host^_^ @@ -502,18 +709,33 @@ metrics: url: ^_^url^_^ - name: buffer + i18n: + zh-CN: Buffer信息 + en-US: Buffer Info priority: 2 fields: - field: allocated type: 0 - field: fsync_calls_by_backend type: 0 + i18n: + zh-CN: 后端进程直接执行的文件同步调用次数 + en-US: Fsync Calls By Backend - field: written_directly_by_backend type: 0 + i18n: + zh-CN: 后台写入到数据文件 + en-US: Written Directly By Backend - field: written_by_background_writer type: 0 + i18n: + zh-CN: 后台写入 + en-US: Written By Background Writer - field: written_during_checkpoints type: 0 + 
i18n: + zh-CN: 检查点期间写入 + en-US: Written During Checkpoints protocol: jdbc jdbc: host: ^_^host^_^ diff --git a/manager/src/main/resources/define/app-kingbase.yml b/manager/src/main/resources/define/app-kingbase.yml index 892fd704fb8..eb6e6b01f87 100644 --- a/manager/src/main/resources/define/app-kingbase.yml +++ b/manager/src/main/resources/define/app-kingbase.yml @@ -96,6 +96,9 @@ params: metrics: # metrics - basic - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic Info # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -105,14 +108,29 @@ metrics: - field: server_version type: 1 label: true + i18n: + zh-CN: 服务器版本 + en-US: Server Version - field: port type: 1 + i18n: + zh-CN: 端口 + en-US: Port - field: server_encoding type: 1 + i18n: + zh-CN: 服务器编码 + en-US: Server Encoding - field: data_directory type: 1 + i18n: + zh-CN: 数据目录 + en-US: Data Directory - field: max_connections type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: jdbc # the config content when protocol is jdbc @@ -134,31 +152,58 @@ metrics: url: ^_^url^_^ - name: state + i18n: + zh-CN: 状态信息 + en-US: State Info priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: conflicts type: 0 unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts - field: deadlocks type: 0 unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks - field: blks_read type: 0 unit: blocks per second + i18n: + zh-CN: 读取块 + en-US: Blocks Read - field: blks_hit type: 0 unit: blocks per second + i18n: + zh-CN: 命中块 + en-US: Blocks Hit - field: blk_read_time type: 0 unit: ms + i18n: + zh-CN: 读取时间 + en-US: Read Time - field: blk_write_time type: 0 unit: ms + i18n: + zh-CN: 写入时间 + 
en-US: Write Time - field: stats_reset type: 1 + i18n: + zh-CN: 统计重置 + en-US: Stats Reset protocol: jdbc jdbc: host: ^_^host^_^ @@ -173,11 +218,17 @@ metrics: url: ^_^url^_^ - name: activity + i18n: + zh-CN: 活动信息 + en-US: Activity Info priority: 2 fields: - field: running type: 0 unit: sbc + i18n: + zh-CN: 运行中 + en-US: Running protocol: jdbc jdbc: host: ^_^host^_^ @@ -192,24 +243,45 @@ metrics: url: ^_^url^_^ - name: resource_config + i18n: + zh-CN: 资源配置 + en-US: Resource Config priority: 1 fields: - field: work_mem type: 0 unit: MB + i18n: + zh-CN: 工作内存 + en-US: Work Memory - field: shared_buffers type: 0 unit: MB + i18n: + zh-CN: 共享缓冲区 + en-US: Shared Buffers - field: autovacuum type: 1 + i18n: + zh-CN: 自动清理 + en-US: Auto Vacuum - field: max_connections type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections - field: effective_cache_size type: 0 unit: MB + i18n: + zh-CN: 有效缓存大小 + en-US: Effective Cache Size - field: wal_buffers type: 0 unit: MB + i18n: + zh-CN: WAL缓冲区 + en-US: WAL Buffers protocol: jdbc jdbc: host: ^_^host^_^ @@ -224,10 +296,16 @@ metrics: url: ^_^url^_^ - name: connection + i18n: + zh-CN: 连接信息 + en-US: Connection Info priority: 1 fields: - field: active type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection protocol: jdbc jdbc: host: ^_^host^_^ @@ -242,13 +320,22 @@ metrics: url: ^_^url^_^ - name: connection_state + i18n: + zh-CN: 连接状态 + en-US: Connection State priority: 1 fields: - field: state type: 1 label: true + i18n: + zh-CN: 状态 + en-US: State - field: num type: 0 + i18n: + zh-CN: 数量 + en-US: Num protocol: jdbc jdbc: host: ^_^host^_^ @@ -263,13 +350,22 @@ metrics: url: ^_^url^_^ - name: connection_db + i18n: + zh-CN: 连接数据库 + en-US: Connection Db priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: active type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection protocol: jdbc jdbc: host: ^_^host^_^ @@ -284,18 +380,36 @@ metrics: url: ^_^url^_^ - name: tuple + i18n: + zh-CN: 元组信息 + 
en-US: Tuple Info priority: 1 fields: - field: fetched type: 0 + i18n: + zh-CN: 获取次数 + en-US: Fetched - field: returned type: 0 + i18n: + zh-CN: 返回次数 + en-US: Returned - field: inserted type: 0 + i18n: + zh-CN: 插入次数 + en-US: Inserted - field: updated type: 0 + i18n: + zh-CN: 更新次数 + en-US: Updated - field: deleted type: 0 + i18n: + zh-CN: 删除次数 + en-US: Deleted protocol: jdbc jdbc: host: ^_^host^_^ @@ -310,16 +424,28 @@ metrics: url: ^_^url^_^ - name: temp_file + i18n: + zh-CN: 临时文件 + en-US: Temp File priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: num type: 0 + i18n: + zh-CN: 次数 + en-US: Num - field: size type: 0 unit: B + i18n: + zh-CN: 大小 + en-US: Size protocol: jdbc jdbc: host: ^_^host^_^ @@ -334,17 +460,29 @@ metrics: url: ^_^url^_^ - name: lock + i18n: + zh-CN: 锁信息 + en-US: Lock Info priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: conflicts type: 0 unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts - field: deadlocks type: 0 unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks protocol: jdbc jdbc: host: ^_^host^_^ @@ -359,21 +497,39 @@ metrics: url: ^_^url^_^ - name: slow_sql + i18n: + zh-CN: 慢查询 + en-US: Slow Sql priority: 1 fields: - field: sql_text type: 1 label: true + i18n: + zh-CN: SQL语句 + en-US: SQL Text - field: calls type: 0 + i18n: + zh-CN: 调用次数 + en-US: Calls - field: rows type: 0 + i18n: + zh-CN: 行数 + en-US: Rows - field: avg_time type: 0 unit: ms + i18n: + zh-CN: 平均时间 + en-US: Avg Time - field: total_time type: 0 unit: ms + i18n: + zh-CN: 总时间 + en-US: Total Time aliasFields: - query - calls @@ -398,17 +554,29 @@ metrics: url: ^_^url^_^ - name: transaction + i18n: + zh-CN: 事务信息 + en-US: Transaction Info priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: commits type: 0 unit: times + i18n: + zh-CN: 提交次数 + en-US: Commits - field: rollbacks type: 0 unit: times + 
i18n: + zh-CN: 回滚次数 + en-US: Rollbacks protocol: jdbc jdbc: host: ^_^host^_^ @@ -423,21 +591,42 @@ metrics: url: ^_^url^_^ - name: conflicts + i18n: + zh-CN: 冲突信息 + en-US: Conflicts Info priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: tablespace type: 0 + i18n: + zh-CN: 表空间 + en-US: Tablespace - field: lock type: 0 + i18n: + zh-CN: 锁 + en-US: Lock - field: snapshot type: 0 + i18n: + zh-CN: 快照 + en-US: Snapshot - field: bufferpin type: 0 + i18n: + zh-CN: 缓冲区 + en-US: Bufferpin - field: deadlock type: 0 + i18n: + zh-CN: 死锁 + en-US: Deadlock protocol: jdbc jdbc: host: ^_^host^_^ @@ -452,14 +641,23 @@ metrics: url: ^_^url^_^ - name: cache_hit_ratio + i18n: + zh-CN: 缓存命中率 + en-US: Cache Hit Ratio priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: ratio type: 0 unit: '%' + i18n: + zh-CN: 命中率 + en-US: Hit Ratio aliasFields: - blks_hit - blks_read @@ -480,14 +678,23 @@ metrics: url: ^_^url^_^ - name: checkpoint + i18n: + zh-CN: Checkpoint信息 + en-US: Checkpoint Info priority: 2 fields: - field: checkpoint_sync_time type: 0 unit: ms + i18n: + zh-CN: Checkpoint同步时间 + en-US: Checkpoint Sync Time - field: checkpoint_write_time type: 0 unit: ms + i18n: + zh-CN: Checkpoint写入时间 + en-US: Checkpoint Write Time protocol: jdbc jdbc: host: ^_^host^_^ @@ -502,18 +709,33 @@ metrics: url: ^_^url^_^ - name: buffer + i18n: + zh-CN: Buffer信息 + en-US: Buffer Info priority: 2 fields: - field: allocated type: 0 - field: fsync_calls_by_backend type: 0 + i18n: + zh-CN: 后端进程直接执行的文件同步调用次数 + en-US: Fsync Calls By Backend - field: written_directly_by_backend type: 0 + i18n: + zh-CN: 后台写入到数据文件 + en-US: Written Directly By Backend - field: written_by_background_writer type: 0 + i18n: + zh-CN: 后台写入 + en-US: Written By Background Writer - field: written_during_checkpoints type: 0 + i18n: + zh-CN: 检查点期间写入 + en-US: Written During Checkpoints protocol: jdbc jdbc: host: ^_^host^_^ diff 
--git a/manager/src/main/resources/define/app-postgresql.yml b/manager/src/main/resources/define/app-postgresql.yml index 3c85d6c29e6..ead5bd80da3 100644 --- a/manager/src/main/resources/define/app-postgresql.yml +++ b/manager/src/main/resources/define/app-postgresql.yml @@ -96,6 +96,9 @@ params: metrics: # metrics - basic - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic Info # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -105,14 +108,29 @@ metrics: - field: server_version type: 1 label: true + i18n: + zh-CN: 服务器版本 + en-US: Server Version - field: port type: 1 + i18n: + zh-CN: 端口 + en-US: Port - field: server_encoding type: 1 + i18n: + zh-CN: 服务器编码 + en-US: Server Encoding - field: data_directory type: 1 + i18n: + zh-CN: 数据目录 + en-US: Data Directory - field: max_connections type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: jdbc # the config content when protocol is jdbc @@ -134,31 +152,58 @@ metrics: url: ^_^url^_^ - name: state + i18n: + zh-CN: 状态信息 + en-US: State Info priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: conflicts type: 0 unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts - field: deadlocks type: 0 unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks - field: blks_read type: 0 unit: blocks per second + i18n: + zh-CN: 读取块 + en-US: Blocks Read - field: blks_hit type: 0 unit: blocks per second + i18n: + zh-CN: 命中块 + en-US: Blocks Hit - field: blk_read_time type: 0 unit: ms + i18n: + zh-CN: 读取时间 + en-US: Read Time - field: blk_write_time type: 0 unit: ms + i18n: + zh-CN: 写入时间 + en-US: Write Time - field: stats_reset type: 1 + i18n: + zh-CN: 统计重置 + en-US: Stats Reset 
protocol: jdbc jdbc: host: ^_^host^_^ @@ -173,11 +218,17 @@ metrics: url: ^_^url^_^ - name: activity + i18n: + zh-CN: 活动信息 + en-US: Activity Info priority: 2 fields: - field: running type: 0 unit: sbc + i18n: + zh-CN: 运行中 + en-US: Running protocol: jdbc jdbc: host: ^_^host^_^ @@ -192,24 +243,45 @@ metrics: url: ^_^url^_^ - name: resource_config + i18n: + zh-CN: 资源配置 + en-US: Resource Config priority: 1 fields: - field: work_mem type: 0 unit: MB + i18n: + zh-CN: 工作内存 + en-US: Work Memory - field: shared_buffers type: 0 unit: MB + i18n: + zh-CN: 共享缓冲区 + en-US: Shared Buffers - field: autovacuum type: 1 + i18n: + zh-CN: 自动清理 + en-US: Auto Vacuum - field: max_connections type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections - field: effective_cache_size type: 0 unit: MB + i18n: + zh-CN: 有效缓存大小 + en-US: Effective Cache Size - field: wal_buffers type: 0 unit: MB + i18n: + zh-CN: WAL缓冲区 + en-US: WAL Buffers protocol: jdbc jdbc: host: ^_^host^_^ @@ -224,10 +296,16 @@ metrics: url: ^_^url^_^ - name: connection + i18n: + zh-CN: 连接信息 + en-US: Connection Info priority: 1 fields: - field: active type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection protocol: jdbc jdbc: host: ^_^host^_^ @@ -242,13 +320,22 @@ metrics: url: ^_^url^_^ - name: connection_state + i18n: + zh-CN: 连接状态 + en-US: Connection State priority: 1 fields: - field: state type: 1 label: true + i18n: + zh-CN: 状态 + en-US: State - field: num type: 0 + i18n: + zh-CN: 数量 + en-US: Num protocol: jdbc jdbc: host: ^_^host^_^ @@ -263,13 +350,22 @@ metrics: url: ^_^url^_^ - name: connection_db + i18n: + zh-CN: 连接数据库 + en-US: Connection Db priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: active type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection protocol: jdbc jdbc: host: ^_^host^_^ @@ -284,18 +380,36 @@ metrics: url: ^_^url^_^ - name: tuple + i18n: + zh-CN: 元组信息 + en-US: Tuple Info priority: 1 fields: - field: fetched type: 0 + i18n: + zh-CN: 获取次数 + 
en-US: Fetched - field: returned type: 0 + i18n: + zh-CN: 返回次数 + en-US: Returned - field: inserted type: 0 + i18n: + zh-CN: 插入次数 + en-US: Inserted - field: updated type: 0 + i18n: + zh-CN: 更新次数 + en-US: Updated - field: deleted type: 0 + i18n: + zh-CN: 删除次数 + en-US: Deleted protocol: jdbc jdbc: host: ^_^host^_^ @@ -310,16 +424,28 @@ metrics: url: ^_^url^_^ - name: temp_file + i18n: + zh-CN: 临时文件 + en-US: Temp File priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: num type: 0 + i18n: + zh-CN: 次数 + en-US: Num - field: size type: 0 unit: B + i18n: + zh-CN: 大小 + en-US: Size protocol: jdbc jdbc: host: ^_^host^_^ @@ -334,17 +460,29 @@ metrics: url: ^_^url^_^ - name: lock + i18n: + zh-CN: 锁信息 + en-US: Lock Info priority: 1 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: conflicts type: 0 unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts - field: deadlocks type: 0 unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks protocol: jdbc jdbc: host: ^_^host^_^ @@ -359,21 +497,39 @@ metrics: url: ^_^url^_^ - name: slow_sql + i18n: + zh-CN: 慢查询 + en-US: Slow Sql priority: 1 fields: - field: sql_text type: 1 label: true + i18n: + zh-CN: SQL语句 + en-US: SQL Text - field: calls type: 0 + i18n: + zh-CN: 调用次数 + en-US: Calls - field: rows type: 0 + i18n: + zh-CN: 行数 + en-US: Rows - field: avg_time type: 0 unit: ms + i18n: + zh-CN: 平均时间 + en-US: Avg Time - field: total_time type: 0 unit: ms + i18n: + zh-CN: 总时间 + en-US: Total Time aliasFields: - query - calls @@ -398,17 +554,29 @@ metrics: url: ^_^url^_^ - name: transaction + i18n: + zh-CN: 事务信息 + en-US: Transaction Info priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: commits type: 0 unit: times + i18n: + zh-CN: 提交次数 + en-US: Commits - field: rollbacks type: 0 unit: times + i18n: + zh-CN: 回滚次数 + en-US: Rollbacks protocol: jdbc jdbc: host: ^_^host^_^ @@ -423,21 
+591,42 @@ metrics: url: ^_^url^_^ - name: conflicts + i18n: + zh-CN: 冲突信息 + en-US: Conflicts Info priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: tablespace type: 0 + i18n: + zh-CN: 表空间 + en-US: Tablespace - field: lock type: 0 + i18n: + zh-CN: 锁 + en-US: Lock - field: snapshot type: 0 + i18n: + zh-CN: 快照 + en-US: Snapshot - field: bufferpin type: 0 + i18n: + zh-CN: 缓冲区 + en-US: Bufferpin - field: deadlock type: 0 + i18n: + zh-CN: 死锁 + en-US: Deadlock protocol: jdbc jdbc: host: ^_^host^_^ @@ -452,14 +641,23 @@ metrics: url: ^_^url^_^ - name: cache_hit_ratio + i18n: + zh-CN: 缓存命中率 + en-US: Cache Hit Ratio priority: 2 fields: - field: db_name type: 1 label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name - field: ratio type: 0 unit: '%' + i18n: + zh-CN: 命中率 + en-US: Hit Ratio aliasFields: - blks_hit - blks_read @@ -480,14 +678,23 @@ metrics: url: ^_^url^_^ - name: checkpoint + i18n: + zh-CN: Checkpoint信息 + en-US: Checkpoint Info priority: 2 fields: - field: checkpoint_sync_time type: 0 unit: ms + i18n: + zh-CN: Checkpoint同步时间 + en-US: Checkpoint Sync Time - field: checkpoint_write_time type: 0 unit: ms + i18n: + zh-CN: Checkpoint写入时间 + en-US: Checkpoint Write Time protocol: jdbc jdbc: host: ^_^host^_^ @@ -502,18 +709,33 @@ metrics: url: ^_^url^_^ - name: buffer + i18n: + zh-CN: Buffer信息 + en-US: Buffer Info priority: 2 fields: - field: allocated type: 0 - field: fsync_calls_by_backend type: 0 + i18n: + zh-CN: 后端进程直接执行的文件同步调用次数 + en-US: Fsync Calls By Backend - field: written_directly_by_backend type: 0 + i18n: + zh-CN: 后台写入到数据文件 + en-US: Written Directly By Backend - field: written_by_background_writer type: 0 + i18n: + zh-CN: 后台写入 + en-US: Written By Background Writer - field: written_during_checkpoints type: 0 + i18n: + zh-CN: 检查点期间写入 + en-US: Written During Checkpoints protocol: jdbc jdbc: host: ^_^host^_^ From 69b8db7048ad7f0ca818037ce04bb31c9c686f39 Mon Sep 17 00:00:00 2001 From: lingluojun 
<247677857yh@gmail.com> Date: Sat, 3 Aug 2024 23:32:46 +0800 Subject: [PATCH 123/257] [improve] remove useless invoke method (#2450) Co-authored-by: tomsun28 --- .../dispatch/entrance/processor/GoOfflineProcessor.java | 1 - .../apache/hertzbeat/remoting/netty/NettyRemotingAbstract.java | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/processor/GoOfflineProcessor.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/processor/GoOfflineProcessor.java index 1e8dbb2b041..007b77053ba 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/processor/GoOfflineProcessor.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/processor/GoOfflineProcessor.java @@ -41,7 +41,6 @@ public ClusterMsg.Message handle(ChannelHandlerContext ctx, ClusterMsg.Message m } timerDispatch.goOffline(); log.info("receive offline message and handle success"); - message.getMsg(); if (message.getMsg().contains(CommonConstants.COLLECTOR_AUTH_FAILED)) { log.error("[Auth Failed]receive client auth failed message and go offline. 
{}", message.getMsg()); return null; diff --git a/remoting/src/main/java/org/apache/hertzbeat/remoting/netty/NettyRemotingAbstract.java b/remoting/src/main/java/org/apache/hertzbeat/remoting/netty/NettyRemotingAbstract.java index 09b4dc4f4d2..a9632d7c401 100644 --- a/remoting/src/main/java/org/apache/hertzbeat/remoting/netty/NettyRemotingAbstract.java +++ b/remoting/src/main/java/org/apache/hertzbeat/remoting/netty/NettyRemotingAbstract.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.concurrent.ConcurrentHashMap; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import org.apache.hertzbeat.common.entity.message.ClusterMsg; import org.apache.hertzbeat.common.util.NetworkUtil; import org.apache.hertzbeat.remoting.RemotingService; @@ -77,7 +78,7 @@ protected void processRequestMsg(ChannelHandlerContext ctx, ClusterMsg.Message r } private void doBeforeRequest(ChannelHandlerContext ctx, ClusterMsg.Message request) { - if (this.nettyHookList == null || this.nettyHookList.isEmpty()) { + if (CollectionUtils.isEmpty(this.nettyHookList)) { return; } for (NettyHook nettyHook : this.nettyHookList) { From ae03111a7554976b991d4b74eb90dd3ff34320a9 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 3 Aug 2024 23:47:24 +0800 Subject: [PATCH 124/257] [Improve] add AiController unit test (#2443) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../manager/controller/AiController.java | 1 - .../manager/controller/AiControllerTest.java | 87 +++++++++++++++++++ 2 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java index 9c2c3fd7c88..6c7408826d4 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java +++ 
b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java @@ -33,7 +33,6 @@ import org.springframework.web.bind.annotation.RestController; import reactor.core.publisher.Flux; - /** * AI Management API */ diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java new file mode 100644 index 00000000000..7c1bfa5f27e --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.controller; + +import org.apache.hertzbeat.manager.config.AiProperties; +import org.apache.hertzbeat.manager.service.AiService; +import org.apache.hertzbeat.manager.service.impl.AiServiceFactoryImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import reactor.core.publisher.Flux; + +import org.springframework.http.MediaType; +import org.springframework.http.codec.ServerSentEvent; +import org.springframework.test.web.servlet.MockMvc; +import org.springframework.test.web.servlet.setup.MockMvcBuilders; + +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; + +/** + * test case for {@link AiController} + */ + +@ExtendWith(MockitoExtension.class) +class AiControllerTest { + + private MockMvc mockMvc; + + @Mock + private AiServiceFactoryImpl aiServiceFactory; + + @Mock + private AiProperties aiProperties; + + @Mock + private AiService aiService; + + @InjectMocks + private AiController aiController; + + @BeforeEach + public void setup() { + + mockMvc = MockMvcBuilders.standaloneSetup(aiController).build(); + } + + @Test + public void testRequestAi() throws Exception { + + String responseText = "response"; + Flux> responseFlux = Flux.just(ServerSentEvent.builder(responseText).build()); + + when(aiServiceFactory.getAiServiceImplBean(anyString())).thenReturn(aiService); + when(aiService.requestAi(anyString())).thenReturn(responseFlux); + when(aiProperties.getType()).thenReturn("alibabaAi"); + + mockMvc.perform(get("/api/ai/get") + 
.param("text", "Who are you") + .accept(MediaType.TEXT_EVENT_STREAM)) + .andExpect(status().isOk()) + .andExpect(content().contentType(MediaType.TEXT_EVENT_STREAM_VALUE)) + .andExpect(content().string("data:response\n\n")); + } + +} From 6e47da758efabecac1796f364e8e8112d21949fb Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 3 Aug 2024 23:53:19 +0800 Subject: [PATCH 125/257] [Improve] add GeneralConfigController unit test (#2444) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../GeneralConfigControllerTest.java | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/controller/GeneralConfigControllerTest.java diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/GeneralConfigControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/GeneralConfigControllerTest.java new file mode 100644 index 00000000000..92e8ffd6f7c --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/GeneralConfigControllerTest.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.controller; + +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; +import org.apache.hertzbeat.manager.service.impl.ConfigServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +/** + * Test case for {@link GeneralConfigController} + */ + +@ExtendWith(MockitoExtension.class) +class GeneralConfigControllerTest { + + private MockMvc mockMvc; + + @Mock + private ConfigServiceImpl configService; + + @InjectMocks + private GeneralConfigController generalConfigController; + + @BeforeEach + public void setup() { + + mockMvc = standaloneSetup(generalConfigController).build(); + } + + @Test + public void testSaveOrUpdateConfig() throws Exception { + + doNothing().when(configService).saveConfig(anyString(), any()); + + mockMvc.perform(post("/api/config/email") + .contentType(MediaType.APPLICATION_JSON) + .content("{\"key\":\"value\"}")) + 
.andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Update config success")); + } + + @Test + public void testGetConfig() throws Exception { + + when(configService.getConfig(anyString())).thenReturn(any()); + + mockMvc.perform(get("/api/config/email") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testUpdateTemplateAppConfig() throws Exception { + + doNothing().when(configService).updateTemplateAppConfig(anyString(), any(TemplateConfig.AppTemplate.class)); + + mockMvc.perform(put("/api/config/template/appName") + .contentType(MediaType.APPLICATION_JSON) + .content("{\"templateKey\":\"templateValue\"}")) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + +} From a4c6a6045f51f50a8f53b8b25e5463b7b6413d67 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 3 Aug 2024 23:54:40 +0800 Subject: [PATCH 126/257] [Improve] add MetricsController unit test (#2445) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../controller/MetricsControllerTest.java | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/controller/MetricsControllerTest.java diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/MetricsControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/MetricsControllerTest.java new file mode 100644 index 00000000000..c269a4ad804 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/MetricsControllerTest.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.controller; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hertzbeat.common.queue.impl.InMemoryCommonDataQueue; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +/** + * Test case for {@link MetricsController} + */ + +@ExtendWith(MockitoExtension.class) +class MetricsControllerTest { + + private MockMvc mockMvc; + + @Mock + private InMemoryCommonDataQueue commonDataQueue; + + @InjectMocks + private MetricsController metricsController; + + @BeforeEach + public void setup() { + + mockMvc = standaloneSetup(metricsController).build(); + } + + @Test + public void testGetMetricsInfo() throws Exception { + + Map queueInfo = new HashMap<>(); + 
queueInfo.put("metric1", 100); + queueInfo.put("metric2", 200); + + when(commonDataQueue.getQueueSizeMetricsInfo()).thenReturn(queueInfo); + + mockMvc.perform(get("/api/metrics") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value(0)) + .andExpect(jsonPath("$.data.metric1").value(100)) + .andExpect(jsonPath("$.data.metric2").value(200)); + } + +} From 58920978c312418592663fe8a6e1bccd377648b5 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 3 Aug 2024 23:59:35 +0800 Subject: [PATCH 127/257] [Improve] add StatusPagesController & StatusPagePublicController unit test (#2446) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../controller/StatusPageControllerTest.java | 233 ++++++++++++++++++ .../StatusPagePublicControllerTest.java | 123 +++++++++ 2 files changed, 356 insertions(+) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPageControllerTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPagePublicControllerTest.java diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPageControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPageControllerTest.java new file mode 100644 index 00000000000..1c64dd504cd --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPageControllerTest.java @@ -0,0 +1,233 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.controller; + +import java.util.Collections; +import java.util.List; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.manager.StatusPageComponent; +import org.apache.hertzbeat.common.entity.manager.StatusPageIncident; +import org.apache.hertzbeat.common.entity.manager.StatusPageOrg; +import org.apache.hertzbeat.common.util.JsonUtil; +import org.apache.hertzbeat.manager.service.StatusPageService; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +/** + * test case for {@link StatusPageController} + */ + 
+@ExtendWith(MockitoExtension.class) +class StatusPageControllerTest { + + private MockMvc mockMvc; + + @Mock + private StatusPageService statusPageService; + + @InjectMocks + private StatusPageController statusPageController; + + @BeforeEach + public void setup() { + + mockMvc = standaloneSetup(statusPageController).build(); + } + + @Test + public void testQueryStatusPageOrg() throws Exception { + + StatusPageOrg statusPageOrg = StatusPageOrg.builder().build(); + when(statusPageService.queryStatusPageOrg()).thenReturn(statusPageOrg); + + mockMvc.perform(get("/api/status/page/org") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testQueryStatusPageOrg_NotFound() throws Exception { + + when(statusPageService.queryStatusPageOrg()).thenReturn(null); + + mockMvc.perform(get("/api/status/page/org") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) + .andExpect(jsonPath("$.msg").value("Status Page Organization Not Found")); + } + + @Test + public void testSaveStatusPageOrg() throws Exception { + + StatusPageOrg statusPageOrg = StatusPageOrg.builder() + .name("Test name") + .home("Test home") + .description("Test description") + .logo("Test logo") + .build(); + when(statusPageService.saveStatusPageOrg(statusPageOrg)).thenReturn(statusPageOrg); + + mockMvc.perform(post("/api/status/page/org") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(statusPageOrg)) + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testQueryStatusPageComponent() throws Exception { + + List components = Collections.singletonList(new StatusPageComponent()); + when(statusPageService.queryStatusPageComponents()).thenReturn(components); + + 
mockMvc.perform(get("/api/status/page/component") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testNewStatusPageComponent() throws Exception { + + mockMvc.perform(post("/api/status/page/component") + .contentType(MediaType.APPLICATION_JSON) + .content("{\"name\":\"New Component\"}") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Add success")); + } + + @Test + public void testUpdateStatusPageComponent() throws Exception { + + mockMvc.perform(put("/api/status/page/component") + .contentType(MediaType.APPLICATION_JSON) + .content("{\"name\":\"Updated Component\"}") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Update success")); + } + + @Test + public void testDeleteStatusPageComponent() throws Exception { + + mockMvc.perform(delete("/api/status/page/component/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Delete success")); + } + + @Test + public void testQueryStatusPageComponentById() throws Exception { + + StatusPageComponent component = new StatusPageComponent(); + when(statusPageService.queryStatusPageComponent(1L)).thenReturn(component); + + mockMvc.perform(get("/api/status/page/component/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testNewStatusPageIncident() throws Exception { + + StatusPageIncident statusPageIncident = StatusPageIncident.builder() + .name("New Incident") + .build(); + + 
mockMvc.perform(post("/api/status/page/incident") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(statusPageIncident)) + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Add success")); + } + + @Test + public void testUpdateStatusPageIncident() throws Exception { + + StatusPageIncident statusPageIncident = StatusPageIncident.builder() + .name("Update Incident") + .build(); + + mockMvc.perform(put("/api/status/page/incident") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(statusPageIncident)) + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testDeleteStatusPageIncident() throws Exception { + + mockMvc.perform(delete("/api/status/page/incident/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Delete success")); + } + + @Test + public void testQueryStatusPageIncidentById() throws Exception { + + StatusPageIncident incident = new StatusPageIncident(); + when(statusPageService.queryStatusPageIncident(1L)).thenReturn(incident); + + mockMvc.perform(get("/api/status/page/incident/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testQueryStatusPageIncident() throws Exception { + + List incidents = Collections.singletonList(new StatusPageIncident()); + when(statusPageService.queryStatusPageIncidents()).thenReturn(incidents); + + mockMvc.perform(get("/api/status/page/incident") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + 
} + +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPagePublicControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPagePublicControllerTest.java new file mode 100644 index 00000000000..543365928ae --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/StatusPagePublicControllerTest.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.controller; + +import java.util.Collections; +import java.util.List; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.manager.StatusPageIncident; +import org.apache.hertzbeat.common.entity.manager.StatusPageOrg; +import org.apache.hertzbeat.manager.pojo.dto.ComponentStatus; +import org.apache.hertzbeat.manager.service.StatusPageService; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +/** + * test case for {@link StatusPagePublicController} + */ + +@ExtendWith(MockitoExtension.class) +class StatusPagePublicControllerTest { + + private MockMvc mockMvc; + + @Mock + private StatusPageService statusPageService; + + @InjectMocks + private StatusPagePublicController statusPagePublicController; + + @BeforeEach + public void setup() { + + mockMvc = standaloneSetup(statusPagePublicController).build(); + } + + @Test + public void testQueryStatusPageOrg() throws Exception { + + StatusPageOrg statusPageOrg = StatusPageOrg.builder().build(); + when(statusPageService.queryStatusPageOrg()).thenReturn(statusPageOrg); + + mockMvc.perform(get("/api/status/page/public/org") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) 
CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testQueryStatusPageOrgNotFound() throws Exception { + + when(statusPageService.queryStatusPageOrg()).thenReturn(null); + + mockMvc.perform(get("/api/status/page/public/org") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) + .andExpect(jsonPath("$.msg").value("Status Page Organization Not Found")); + } + + @Test + public void testQueryStatusPageComponent() throws Exception { + + List componentStatusList = Collections.singletonList(new ComponentStatus()); + when(statusPageService.queryComponentsStatus()).thenReturn(componentStatusList); + + mockMvc.perform(get("/api/status/page/public/component") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testQueryStatusPageComponentById() throws Exception { + + ComponentStatus componentStatus = new ComponentStatus(); + when(statusPageService.queryComponentStatus(1L)).thenReturn(componentStatus); + + mockMvc.perform(get("/api/status/page/public/component/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + + @Test + public void testQueryStatusPageIncident() throws Exception { + + List incidents = Collections.singletonList(new StatusPageIncident()); + when(statusPageService.queryStatusPageIncidents()).thenReturn(incidents); + + mockMvc.perform(get("/api/status/page/public/incident") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } + +} From 7eef0c21f121a33089f9a6cb157ab9d2c8abafdf Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sun, 4 Aug 2024 00:00:49 +0800 Subject: [PATCH 128/257] [Improve] add push module service unit test (#2452) Signed-off-by: yuluo-yx 
Co-authored-by: tomsun28 --- .../push/service/impl/PushServiceImpl.java | 2 +- .../push/service/PushGatewayServiceTest.java | 106 ++++++++++++++ .../push/service/PushServiceTest.java | 133 ++++++++++++++++++ 3 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java create mode 100644 push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java diff --git a/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java b/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java index cbd22bdb5f5..d3780a3995b 100644 --- a/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java +++ b/push/src/main/java/org/apache/hertzbeat/push/service/impl/PushServiceImpl.java @@ -60,7 +60,7 @@ public class PushServiceImpl implements PushService { private static final long deleteBeforeTime = deleteMetricsPeriod / 2; - PushServiceImpl(){ + public PushServiceImpl(){ monitorIdCache = new HashMap<>(); lastPushMetrics = new HashMap<>(); diff --git a/push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java b/push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java new file mode 100644 index 00000000000..9cec4edc5ba --- /dev/null +++ b/push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hertzbeat.push.service; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.List; + +import org.apache.hertzbeat.common.util.prometheus.Metric; +import org.apache.hertzbeat.common.util.prometheus.PrometheusUtil; +import org.apache.hertzbeat.push.service.impl.PushGatewayServiceImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.MockedStatic; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; + +/** + * test case for {@link PushGatewayServiceImpl} + */ + +@ExtendWith(MockitoExtension.class) +class PushGatewayServiceImplTest { + + @InjectMocks + private PushGatewayServiceImpl pushGatewayService; + + private MockedStatic prometheusUtilMock; + + @BeforeEach + void setUp() { + + prometheusUtilMock = mockStatic(PrometheusUtil.class); + } + + @AfterEach + void tearDown() { + + prometheusUtilMock.close(); + } + + @Test + void testPushMetricsDataSuccess() throws IOException { + + String mockData = "some metric data"; + InputStream inputStream = new ByteArrayInputStream(mockData.getBytes()); + List 
mockMetrics = Collections.singletonList(new Metric()); + + prometheusUtilMock.when( + () -> PrometheusUtil.parseMetrics(any(InputStream.class)) + ).thenReturn(mockMetrics); + + boolean result = pushGatewayService.pushMetricsData(inputStream); + + assertTrue(result); + prometheusUtilMock.verify( + () -> PrometheusUtil.parseMetrics(any(InputStream.class)), + times(1) + ); + } + + @Test + void testPushMetricsDataFailure() throws IOException { + + String mockData = "some metric data"; + InputStream inputStream = new ByteArrayInputStream(mockData.getBytes()); + + prometheusUtilMock.when(() -> PrometheusUtil.parseMetrics(any(InputStream.class))).thenReturn(null); + + boolean result = pushGatewayService.pushMetricsData(inputStream); + + assertFalse(result); + prometheusUtilMock.verify( + () -> PrometheusUtil.parseMetrics(any(InputStream.class)), + times(1) + ); + } + +} diff --git a/push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java b/push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java new file mode 100644 index 00000000000..fdf9d45780b --- /dev/null +++ b/push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hertzbeat.push.service; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import org.apache.hertzbeat.common.entity.manager.Monitor; +import org.apache.hertzbeat.common.entity.push.PushMetrics; +import org.apache.hertzbeat.common.entity.push.PushMetricsDto; +import org.apache.hertzbeat.push.dao.PushMetricsDao; +import org.apache.hertzbeat.push.dao.PushMonitorDao; +import org.apache.hertzbeat.push.service.impl.PushServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link PushServiceImpl} + */ + +@ExtendWith(MockitoExtension.class) +class PushServiceImplTest { + + @Mock + private PushMonitorDao monitorDao; + + @Mock + private PushMetricsDao metricsDao; + + @InjectMocks + private PushServiceImpl pushService; + + @BeforeEach + void setUp() { + + pushService = new PushServiceImpl(); + + ReflectionTestUtils.setField(pushService, "monitorDao", monitorDao); + ReflectionTestUtils.setField(pushService, "metricsDao", metricsDao); + } + + @Test + void testPushMetricsData() { + + PushMetricsDto pushMetricsDto = new PushMetricsDto(); + List metricsList = new ArrayList<>(); + PushMetricsDto.Metrics metrics = new PushMetricsDto.Metrics(); + metrics.setMonitorId(1L); + 
metricsList.add(metrics); + pushMetricsDto.setMetricsList(metricsList); + + when(monitorDao.findById(anyLong())).thenReturn(Optional.of(new Monitor())); + + pushService.pushMetricsData(pushMetricsDto); + + verify(metricsDao, times(1)).saveAll(any()); + } + + @Test + void testGetPushMetricData() { + + Long monitorId = 1L; + Long time = System.currentTimeMillis(); + PushMetrics pushMetrics = PushMetrics.builder() + .monitorId(monitorId) + .time(time) + .metrics("[{\"key\":\"value\"}]") + .build(); + + when(metricsDao.findFirstByMonitorIdOrderByTimeDesc(monitorId)).thenReturn(pushMetrics); + + PushMetricsDto result = pushService.getPushMetricData(monitorId, time); + + assertEquals(1, result.getMetricsList().size()); + assertEquals(monitorId, result.getMetricsList().get(0).getMonitorId()); + } + + @Test + void testGetPushMetricDataTimeInvalid() { + + Long monitorId = 1L; + Long time = System.currentTimeMillis() + 10000; + PushMetrics pushMetrics = PushMetrics.builder() + .monitorId(monitorId) + .time(System.currentTimeMillis()) + .metrics("[{\"key\":\"value\"}]") + .build(); + + when(metricsDao.findFirstByMonitorIdOrderByTimeDesc(monitorId)).thenReturn(pushMetrics); + + PushMetricsDto result = pushService.getPushMetricData(monitorId, time); + + assertTrue(result.getMetricsList().isEmpty()); + } + + @Test + void testDeletePeriodically() { + + pushService.deletePeriodically(); + verify(metricsDao, times(1)).deleteAllByTimeBefore(anyLong()); + } + +} From deb827a572f1e7ad96867a204cfc787be83b7bb1 Mon Sep 17 00:00:00 2001 From: Jast Date: Sun, 4 Aug 2024 19:22:54 +0800 Subject: [PATCH 129/257] [doc] Add zookeeper doc (#2456) merge --- home/docs/help/zookeeper.md | 19 +++++++++++++++++++ .../current/help/zookeeper.md | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/home/docs/help/zookeeper.md b/home/docs/help/zookeeper.md index b7a34f49eda..dadbbc70bcd 100644 --- a/home/docs/help/zookeeper.md +++ b/home/docs/help/zookeeper.md @@ -98,3 +98,22 @@ 
Complete! | zk_min_latency | ms | Min latency | +#### Metric set:envi + +| Metric Name | Metric Unit | Metric help description | +| ------------------- |-------------|-------------------------------| +| zk_version | none | ZooKeeper version | +| hostname | none | Hostname | +| java_version | none | Java version | +| java_vendor | none | Java vendor | +| java_home | none | Java home directory | +| java_class_path | none | Java class path | +| java_library_path | none | Java library path | +| java_io_tmpdir | none | Java temporary directory | +| java_compiler | none | Java compiler | +| os_name | none | Operating system name | +| os_arch | none | Operating system architecture | +| os_version | none | Operating system version | +| user_name | none | Username | +| user_home | none | User home directory | +| user_dir | none | User current directory | \ No newline at end of file diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md index 0783ceaf3fb..64d08a259c4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md @@ -97,4 +97,23 @@ Complete! 
| zk_ephemerals_count | 个 | 临时节点数 | | zk_min_latency | ms | 最小延时 | +#### 指标集合:envi + +| 指标名称 | 指标单位 | 指标帮助描述 | +| ----------- |-----| ----------- | +| zk_version | 无 | ZooKeeper版本 | +| hostname | 无 | 主机名 | +| java_version | 无 | Java版本 | +| java_vendor | 无 | Java供应商 | +| java_home | 无 | Java主目录 | +| java_class_path | 无 | Java类路径 | +| java_library_path | 无 | Java库路径 | +| java_io_tmpdir | 无 | Java临时目录 | +| java_compiler | 无 | Java编译器 | +| os_name | 无 | 操作系统名称 | +| os_arch | 无 | 操作系统架构 | +| os_version | 无 | 操作系统版本 | +| user_name | 无 | 用户名 | +| user_home | 无 | 用户主目录 | +| user_dir | 无 | 用户当前目录 | From 254421bc460615d9e6b4fcf3f5094c8881a40a73 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Sun, 4 Aug 2024 23:37:52 +0800 Subject: [PATCH 130/257] [refactor] move code from AlertReportController to AlertService (#2434) Co-authored-by: Calvin --- .../controller/AlertReportController.java | 41 +--------- .../hertzbeat/alert/service/AlertService.java | 7 ++ .../alert/service/impl/AlertServiceImpl.java | 42 ++++++++++ .../alert/service/AlertServiceTest.java | 80 +++++++++++++++++++ 4 files changed, 130 insertions(+), 40 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertReportController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertReportController.java index 89b67fd1899..1f803ec8117 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertReportController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertReportController.java @@ -20,16 +20,10 @@ import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.tags.Tag; -import java.util.Date; -import java.util.Optional; import lombok.extern.slf4j.Slf4j; -import org.apache.hertzbeat.alert.dto.CloudAlertReportAbstract; import org.apache.hertzbeat.alert.dto.GeneralCloudAlertReport; -import 
org.apache.hertzbeat.alert.enums.CloudServiceAlarmInformationEnum; import org.apache.hertzbeat.alert.service.AlertService; -import org.apache.hertzbeat.common.entity.dto.AlertReport; import org.apache.hertzbeat.common.entity.dto.Message; -import org.apache.hertzbeat.common.util.JsonUtil; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.PathVariable; @@ -54,40 +48,7 @@ public class AlertReportController { @Operation(summary = "Interface for reporting external alarm information of cloud service") public ResponseEntity> addNewAlertReportFromCloud(@PathVariable("cloud") String cloudServiceName, @RequestBody String alertReport) { - CloudServiceAlarmInformationEnum cloudService = CloudServiceAlarmInformationEnum - .getEnumFromCloudServiceName(cloudServiceName); - - AlertReport alert = null; - if (cloudService != null) { - try { - CloudAlertReportAbstract cloudAlertReport = JsonUtil - .fromJson(alertReport, cloudService.getCloudServiceAlarmInformationEntity()); - assert cloudAlertReport != null; - alert = AlertReport.builder() - .content(cloudAlertReport.getContent()) - .alertName(cloudAlertReport.getAlertName()) - .alertTime(cloudAlertReport.getAlertTime()) - .alertDuration(cloudAlertReport.getAlertDuration()) - .priority(cloudAlertReport.getPriority()) - .reportType(cloudAlertReport.getReportType()) - .labels(cloudAlertReport.getLabels()) - .annotations(cloudAlertReport.getAnnotations()) - .build(); - } catch (Exception e) { - log.error("[alert report] parse cloud service alarm content failed! 
cloud service: {} conrent: {}", - cloudService.name(), alertReport); - } - } else { - alert = AlertReport.builder() - .content("error do not has cloud service api") - .alertName("/api/alerts/report/" + cloudServiceName) - .alertTime(new Date().getTime()) - .priority(1) - .reportType(1) - .build(); - } - Optional.ofNullable(alert).ifPresent(alertReportPresent -> - alertService.addNewAlertReport(alertReportPresent)); + alertService.addNewAlertReportFromCloud(cloudServiceName, alertReport); return ResponseEntity.ok(Message.success("Add report success")); } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java index 2bc6b182e64..6ee54fac3b8 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java @@ -76,6 +76,13 @@ public interface AlertService { */ void addNewAlertReport(AlertReport alertReport); + /** + * Save external alarms of cloud services + * @param cloudServiceName cloud service name,Such as tencloud + * @param alertReport alert report json string + */ + void addNewAlertReportFromCloud(String cloudServiceName, String alertReport); + /** * Dynamic conditional query * @param specification Query conditions diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java index a817d61238a..8f1374d6fdc 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java @@ -22,18 +22,23 @@ import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneId; +import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import 
lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.alert.dao.AlertDao; import org.apache.hertzbeat.alert.dto.AlertPriorityNum; import org.apache.hertzbeat.alert.dto.AlertSummary; +import org.apache.hertzbeat.alert.dto.CloudAlertReportAbstract; +import org.apache.hertzbeat.alert.enums.CloudServiceAlarmInformationEnum; import org.apache.hertzbeat.alert.reduce.AlarmCommonReduce; import org.apache.hertzbeat.alert.service.AlertService; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.dto.AlertReport; +import org.apache.hertzbeat.common.util.JsonUtil; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; @@ -121,6 +126,43 @@ public void addNewAlertReport(AlertReport alertReport) { alarmCommonReduce.reduceAndSendAlarm(buildAlertData(alertReport)); } + @Override + public void addNewAlertReportFromCloud(String cloudServiceName, String alertReport) { + CloudServiceAlarmInformationEnum cloudService = CloudServiceAlarmInformationEnum + .getEnumFromCloudServiceName(cloudServiceName); + + AlertReport alert = null; + if (cloudService != null) { + try { + CloudAlertReportAbstract cloudAlertReport = JsonUtil + .fromJson(alertReport, cloudService.getCloudServiceAlarmInformationEntity()); + assert cloudAlertReport != null; + alert = AlertReport.builder() + .content(cloudAlertReport.getContent()) + .alertName(cloudAlertReport.getAlertName()) + .alertTime(cloudAlertReport.getAlertTime()) + .alertDuration(cloudAlertReport.getAlertDuration()) + .priority(cloudAlertReport.getPriority()) + .reportType(cloudAlertReport.getReportType()) + .labels(cloudAlertReport.getLabels()) + .annotations(cloudAlertReport.getAnnotations()) + .build(); + } catch (Exception e) { + log.error("[alert report] parse cloud service alarm content failed! 
cloud service: {} conrent: {}", + cloudService.name(), alertReport); + } + } else { + alert = AlertReport.builder() + .content("error do not has cloud service api") + .alertName("/api/alerts/report/" + cloudServiceName) + .alertTime(new Date().getTime()) + .priority(1) + .reportType(1) + .build(); + } + Optional.ofNullable(alert).ifPresent(this::addNewAlertReport); + } + @Override public List getAlerts(Specification specification) { diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertServiceTest.java index 9a7606362b0..22987465920 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertServiceTest.java @@ -17,13 +17,46 @@ package org.apache.hertzbeat.alert.service; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import org.apache.hertzbeat.alert.dao.AlertDao; +import org.apache.hertzbeat.alert.dto.AlertPriorityNum; +import org.apache.hertzbeat.alert.dto.TenCloudAlertReport; +import org.apache.hertzbeat.alert.reduce.AlarmCommonReduce; +import org.apache.hertzbeat.alert.service.impl.AlertServiceImpl; +import org.apache.hertzbeat.common.entity.alerter.Alert; +import org.apache.hertzbeat.common.entity.dto.AlertReport; +import org.apache.hertzbeat.common.util.JsonUtil; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import 
org.mockito.junit.jupiter.MockitoExtension; /** * Test case for {@link AlertService} */ +@ExtendWith(MockitoExtension.class) class AlertServiceTest { + @Mock + private AlertDao alertDao; + + @Mock + private AlarmCommonReduce alarmCommonReduce; + + @InjectMocks + private AlertServiceImpl alertService; @BeforeEach void setUp() { @@ -31,29 +64,76 @@ void setUp() { @Test void addAlert() { + Alert alert = new Alert(); + assertDoesNotThrow(() -> alertService.addAlert(alert)); + verify(alertDao, times(1)).save(alert); } @Test void getAlerts() { + // todo } @Test void deleteAlerts() { + HashSet ids = new HashSet<>(); + ids.add(1L); + ids.add(2L); + assertDoesNotThrow(() -> alertService.deleteAlerts(ids)); + verify(alertDao, times(1)).deleteAlertsByIdIn(ids); } @Test void clearAlerts() { + assertDoesNotThrow(() -> alertService.clearAlerts()); + verify(alertDao, times(1)).deleteAll(); } @Test void editAlertStatus() { + Byte status = 0; + List ids = List.of(1L, 2L, 3L); + assertDoesNotThrow(() -> alertService.editAlertStatus(status, ids)); + verify(alertDao, times(1)).updateAlertsStatus(status, ids); } @Test void getAlertsSummary() { + List priorityNums = new ArrayList<>(); + priorityNums.add(new AlertPriorityNum((byte) 1, 100)); + when(alertDao.findAlertPriorityNum()).thenReturn(priorityNums); + + assertDoesNotThrow(() -> alertService.getAlertsSummary()); + verify(alertDao, times(1)).findAlertPriorityNum(); + verify(alertDao, times(1)).count(); + + assertNotNull(alertService.getAlertsSummary()); } @Test void addNewAlertReport() { + AlertReport alertReport = AlertReport.builder() + .annotations(new HashMap<>()) + .priority(0) + .alertTime(System.currentTimeMillis()) + .build(); + assertDoesNotThrow(() -> alertService.addNewAlertReport(alertReport)); + verify(alarmCommonReduce, times(1)).reduceAndSendAlarm(any(Alert.class)); + } + + @Test + void addNewAlertReportFromCloud() { + TenCloudAlertReport alertReport = TenCloudAlertReport.builder() + .firstOccurTime("2024-08-01 
11:30:00") + .durationTime(100) + .build(); + String reportJson = JsonUtil.toJson(alertReport); + assertDoesNotThrow(() -> alertService.addNewAlertReportFromCloud("tencloud", reportJson)); + verify(alarmCommonReduce, times(1)).reduceAndSendAlarm(any(Alert.class)); + + alertService.addNewAlertReportFromCloud("alicloud", reportJson); + reset(alarmCommonReduce); + verify(alarmCommonReduce, times(0)).reduceAndSendAlarm(any(Alert.class)); + } } From c0ef4a436eeffbf8be1c2080b70bc3e04037c1ba Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Sun, 4 Aug 2024 23:54:43 +0800 Subject: [PATCH 131/257] [refactor] move code from AlertConvergesController to AlertConvergeService (#2428) Co-authored-by: Calvin --- .../controller/AlertConvergesController.java | 33 +------------------ .../alert/service/AlertConvergeService.java | 13 +++++--- .../impl/AlertConvergeServiceImpl.java | 31 ++++++++++++++++- .../AlertConvergesControllerTest.java | 24 +++++++------- .../service/AlertConvergeServiceTest.java | 24 +++++++------- 5 files changed, 61 insertions(+), 64 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergesController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergesController.java index 3d6fb584e72..48334781155 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergesController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergesController.java @@ -21,9 +21,6 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.persistence.criteria.CriteriaBuilder; -import jakarta.persistence.criteria.Predicate; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import org.apache.hertzbeat.alert.service.AlertConvergeService; @@ -31,11 +28,7 @@ import 
org.apache.hertzbeat.common.entity.dto.Message; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.domain.Sort; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; -import org.springframework.util.StringUtils; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RequestMapping; @@ -63,31 +56,7 @@ public ResponseEntity>> getAlertConverges( @Parameter(description = "Sort mode: asc: ascending, desc: descending", example = "desc") @RequestParam(defaultValue = "desc") String order, @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list pages", example = "8") @RequestParam(defaultValue = "8") int pageSize) { - - Specification specification = (root, query, criteriaBuilder) -> { - List andList = new ArrayList<>(); - if (ids != null && !ids.isEmpty()) { - CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); - for (long id : ids) { - inPredicate.value(id); - } - andList.add(inPredicate); - } - if (StringUtils.hasText(search)) { - Predicate predicate = criteriaBuilder.or( - criteriaBuilder.like( - criteriaBuilder.lower(root.get("name")), - "%" + search.toLowerCase() + "%" - ) - ); - andList.add(predicate); - } - Predicate[] predicates = new Predicate[andList.size()]; - return criteriaBuilder.and(andList.toArray(predicates)); - }; - Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); - Page alertConvergePage = alertConvergeService.getAlertConverges(specification, pageRequest); + Page alertConvergePage = alertConvergeService.getAlertConverges(ids, search, sort, 
order, pageIndex, pageSize); return ResponseEntity.ok(Message.success(alertConvergePage)); } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertConvergeService.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertConvergeService.java index b41fdc5efb8..4d71a868b6a 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertConvergeService.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertConvergeService.java @@ -17,11 +17,10 @@ package org.apache.hertzbeat.alert.service; +import java.util.List; import java.util.Set; import org.apache.hertzbeat.common.entity.alerter.AlertConverge; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; /** * management interface service for alert converge @@ -67,9 +66,13 @@ public interface AlertConvergeService { /** * Dynamic conditional query - * @param specification Query conditions - * @param pageRequest Paging parameters + * @param convergeIds Alarm Converge ID List + * @param search Search Name + * @param sort Sort field + * @param order Sort mode: asc: ascending, desc: descending + * @param pageIndex List current page + * @param pageSize Number of list pages * @return The query results */ - Page getAlertConverges(Specification specification, PageRequest pageRequest); + Page getAlertConverges(List convergeIds, String search, String sort, String order, int pageIndex, int pageSize); } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertConvergeServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertConvergeServiceImpl.java index a4d05b47e56..bbfdf0592c9 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertConvergeServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertConvergeServiceImpl.java @@ -17,6 +17,10 @@ package 
org.apache.hertzbeat.alert.service.impl; +import jakarta.persistence.criteria.CriteriaBuilder; +import jakarta.persistence.criteria.Predicate; +import java.util.ArrayList; +import java.util.List; import java.util.Set; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.alert.dao.AlertConvergeDao; @@ -28,9 +32,11 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; +import org.springframework.data.domain.Sort; import org.springframework.data.jpa.domain.Specification; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.StringUtils; /** * implement for alert converge service @@ -72,7 +78,30 @@ public void deleteAlertConverges(Set convergeIds) throws RuntimeException } @Override - public Page getAlertConverges(Specification specification, PageRequest pageRequest) { + public Page getAlertConverges(List convergeIds, String search, String sort, String order, int pageIndex, int pageSize) { + Specification specification = (root, query, criteriaBuilder) -> { + List andList = new ArrayList<>(); + if (convergeIds != null && !convergeIds.isEmpty()) { + CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); + for (long id : convergeIds) { + inPredicate.value(id); + } + andList.add(inPredicate); + } + if (StringUtils.hasText(search)) { + Predicate predicate = criteriaBuilder.or( + criteriaBuilder.like( + criteriaBuilder.lower(root.get("name")), + "%" + search.toLowerCase() + "%" + ) + ); + andList.add(predicate); + } + Predicate[] predicates = new Predicate[andList.size()]; + return criteriaBuilder.and(andList.toArray(predicates)); + }; + Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); return alertConvergeDao.findAll(specification, pageRequest); } diff 
--git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java index 28088f2d467..3eb5eb97eab 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java @@ -17,10 +17,18 @@ package org.apache.hertzbeat.alert.controller; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; import java.util.Arrays; import java.util.HashSet; import java.util.List; - import org.apache.hertzbeat.alert.service.AlertConvergeService; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.alerter.AlertConverge; @@ -30,23 +38,13 @@ import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import org.springframework.http.MediaType; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageImpl; import org.springframework.data.domain.PageRequest; import org.springframework.data.domain.Sort; +import org.springframework.http.MediaType; import org.springframework.test.web.servlet.MockMvc; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doNothing; -import static 
org.mockito.Mockito.when; -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete; -import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; -import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; -import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; - -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; /** * test case for {@link AlertConvergesController} @@ -92,7 +90,7 @@ void testGetAlertConverges() throws Exception { alertConvergeList.size() ); - when(alertConvergeService.getAlertConverges(any(), any(PageRequest.class))).thenReturn(alertConvergePage); + when(alertConvergeService.getAlertConverges(null, null, "id", "desc", 0, 8)).thenReturn(alertConvergePage); mockMvc.perform(get("/api/alert/converges") .param("pageIndex", "0") diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java index e763a160d70..e01b716ab24 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java @@ -17,10 +17,14 @@ package org.apache.hertzbeat.alert.service; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.Collections; -import java.util.Set; import java.util.Optional; - +import java.util.Set; import org.apache.hertzbeat.alert.dao.AlertConvergeDao; import org.apache.hertzbeat.alert.service.impl.AlertConvergeServiceImpl; import org.apache.hertzbeat.common.entity.alerter.AlertConverge; @@ -35,13 +39,6 @@ import org.springframework.data.domain.Pageable; 
import org.springframework.data.jpa.domain.Specification; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * test case for {@link org.apache.hertzbeat.alert.service.impl.AlertConvergeServiceImpl} */ @@ -97,17 +94,18 @@ public void testDeleteAlertConverges() { @Test public void testGetAlertConverges() { - Specification specification = mock(Specification.class); - PageRequest pageRequest = PageRequest.of(0, 10); Page page = new PageImpl<>(Collections.emptyList()); when(alertConvergeDao.findAll( any(Specification.class), any(Pageable.class)) ).thenReturn(page); - Page result = alertConvergeService.getAlertConverges(specification, pageRequest); + Page result = alertConvergeService.getAlertConverges(null, null, "id", "desc", 1, 10); - verify(alertConvergeDao, times(1)).findAll(specification, pageRequest); + verify(alertConvergeDao, times(1)).findAll( + any(Specification.class), + any(PageRequest.class) + ); assertEquals(page, result); } From 98b8d95b927e36268323e542f9ca3f09affaac83 Mon Sep 17 00:00:00 2001 From: linDong <56677297@qq.com> Date: Mon, 5 Aug 2024 11:42:07 +0800 Subject: [PATCH 132/257] [improve] : Optimize queues to obtain data and prevent cpu idling (#2466) Co-authored-by: tomsun28 --- .../apache/hertzbeat/alert/calculate/CalculateAlarm.java | 2 +- .../common/queue/impl/InMemoryCommonDataQueue.java | 9 ++++----- .../manager/component/alerter/DispatcherAlarm.java | 1 + .../hertzbeat/warehouse/store/DataStorageDispatch.java | 5 ++++- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/calculate/CalculateAlarm.java b/alerter/src/main/java/org/apache/hertzbeat/alert/calculate/CalculateAlarm.java index 2542a58ac2f..ec75b4c5146 100644 --- 
a/alerter/src/main/java/org/apache/hertzbeat/alert/calculate/CalculateAlarm.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/calculate/CalculateAlarm.java @@ -123,7 +123,7 @@ private void startCalculate() { calculate(metricsData); } } catch (InterruptedException ignored) { - + Thread.currentThread().interrupt(); } catch (Exception e) { log.error("calculate alarm error: {}.", e.getMessage(), e); } diff --git a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java index 42a9fe8fd49..446ac6a84a1 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java +++ b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java @@ -20,7 +20,6 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.message.CollectRep; @@ -67,22 +66,22 @@ public void sendAlertsData(Alert alert) { @Override public Alert pollAlertsData() throws InterruptedException { - return alertDataQueue.poll(2, TimeUnit.SECONDS); + return alertDataQueue.take(); } @Override public CollectRep.MetricsData pollMetricsDataToAlerter() throws InterruptedException { - return metricsDataToAlertQueue.poll(2, TimeUnit.SECONDS); + return metricsDataToAlertQueue.take(); } @Override public CollectRep.MetricsData pollMetricsDataToPersistentStorage() throws InterruptedException { - return metricsDataToPersistentStorageQueue.poll(2, TimeUnit.SECONDS); + return metricsDataToPersistentStorageQueue.take(); } @Override public CollectRep.MetricsData pollMetricsDataToRealTimeStorage() throws InterruptedException { - return metricsDataToRealTimeStorageQueue.poll(2, TimeUnit.SECONDS); + return 
metricsDataToRealTimeStorageQueue.take(); } @Override diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java index c57d569716a..7aab58b8241 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java @@ -135,6 +135,7 @@ public void run() { } } catch (IgnoreException ignored) { } catch (InterruptedException e) { + Thread.currentThread().interrupt(); log.error(e.getMessage()); } catch (Exception exception) { log.error(exception.getMessage(), exception); diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/DataStorageDispatch.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/DataStorageDispatch.java index f9f6b69213e..fc1304ebefc 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/DataStorageDispatch.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/DataStorageDispatch.java @@ -61,6 +61,8 @@ private void startRealTimeDataStorage() { continue; } realTimeDataWriter.saveData(metricsData); + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); } catch (Exception e) { log.error(e.getMessage(), e); } @@ -78,8 +80,9 @@ protected void startPersistentDataStorage() { if (metricsData == null) { continue; } - historyDataWriter.ifPresent(dataWriter -> dataWriter.saveData(metricsData)); + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); } catch (Exception e) { log.error(e.getMessage(), e); } From a5b8f5924292c11defd7402be2043ccc117e6e7e Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 5 Aug 2024 12:00:27 +0800 Subject: [PATCH 133/257] feat fix log upload, when e2e fails it still uploads (#2464) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- 
.github/workflows/backend-build-test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/backend-build-test.yml b/.github/workflows/backend-build-test.yml index ecf9cfa4ad5..3ddbc454fe3 100644 --- a/.github/workflows/backend-build-test.yml +++ b/.github/workflows/backend-build-test.yml @@ -77,6 +77,7 @@ jobs: # upload application logs - name: Upload logs uses: actions/upload-artifact@v3 + if: always() with: name: hz-logs-${{ github.run_id }} path: e2e/logs/ From 477af0959fb62dc56ab106a279e1876e48b57e94 Mon Sep 17 00:00:00 2001 From: lingluojun <247677857yh@gmail.com> Date: Mon, 5 Aug 2024 12:34:18 +0800 Subject: [PATCH 134/257] [refactor] extracting common function for KafkaCommonDataQueue (#2453) Co-authored-by: tomsun28 --- .../entrance/internal/CollectJobService.java | 49 +++--- .../queue/impl/KafkaCommonDataQueue.java | 141 +++++------------- 2 files changed, 69 insertions(+), 121 deletions(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobService.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobService.java index 940cea20424..35d15b80d25 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobService.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobService.java @@ -17,12 +17,14 @@ package org.apache.hertzbeat.collector.dispatch.entrance.internal; -import java.util.ArrayList; import java.util.LinkedList; import java.util.List; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import org.apache.hertzbeat.collector.dispatch.DispatchProperties; import org.apache.hertzbeat.collector.dispatch.WorkerPool; import 
org.apache.hertzbeat.collector.dispatch.entrance.CollectServer; @@ -51,7 +53,7 @@ public class CollectJobService { private final WorkerPool workerPool; private final String collectorIdentity; - + private String mode = null; private CollectServer collectServer; @@ -59,18 +61,24 @@ public class CollectJobService { public CollectJobService(TimerDispatch timerDispatch, DispatchProperties properties, WorkerPool workerPool) { this.timerDispatch = timerDispatch; this.workerPool = workerPool; - if (properties != null && properties.getEntrance() != null && properties.getEntrance().getNetty() != null - && properties.getEntrance().getNetty().isEnabled()) { - mode = properties.getEntrance().getNetty().getMode(); - String collectorName = properties.getEntrance().getNetty().getIdentity(); - if (StringUtils.hasText(collectorName)) { - collectorIdentity = collectorName; - } else { - collectorIdentity = IpDomainUtil.getCurrentHostName() + COLLECTOR_STR; - log.info("user not config this collector identity, use [host name - host ip] default: {}.", collectorIdentity); - } - } else { + + Optional nettyPropertiesOptional = Optional.ofNullable(properties) + .map(DispatchProperties::getEntrance) + .map(DispatchProperties.EntranceProperties::getNetty) + .filter(DispatchProperties.EntranceProperties.NettyProperties::isEnabled); + + if (nettyPropertiesOptional.isEmpty()) { collectorIdentity = CommonConstants.MAIN_COLLECTOR_NODE; + return; + } + + DispatchProperties.EntranceProperties.NettyProperties nettyProperties = nettyPropertiesOptional.get(); + mode = nettyProperties.getMode(); + if (StringUtils.hasText(nettyProperties.getIdentity())) { + collectorIdentity = nettyProperties.getIdentity(); + } else { + collectorIdentity = IpDomainUtil.getCurrentHostName() + COLLECTOR_STR; + log.info("user not config this collector identity, use [host name - host ip] default: {}.", collectorIdentity); } } @@ -109,13 +117,12 @@ public void response(List responseMetrics) { public void 
collectSyncOneTimeJobData(Job oneTimeJob) { workerPool.executeJob(() -> { List metricsDataList = this.collectSyncJobData(oneTimeJob); - List jsons = new ArrayList<>(metricsDataList.size()); - for (CollectRep.MetricsData metricsData : metricsDataList) { - String json = ProtoJsonUtil.toJsonStr(metricsData); - if (json != null) { - jsons.add(json); - } - } + List jsons = CollectionUtils.emptyIfNull(metricsDataList) + .stream() + .map(ProtoJsonUtil::toJsonStr) + .filter(StringUtils::hasText) + .collect(Collectors.toList()); + String response = JsonUtil.toJson(jsons); ClusterMsg.Message message = ClusterMsg.Message.newBuilder() .setMsg(response) @@ -165,7 +172,7 @@ public void sendAsyncCollectData(CollectRep.MetricsData metricsData) { public String getCollectorIdentity() { return collectorIdentity; } - + public String getCollectorMode() { return mode; } diff --git a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java index 51cdb1f50fa..2ff82b5cdaf 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java +++ b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java @@ -45,6 +45,7 @@ import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Configuration; + /** * common data queue implement kafka */ @@ -53,22 +54,22 @@ @Slf4j public class KafkaCommonDataQueue implements CommonDataQueue, DisposableBean { + private final ReentrantLock alertLock = new ReentrantLock(); + private final ReentrantLock metricDataToAlertLock = new ReentrantLock(); + private final ReentrantLock metricDataToPersistentLock = new ReentrantLock(); + private final ReentrantLock metricDataToRealTimeStorageLock = new ReentrantLock(); + private final LinkedBlockingQueue alertDataQueue; + private final LinkedBlockingQueue metricsDataToAlertQueue; + 
private final LinkedBlockingQueue metricsDataToPersistentStorageQueue; + private final LinkedBlockingQueue metricsDataToRealTimeStorageQueue; + private final CommonProperties.KafkaProperties kafka; private KafkaProducer metricsDataProducer; private KafkaProducer alertDataProducer; private KafkaConsumer alertDataConsumer; private KafkaConsumer metricsDataToAlertConsumer; private KafkaConsumer metricsDataToPersistentStorageConsumer; private KafkaConsumer metricsDataToRealTimeStorageConsumer; - private final ReentrantLock lock1 = new ReentrantLock(); - private final ReentrantLock lock2 = new ReentrantLock(); - private final ReentrantLock lock3 = new ReentrantLock(); - private final ReentrantLock lock4 = new ReentrantLock(); - private final LinkedBlockingQueue alertDataQueue; - private final LinkedBlockingQueue metricsDataToAlertQueue; - private final LinkedBlockingQueue metricsDataToPersistentStorageQueue; - private final LinkedBlockingQueue metricsDataToRealTimeStorageQueue; - private final CommonProperties.KafkaProperties kafka; - + public KafkaCommonDataQueue(CommonProperties properties) { if (properties == null || properties.getQueue() == null || properties.getQueue().getKafka() == null) { log.error("init error, please config common.queue.kafka props in application.yml"); @@ -81,8 +82,8 @@ public KafkaCommonDataQueue(CommonProperties properties) { metricsDataToRealTimeStorageQueue = new LinkedBlockingQueue<>(); initDataQueue(); } - - private void initDataQueue(){ + + private void initDataQueue() { try { Map producerConfig = new HashMap<>(3); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getServers()); @@ -103,22 +104,20 @@ private void initDataQueue(){ alertConsumerConfig.put("group.id", "alert-consumer"); alertDataConsumer = new KafkaConsumer<>(alertConsumerConfig, new LongDeserializer(), new AlertDeserializer()); alertDataConsumer.subscribe(Collections.singletonList(kafka.getAlertsDataTopic())); - + Map metricsToAlertConsumerConfig = new 
HashMap<>(consumerConfig); metricsToAlertConsumerConfig.put("group.id", "metrics-alert-consumer"); metricsDataToAlertConsumer = new KafkaConsumer<>(metricsToAlertConsumerConfig, new LongDeserializer(), new KafkaMetricsDataDeserializer()); metricsDataToAlertConsumer.subscribe(Collections.singletonList(kafka.getMetricsDataTopic())); - + Map metricsToPersistentConsumerConfig = new HashMap<>(consumerConfig); metricsToPersistentConsumerConfig.put("group.id", "metrics-persistent-consumer"); - metricsDataToPersistentStorageConsumer = new KafkaConsumer<>(metricsToPersistentConsumerConfig, new LongDeserializer(), - new KafkaMetricsDataDeserializer()); + metricsDataToPersistentStorageConsumer = new KafkaConsumer<>(metricsToPersistentConsumerConfig, new LongDeserializer(), new KafkaMetricsDataDeserializer()); metricsDataToPersistentStorageConsumer.subscribe(Collections.singletonList(kafka.getMetricsDataTopic())); - + Map metricsToRealTimeConsumerConfig = new HashMap<>(consumerConfig); metricsToRealTimeConsumerConfig.put("group.id", "metrics-memory-consumer"); - metricsDataToRealTimeStorageConsumer = new KafkaConsumer<>(metricsToRealTimeConsumerConfig, new LongDeserializer(), - new KafkaMetricsDataDeserializer()); + metricsDataToRealTimeStorageConsumer = new KafkaConsumer<>(metricsToRealTimeConsumerConfig, new LongDeserializer(), new KafkaMetricsDataDeserializer()); metricsDataToRealTimeStorageConsumer.subscribe(Collections.singletonList(kafka.getMetricsDataTopic())); } catch (Exception e) { log.error("please config common.queue.kafka props correctly", e); @@ -137,110 +136,52 @@ public void sendAlertsData(Alert alert) { @Override public Alert pollAlertsData() throws InterruptedException { - Alert alert = alertDataQueue.poll(); - if (alert != null) { - return alert; - } - lock1.lockInterruptibly(); - try { - ConsumerRecords records = alertDataConsumer.poll(Duration.ofSeconds(1)); - int index = 0; - for (ConsumerRecord record : records) { - if (index == 0) { - alert = 
record.value(); - } else { - alertDataQueue.offer(record.value()); - } - index++; - } - alertDataConsumer.commitAsync(); - } catch (Exception e){ - log.error(e.getMessage()); - } finally { - lock1.unlock(); - } - return alert; + return genericPollDataFunction(alertDataQueue, alertDataConsumer, alertLock); + } @Override public CollectRep.MetricsData pollMetricsDataToAlerter() throws InterruptedException { - CollectRep.MetricsData metricsData = metricsDataToAlertQueue.poll(); - if (metricsData != null) { - return metricsData; - } - lock2.lockInterruptibly(); - try { - ConsumerRecords records = metricsDataToAlertConsumer.poll(Duration.ofSeconds(1)); - int index = 0; - for (ConsumerRecord record : records) { - if (index == 0) { - metricsData = record.value(); - } else { - metricsDataToAlertQueue.offer(record.value()); - } - index++; - } - metricsDataToAlertConsumer.commitAsync(); - } catch (Exception e){ - log.error(e.getMessage()); - } finally { - lock2.unlock(); - } - return metricsData; + return genericPollDataFunction(metricsDataToAlertQueue, metricsDataToAlertConsumer, metricDataToAlertLock); } @Override public CollectRep.MetricsData pollMetricsDataToPersistentStorage() throws InterruptedException { - CollectRep.MetricsData persistentStorageMetricsData = metricsDataToPersistentStorageQueue.poll(); - if (persistentStorageMetricsData != null) { - return persistentStorageMetricsData; - } - lock3.lockInterruptibly(); - try { - ConsumerRecords records = metricsDataToPersistentStorageConsumer.poll(Duration.ofSeconds(1)); - int index = 0; - for (ConsumerRecord record : records) { - if (index == 0) { - persistentStorageMetricsData = record.value(); - } else { - metricsDataToPersistentStorageQueue.offer(record.value()); - } - index++; - } - metricsDataToPersistentStorageConsumer.commitAsync(); - } catch (Exception e){ - log.error(e.getMessage()); - } finally { - lock3.unlock(); - } - return persistentStorageMetricsData; + return 
genericPollDataFunction(metricsDataToPersistentStorageQueue, metricsDataToPersistentStorageConsumer, metricDataToPersistentLock); } + @Override public CollectRep.MetricsData pollMetricsDataToRealTimeStorage() throws InterruptedException { - CollectRep.MetricsData realTimeMetricsData = metricsDataToRealTimeStorageQueue.poll(); - if (realTimeMetricsData != null) { - return realTimeMetricsData; + return genericPollDataFunction(metricsDataToRealTimeStorageQueue, metricsDataToRealTimeStorageConsumer, metricDataToRealTimeStorageLock); + } + + + public T genericPollDataFunction(LinkedBlockingQueue dataQueue, KafkaConsumer dataConsumer, ReentrantLock lock) throws InterruptedException { + + T pollData = dataQueue.poll(); + if (pollData != null) { + return pollData; } - lock4.lockInterruptibly(); + lock.lockInterruptibly(); try { - ConsumerRecords records = metricsDataToRealTimeStorageConsumer.poll(Duration.ofSeconds(1)); + ConsumerRecords records = dataConsumer.poll(Duration.ofSeconds(1)); int index = 0; - for (ConsumerRecord record : records) { + for (ConsumerRecord record : records) { if (index == 0) { - realTimeMetricsData = record.value(); + pollData = record.value(); } else { - metricsDataToRealTimeStorageQueue.offer(record.value()); + dataQueue.offer(record.value()); } index++; } - metricsDataToRealTimeStorageConsumer.commitAsync(); - } catch (Exception e){ + dataConsumer.commitAsync(); + } catch (Exception e) { log.error(e.getMessage()); } finally { - lock4.unlock(); + lock.unlock(); } - return realTimeMetricsData; + return pollData; } @Override From 4b5a97044c8eb4ec8ac150d6156ae940904da60e Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 5 Aug 2024 14:37:05 +0800 Subject: [PATCH 135/257] [improve] add missed field i18n (#2465) Co-authored-by: tomsun28 --- manager/src/main/resources/define/app-greenplum.yml | 3 +++ manager/src/main/resources/define/app-kingbase.yml | 3 +++ manager/src/main/resources/define/app-postgresql.yml | 3 +++ 3 files changed, 9 insertions(+) 
diff --git a/manager/src/main/resources/define/app-greenplum.yml b/manager/src/main/resources/define/app-greenplum.yml index a590bb9417a..d5ecbeea201 100644 --- a/manager/src/main/resources/define/app-greenplum.yml +++ b/manager/src/main/resources/define/app-greenplum.yml @@ -716,6 +716,9 @@ metrics: fields: - field: allocated type: 0 + i18n: + zh-CN: 已分配 + en-US: Allocated - field: fsync_calls_by_backend type: 0 i18n: diff --git a/manager/src/main/resources/define/app-kingbase.yml b/manager/src/main/resources/define/app-kingbase.yml index eb6e6b01f87..81d754942f5 100644 --- a/manager/src/main/resources/define/app-kingbase.yml +++ b/manager/src/main/resources/define/app-kingbase.yml @@ -716,6 +716,9 @@ metrics: fields: - field: allocated type: 0 + i18n: + zh-CN: 已分配 + en-US: Allocated - field: fsync_calls_by_backend type: 0 i18n: diff --git a/manager/src/main/resources/define/app-postgresql.yml b/manager/src/main/resources/define/app-postgresql.yml index ead5bd80da3..ebd487998a6 100644 --- a/manager/src/main/resources/define/app-postgresql.yml +++ b/manager/src/main/resources/define/app-postgresql.yml @@ -716,6 +716,9 @@ metrics: fields: - field: allocated type: 0 + i18n: + zh-CN: 已分配 + en-US: Allocated - field: fsync_calls_by_backend type: 0 i18n: From 9ac45ab34381a96fa79de05f67ded6277743f103 Mon Sep 17 00:00:00 2001 From: linDong <56677297@qq.com> Date: Mon, 5 Aug 2024 15:07:38 +0800 Subject: [PATCH 136/257] [improve] use constant (#2458) Co-authored-by: tomsun28 --- .../dispatch/export/NettyDataQueue.java | 16 ++++-- .../common/constants/DataQueueConstants.java | 54 +++++++++++++++++++ .../queue/impl/InMemoryCommonDataQueue.java | 8 ++- .../queue/impl/KafkaCommonDataQueue.java | 7 ++- .../queue/impl/RedisCommonDataQueue.java | 7 +-- 5 files changed, 84 insertions(+), 8 deletions(-) create mode 100644 common/src/main/java/org/apache/hertzbeat/common/constants/DataQueueConstants.java diff --git 
a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueue.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueue.java index 68f7c5c6212..5fbcb4a7ba5 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueue.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueue.java @@ -33,8 +33,8 @@ @Configuration @ConditionalOnProperty( prefix = NettyDataQueue.NETTY_DATA_QUEUE_PREFIX, - name = "type", - havingValue = "netty" + name = NettyDataQueue.NAME, + havingValue = NettyDataQueue.NETTY ) public class NettyDataQueue implements CommonDataQueue { @@ -42,7 +42,17 @@ public class NettyDataQueue implements CommonDataQueue { * netty data queue prefix. */ protected static final String NETTY_DATA_QUEUE_PREFIX = "common.queue"; - + + /** + * name constants + */ + protected static final String NAME = "type"; + + /** + * havingValue constants + */ + protected static final String NETTY = "netty"; + private final CollectJobService collectJobService; public NettyDataQueue(CollectJobService collectJobService) { diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/DataQueueConstants.java b/common/src/main/java/org/apache/hertzbeat/common/constants/DataQueueConstants.java new file mode 100644 index 00000000000..c8e51b68645 --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/DataQueueConstants.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.constants; + +/** + * Queue dependent constant + */ +public interface DataQueueConstants { + + /** + * Queue name prefix + */ + String PREFIX = "common.queue"; + + /** + * name + */ + String NAME = "type"; + + /** + * kafka + */ + String KAFKA = "kafka"; + + /** + * Local memory + */ + String IN_MEMORY = "memory"; + + /** + * redis + */ + String REDIS = "redis"; + + /** + * netty + */ + String NETTY = "netty"; +} diff --git a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java index 446ac6a84a1..2156548a502 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java +++ b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueue.java @@ -21,6 +21,7 @@ import java.util.Map; import java.util.concurrent.LinkedBlockingQueue; import lombok.extern.slf4j.Slf4j; +import org.apache.hertzbeat.common.constants.DataQueueConstants; import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.queue.CommonDataQueue; @@ -33,7 +34,12 @@ * common data queue implement memory */ @Configuration -@ConditionalOnProperty(prefix = "common.queue", name = "type", havingValue = "memory", matchIfMissing = true) +@ConditionalOnProperty( + prefix = DataQueueConstants.PREFIX, + name = DataQueueConstants.NAME, + havingValue = DataQueueConstants.IN_MEMORY, + 
matchIfMissing = true +) @Slf4j @Primary public class InMemoryCommonDataQueue implements CommonDataQueue, DisposableBean { diff --git a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java index 2ff82b5cdaf..048f5a6415e 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java +++ b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueue.java @@ -25,6 +25,7 @@ import java.util.concurrent.locks.ReentrantLock; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.config.CommonProperties; +import org.apache.hertzbeat.common.constants.DataQueueConstants; import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.queue.CommonDataQueue; @@ -50,7 +51,11 @@ * common data queue implement kafka */ @Configuration -@ConditionalOnProperty(prefix = "common.queue", name = "type", havingValue = "kafka") +@ConditionalOnProperty( + prefix = DataQueueConstants.PREFIX, + name = DataQueueConstants.NAME, + havingValue = DataQueueConstants.KAFKA +) @Slf4j public class KafkaCommonDataQueue implements CommonDataQueue, DisposableBean { diff --git a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueue.java b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueue.java index 701b7dd3529..1283489cea3 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueue.java +++ b/common/src/main/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueue.java @@ -23,6 +23,7 @@ import io.lettuce.core.api.sync.RedisCommands; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.config.CommonProperties; +import org.apache.hertzbeat.common.constants.DataQueueConstants; import 
org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.queue.CommonDataQueue; @@ -38,9 +39,9 @@ @Slf4j @Configuration @ConditionalOnProperty( - prefix = "common.queue", - name = "type", - havingValue = "redis" + prefix = DataQueueConstants.PREFIX, + name = DataQueueConstants.NAME, + havingValue = DataQueueConstants.REDIS ) public class RedisCommonDataQueue implements CommonDataQueue, DisposableBean { From 35a8ec42752b9266f55d0168c13bb94c881924c0 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 5 Aug 2024 15:10:31 +0800 Subject: [PATCH 137/257] [Improve] add AiServiceFactoryImpl unit test (#2459) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../manager/pojo/dto/AliAiResponse.java | 2 +- .../service/impl/AlibabaAiServiceImpl.java | 2 +- .../manager/service/AiServiceFactoryTest.java | 125 ++++++++++++++++++ 3 files changed, 127 insertions(+), 2 deletions(-) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/AliAiResponse.java b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/AliAiResponse.java index 2654879aaf5..e2562cc5c02 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/AliAiResponse.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/pojo/dto/AliAiResponse.java @@ -48,7 +48,7 @@ public class AliAiResponse { @Data @AllArgsConstructor @NoArgsConstructor - public class AliAiOutput { + public static class AliAiOutput { /** * response message diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AlibabaAiServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AlibabaAiServiceImpl.java index 55dce71c657..2cac8b4ef2b 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AlibabaAiServiceImpl.java +++ 
b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AlibabaAiServiceImpl.java @@ -122,7 +122,7 @@ public Flux> requestAi(String text) { private void checkParam(String param, String apiKey, String model) { Assert.notNull(param, "text is null"); - Assert.notNull(param, "model is null"); + Assert.notNull(model, "model is null"); Assert.notNull(apiKey, "ai.api-key is null"); } } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java new file mode 100644 index 00000000000..067585ce159 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.hertzbeat.common.constants.AiTypeEnum; +import org.apache.hertzbeat.manager.service.impl.AiServiceFactoryImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AiServiceFactoryImpl} + */ + +@ExtendWith(MockitoExtension.class) +class AiServiceFactoryTest { + + @Mock + private List aiService; + + @Mock + private AiService aiService1; + + @Mock + private AiService aiService2; + + @InjectMocks + private AiServiceFactoryImpl aiServiceFactory; + + @BeforeEach + public void setup() { + + when(aiService1.getType()).thenReturn(AiTypeEnum.alibabaAi); + when(aiService2.getType()).thenReturn(AiTypeEnum.zhiPu); + + aiService = Arrays.asList(aiService1, aiService2); + ReflectionTestUtils.setField(aiServiceFactory, "aiService", aiService); + + aiServiceFactory.init(); + } + + @Test + public void testInit() { + + Map expectedMap = aiService.stream() + .collect(Collectors.toMap(AiService::getType, Function.identity())); + + Map actualMap = (Map) ReflectionTestUtils.getField(aiServiceFactory, "aiServiceFactoryMap"); + + assertEquals(expectedMap, actualMap); + } + + @Test + public void testGetAiServiceImplBean_Success() { + + AiService service = 
aiServiceFactory.getAiServiceImplBean(AiTypeEnum.alibabaAi + ""); + assertNotNull(service); + assertEquals(aiService1, service); + + service = aiServiceFactory.getAiServiceImplBean(AiTypeEnum.zhiPu + ""); + assertNotNull(service); + assertEquals(aiService2, service); + } + + @Test + public void testGetAiServiceImplBeanTypeNotFound() { + + Exception exception = assertThrows( + IllegalArgumentException.class, + () -> aiServiceFactory.getAiServiceImplBean("InvalidType") + ); + + assertTrue(exception.getMessage().contains("The current type is not supported")); + } + + @Test + public void testGetAiServiceImplBeanNoBean() { + + aiServiceFactory.init(); + + when(aiService1.getType()).thenReturn(AiTypeEnum.kimiAi); + List singleServiceList = Collections.singletonList(aiService1); + ReflectionTestUtils.setField(aiServiceFactory, "aiService", singleServiceList); + aiServiceFactory.init(); + + Exception exception = assertThrows( + IllegalArgumentException.class, + () -> aiServiceFactory.getAiServiceImplBean(AiTypeEnum.sparkDesk + "") + ); + + assertTrue(exception.getMessage().contains("No bean for current type found")); + } + +} From 033240341b3ec87ba3a9a13bb47d192357f9e1ec Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 5 Aug 2024 15:36:33 +0800 Subject: [PATCH 138/257] [Improve] add ExcelImExportServiceImpl unit test (#2460) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../impl/AbstractImExportServiceImpl.java | 11 +- .../impl/ExcelImExportServiceImpl.java | 5 +- .../manager/service/CollectorServiceTest.java | 2 - .../manager/service/ConfigServiceTest.java | 2 - .../service/ExcelImExportServiceTest.java | 111 ++++++++++++++++++ 5 files changed, 116 insertions(+), 15 deletions(-) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java 
b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java index 1d1a682424d..079f67bc276 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java @@ -48,7 +48,7 @@ * class AbstractImExportServiceImpl */ @Slf4j -abstract class AbstractImExportServiceImpl implements ImExportService { +public abstract class AbstractImExportServiceImpl implements ImExportService { @Resource @Lazy @@ -173,7 +173,7 @@ protected String fileNamePrefix() { @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) @ExcelTarget(value = "ExportMonitorDTO") - protected static class ExportMonitorDTO { + public static class ExportMonitorDTO { @ExcelEntity(name = "Monitor") private MonitorDTO monitor; @ExcelCollection(name = "Params") @@ -184,12 +184,11 @@ protected static class ExportMonitorDTO { private Boolean detected; } - @Data @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) @ExcelTarget(value = "MonitorDTO") - protected static class MonitorDTO { + public static class MonitorDTO { @Excel(name = "Name") private String name; @Excel(name = "App") @@ -208,12 +207,11 @@ protected static class MonitorDTO { private String collector; } - @Data @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) @ExcelTarget(value = "ParamDTO") - protected static class ParamDTO { + public static class ParamDTO { @Excel(name = "Field") private String field; @Excel(name = "Type") @@ -222,5 +220,4 @@ protected static class ParamDTO { private String value; } - } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java index f9aa05fa446..a2a70ec124c 100644 --- 
a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java @@ -75,7 +75,6 @@ public String getFileName() { * @param is input stream * @return form */ - @Override public List parseImport(InputStream is) { try (Workbook workbook = WorkbookFactory.create(is)) { @@ -210,14 +209,13 @@ private Byte getCellValueAsByte(Cell cell) { return null; } - /** * Export Configuration to Output Stream * @param monitorList config list * @param os output stream */ @Override - void writeOs(List monitorList, OutputStream os) { + public void writeOs(List monitorList, OutputStream os) { try { Workbook workbook = WorkbookFactory.create(true); String sheetName = "Export Monitor"; @@ -318,5 +316,4 @@ void writeOs(List monitorList, OutputStream os) { } } - } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java index 9a93590d6eb..8e011fe0303 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java @@ -17,7 +17,6 @@ package org.apache.hertzbeat.manager.service; - import org.apache.hertzbeat.common.entity.manager.Collector; import org.apache.hertzbeat.manager.dao.CollectorDao; import org.apache.hertzbeat.manager.dao.CollectorMonitorBindDao; @@ -41,7 +40,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - /** * Test case for {@link CollectorService} */ diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java index d0b16d4cca6..ed7f1ac4e76 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java +++ 
b/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java @@ -17,7 +17,6 @@ package org.apache.hertzbeat.manager.service; - import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.any; @@ -40,7 +39,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; - /** * Test case for {@link ConfigService} */ diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java new file mode 100644 index 00000000000..97fa7c11c97 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.List; +import org.apache.hertzbeat.manager.service.impl.AbstractImExportServiceImpl; +import org.apache.hertzbeat.manager.service.impl.ExcelImExportServiceImpl; +import org.apache.poi.ss.usermodel.Workbook; +import org.apache.poi.ss.usermodel.WorkbookFactory; +import org.apache.poi.xssf.usermodel.XSSFWorkbook; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Test case for {@link ExcelImExportServiceImpl} + */ + +class ExcelImExportServiceImplTest { + + @InjectMocks + private ExcelImExportServiceImpl excelImExportService; + + @BeforeEach + public void setUp() { + + MockitoAnnotations.openMocks(this); + } + + @Test + public void testParseImport() throws IOException { + + Workbook workbook = new XSSFWorkbook(); + var sheet = workbook.createSheet(); + var headerRow = sheet.createRow(0); + headerRow.createCell(0).setCellValue("name"); + headerRow.createCell(1).setCellValue("app"); + headerRow.createCell(2).setCellValue("host"); + var dataRow = sheet.createRow(1); + dataRow.createCell(0).setCellValue("Monitor1"); + dataRow.createCell(1).setCellValue("App1"); + dataRow.createCell(2).setCellValue("Host1"); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + workbook.write(bos); + ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray()); + + List result = excelImExportService.parseImport(bis); + assertNotNull(result); + assertEquals(1, result.size()); + assertEquals("Monitor1", result.get(0).getMonitor().getName()); + } + + @Test + public void testWriteOs() throws IOException { + + AbstractImExportServiceImpl.MonitorDTO monitorDTO = new 
AbstractImExportServiceImpl.MonitorDTO(); + monitorDTO.setName("Monitor1"); + monitorDTO.setApp("App1"); + monitorDTO.setHost("Host1"); + monitorDTO.setIntervals(10); + monitorDTO.setStatus((byte) 1); + monitorDTO.setTags(List.of(1L, 2L)); + + AbstractImExportServiceImpl.ParamDTO paramDTO = new AbstractImExportServiceImpl.ParamDTO(); + paramDTO.setField("field1"); + paramDTO.setValue("value1"); + paramDTO.setType((byte) 1); + + AbstractImExportServiceImpl.ExportMonitorDTO exportMonitorDTO = new AbstractImExportServiceImpl.ExportMonitorDTO(); + exportMonitorDTO.setMonitor(monitorDTO); + exportMonitorDTO.setParams(List.of(paramDTO)); + + List monitorList = List.of(exportMonitorDTO); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + excelImExportService.writeOs(monitorList, bos); + + ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray()); + Workbook workbook = WorkbookFactory.create(bis); + var sheet = workbook.getSheetAt(0); + var dataRow = sheet.getRow(1); + + assertEquals("Monitor1", dataRow.getCell(0).getStringCellValue()); + assertEquals("App1", dataRow.getCell(1).getStringCellValue()); + assertEquals("Host1", dataRow.getCell(2).getStringCellValue()); + } + +} From 67983c27021ecde697b11b1b90619e90827100e1 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 5 Aug 2024 15:42:54 +0800 Subject: [PATCH 139/257] [Improve] add JsonImExportServiceImpl unit test (#2461) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../service/impl/JsonImExportServiceImpl.java | 7 +- .../service/JsonImExportServiceTest.java | 106 ++++++++++++++++++ 2 files changed, 109 insertions(+), 4 deletions(-) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/JsonImExportServiceTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java index 359297f9e16..5cf6dcc3e2e 100644 --- 
a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java @@ -38,10 +38,9 @@ public class JsonImExportServiceImpl extends AbstractImExportServiceImpl { private final ObjectMapper objectMapper; @Override - List parseImport(InputStream is) { + public List parseImport(InputStream is) { try { - return objectMapper.readValue(is, new TypeReference<>() { - }); + return objectMapper.readValue(is, new TypeReference<>(){}); } catch (IOException ex) { log.error("import monitor failed.", ex); throw new RuntimeException("import monitor failed"); @@ -49,7 +48,7 @@ List parseImport(InputStream is) { } @Override - void writeOs(List monitorList, OutputStream os) { + public void writeOs(List monitorList, OutputStream os) { try { objectMapper.writeValue(os, monitorList); } catch (IOException ex) { diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/JsonImExportServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/JsonImExportServiceTest.java new file mode 100644 index 00000000000..5724c826c0a --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/JsonImExportServiceTest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.service; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.type.ResolvedType; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.manager.service.impl.AbstractImExportServiceImpl; +import org.apache.hertzbeat.manager.service.impl.JsonImExportServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Test case for {@link JsonImExportServiceImpl} + */ + +class JsonImExportServiceTest { + + @InjectMocks + private JsonImExportServiceImpl jsonImExportService; + + @Mock + private ObjectMapper objectMapper; + + @BeforeEach + public void setUp() { + + MockitoAnnotations.openMocks(this); + + ReflectionTestUtils.setField(jsonImExportService, "objectMapper", objectMapper); + } + + @Test + void testParseImport() throws IOException { + + String json = "[{}]"; + ByteArrayInputStream bis = new ByteArrayInputStream(json.getBytes()); + + AbstractImExportServiceImpl.MonitorDTO monitorDTO = new AbstractImExportServiceImpl.MonitorDTO(); + + AbstractImExportServiceImpl.ExportMonitorDTO exportMonitorDTO = new 
AbstractImExportServiceImpl.ExportMonitorDTO(); + exportMonitorDTO.setMonitor(monitorDTO); + + List expectedList = List.of(exportMonitorDTO); + + when(objectMapper.readValue(any(JsonParser.class), any(ResolvedType.class))).thenReturn(expectedList); + + List result = jsonImExportService.parseImport(bis); + assertNull(result); + } + + @Test + public void testWriteOs() throws IOException { + + AbstractImExportServiceImpl.MonitorDTO monitorDTO = new AbstractImExportServiceImpl.MonitorDTO(); + monitorDTO.setName("Monitor1"); + monitorDTO.setApp("App1"); + monitorDTO.setHost("Host1"); + + AbstractImExportServiceImpl.ExportMonitorDTO exportMonitorDTO = new AbstractImExportServiceImpl.ExportMonitorDTO(); + exportMonitorDTO.setMonitor(monitorDTO); + + List monitorList = List.of(exportMonitorDTO); + + doNothing().when(objectMapper).writeValue(any(OutputStream.class), anyList()); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + jsonImExportService.writeOs(monitorList, bos); + + verify(objectMapper, times(1)).writeValue(any(OutputStream.class), eq(monitorList)); + } + +} From e560502d8e3ff4d12ac099254cba7aef1078c5f4 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Mon, 5 Aug 2024 23:52:40 +0800 Subject: [PATCH 140/257] [refactor] move code from AlertsController to AlertService (#2435) Co-authored-by: tomsun28 Co-authored-by: Calvin --- .../alert/controller/AlertsController.java | 43 +------------------ .../hertzbeat/alert/service/AlertService.java | 14 ++++-- .../alert/service/impl/AlertServiceImpl.java | 37 +++++++++++++++- .../controller/AlertsControllerTest.java | 43 +++++++++---------- 4 files changed, 68 insertions(+), 69 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertsController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertsController.java index 65aac1e0419..d2970b3eaae 100644 --- 
a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertsController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertsController.java @@ -21,9 +21,6 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.persistence.criteria.CriteriaBuilder; -import jakarta.persistence.criteria.Predicate; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import org.apache.hertzbeat.alert.dto.AlertSummary; @@ -32,9 +29,6 @@ import org.apache.hertzbeat.common.entity.dto.Message; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.domain.Sort; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; @@ -67,41 +61,8 @@ public ResponseEntity>> getAlerts( @Parameter(description = "Sort Type", example = "desc") @RequestParam(defaultValue = "desc") String order, @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list pagination", example = "8") @RequestParam(defaultValue = "8") int pageSize) { - - Specification specification = (root, query, criteriaBuilder) -> { - List andList = new ArrayList<>(); - - if (ids != null && !ids.isEmpty()) { - CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); - for (long id : ids) { - inPredicate.value(id); - } - andList.add(inPredicate); - } - if (monitorId != null) { - Predicate predicate = criteriaBuilder.like(root.get("tags").as(String.class), "%" + monitorId + "%"); - andList.add(predicate); - } - if (priority != null) { - Predicate predicate = 
criteriaBuilder.equal(root.get("priority"), priority); - andList.add(predicate); - } - if (status != null) { - Predicate predicate = criteriaBuilder.equal(root.get("status"), status); - andList.add(predicate); - } - if (content != null && !content.isEmpty()) { - Predicate predicateContent = criteriaBuilder.like(root.get("content"), "%" + content + "%"); - andList.add(predicateContent); - } - Predicate[] predicates = new Predicate[andList.size()]; - return criteriaBuilder.and(andList.toArray(predicates)); - }; - Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); - Page alertPage = alertService.getAlerts(specification, pageRequest); - Message> message = Message.success(alertPage); - return ResponseEntity.ok(message); + Page alertPage = alertService.getAlerts(ids, monitorId, priority, status, content, sort, order, pageIndex, pageSize); + return ResponseEntity.ok(Message.success(alertPage)); } @DeleteMapping diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java index 6ee54fac3b8..67aa0b59f4a 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertService.java @@ -23,7 +23,6 @@ import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.dto.AlertReport; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; import org.springframework.data.jpa.domain.Specification; /** @@ -40,11 +39,18 @@ public interface AlertService { /** * Dynamic conditional query - * @param specification Query conditions - * @param pageRequest pagination parameters + * @param alarmIds Alarm ID List + * @param monitorId Monitor ID + * @param priority Alarm level + * @param status Alarm Status + * @param content 
Alarm content fuzzy query + * @param sort Sort field + * @param order Sort Type + * @param pageIndex List current page + * @param pageSize Number of list pagination * @return search result */ - Page getAlerts(Specification specification, PageRequest pageRequest); + Page getAlerts(List alarmIds, Long monitorId, Byte priority, Byte status, String content, String sort, String order, int pageIndex, int pageSize); /** * Delete alarms in batches according to the alarm ID list diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java index 8f1374d6fdc..b411ecdd810 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertServiceImpl.java @@ -17,11 +17,14 @@ package org.apache.hertzbeat.alert.service.impl; +import jakarta.persistence.criteria.CriteriaBuilder; +import jakarta.persistence.criteria.Predicate; import java.math.BigDecimal; import java.math.RoundingMode; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneId; +import java.util.ArrayList; import java.util.Date; import java.util.HashSet; import java.util.List; @@ -42,6 +45,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; +import org.springframework.data.domain.Sort; import org.springframework.data.jpa.domain.Specification; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; @@ -66,7 +70,38 @@ public void addAlert(Alert alert) throws RuntimeException { } @Override - public Page getAlerts(Specification specification, PageRequest pageRequest) { + public Page getAlerts(List alarmIds, Long monitorId, Byte priority, Byte status, String content, String sort, String order, int pageIndex, 
int pageSize) { + Specification specification = (root, query, criteriaBuilder) -> { + List andList = new ArrayList<>(); + + if (alarmIds != null && !alarmIds.isEmpty()) { + CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); + for (long id : alarmIds) { + inPredicate.value(id); + } + andList.add(inPredicate); + } + if (monitorId != null) { + Predicate predicate = criteriaBuilder.like(root.get("tags").as(String.class), "%" + monitorId + "%"); + andList.add(predicate); + } + if (priority != null) { + Predicate predicate = criteriaBuilder.equal(root.get("priority"), priority); + andList.add(predicate); + } + if (status != null) { + Predicate predicate = criteriaBuilder.equal(root.get("status"), status); + andList.add(predicate); + } + if (content != null && !content.isEmpty()) { + Predicate predicateContent = criteriaBuilder.like(root.get("content"), "%" + content + "%"); + andList.add(predicateContent); + } + Predicate[] predicates = new Predicate[andList.size()]; + return criteriaBuilder.and(andList.toArray(predicates)); + }; + Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); return alertDao.findAll(specification, pageRequest); } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java index c35ee78f02b..e1044206617 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java @@ -39,7 +39,7 @@ import org.springframework.data.domain.PageImpl; import org.springframework.data.domain.PageRequest; import org.springframework.data.domain.Sort; -import org.springframework.data.jpa.domain.Specification; +import org.springframework.http.MediaType; import org.springframework.test.web.servlet.MockMvc; 
import org.springframework.test.web.servlet.request.MockMvcRequestBuilders; import org.springframework.test.web.servlet.setup.MockMvcBuilders; @@ -67,40 +67,37 @@ void setUp() { ids = LongStream.rangeClosed(1, 10).boxed().collect(Collectors.toList()); } - // todo: fix this test + @Test void getAlerts() throws Exception { String sortField = "id"; - String orderType = "asc"; + String orderType = "desc"; + Byte priority = 1; + Byte status = 1; + Long monitorId = 1L; + String content = "test"; int pageIndex = 0; int pageSize = 10; - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, Sort.by(new Sort.Order(Sort.Direction.fromString(orderType), sortField))); - Page alertPage = new PageImpl<>(Collections.singletonList(Alert.builder().build())); - Mockito.when( - alertService.getAlerts( - Mockito.any(Specification.class) - , Mockito.argThat( - argument -> - argument.getPageNumber() == pageRequest.getPageNumber() - && argument.getPageSize() == pageRequest.getPageSize() - && argument.getSort().equals(pageRequest.getSort()) - ) - ) - ) + + Page alertPage = new PageImpl<>( + Collections.singletonList(Alert.builder().build()), + PageRequest.of(pageIndex, pageSize, Sort.by(sortField).descending()), + ids.size() + ); + Mockito.when(alertService.getAlerts(ids, monitorId, priority, status, content, sortField, orderType, pageIndex, pageSize)) .thenReturn(alertPage); - mockMvc.perform( - MockMvcRequestBuilders + mockMvc.perform(MockMvcRequestBuilders .get("/api/alerts") .param("ids", ids.stream().map(String::valueOf).collect(Collectors.joining(","))) - .param("monitorId", "1") - .param("priority", "1") - .param("status", "1") - .param("content", "test") + .param("monitorId", String.valueOf(monitorId)) + .param("priority", String.valueOf(priority)) + .param("status", String.valueOf(status)) + .param("content", content) .param("sort", sortField) .param("order", orderType) .param("pageIndex", String.valueOf(pageIndex)) .param("pageSize", String.valueOf(pageSize)) - ) + 
.accept(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andExpect(jsonPath("$.data.content.length()").value(1)) From 111f0f6b99f607ed6f25a62fbf0c4c6cd1107493 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Tue, 6 Aug 2024 00:03:43 +0800 Subject: [PATCH 141/257] [refactor] move code from AlertSilencesController to AlertSilenceService (#2436) Co-authored-by: Calvin --- .../controller/AlertSilencesController.java | 36 ++----------------- .../alert/service/AlertSilenceService.java | 13 ++++--- .../service/impl/AlertSilenceServiceImpl.java | 31 +++++++++++++++- .../service/AlertSilenceServiceTest.java | 30 ++++++++++------ 4 files changed, 60 insertions(+), 50 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilencesController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilencesController.java index 60fa156ea00..1b209b1093c 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilencesController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilencesController.java @@ -21,9 +21,6 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.persistence.criteria.CriteriaBuilder; -import jakarta.persistence.criteria.Predicate; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import org.apache.hertzbeat.alert.service.AlertSilenceService; @@ -31,11 +28,7 @@ import org.apache.hertzbeat.common.entity.dto.Message; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.domain.Sort; -import org.springframework.data.jpa.domain.Specification; import 
org.springframework.http.ResponseEntity; -import org.springframework.util.StringUtils; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RequestMapping; @@ -63,33 +56,8 @@ public ResponseEntity>> getAlertSilences( @Parameter(description = "Sort mode: asc: ascending, desc: descending", example = "desc") @RequestParam(defaultValue = "desc") String order, @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list pages", example = "8") @RequestParam(defaultValue = "8") int pageSize) { - - Specification specification = (root, query, criteriaBuilder) -> { - List andList = new ArrayList<>(); - if (ids != null && !ids.isEmpty()) { - CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); - for (long id : ids) { - inPredicate.value(id); - } - andList.add(inPredicate); - } - if (StringUtils.hasText(search)) { - Predicate predicate = criteriaBuilder.or( - criteriaBuilder.like( - criteriaBuilder.lower(root.get("name")), - "%" + search.toLowerCase() + "%" - ) - ); - andList.add(predicate); - } - Predicate[] predicates = new Predicate[andList.size()]; - return criteriaBuilder.and(andList.toArray(predicates)); - }; - Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); - Page alertSilencePage = alertSilenceService.getAlertSilences(specification, pageRequest); - Message> message = Message.success(alertSilencePage); - return ResponseEntity.ok(message); + Page alertSilencePage = alertSilenceService.getAlertSilences(ids, search, sort, order, pageIndex, pageSize); + return ResponseEntity.ok(Message.success(alertSilencePage)); } @DeleteMapping diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertSilenceService.java 
b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertSilenceService.java index 3e7e114e4f1..36846b6b44d 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertSilenceService.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/AlertSilenceService.java @@ -17,11 +17,10 @@ package org.apache.hertzbeat.alert.service; +import java.util.List; import java.util.Set; import org.apache.hertzbeat.common.entity.alerter.AlertSilence; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; /** * management interface service for alert silence @@ -67,9 +66,13 @@ public interface AlertSilenceService { /** * Dynamic conditional query - * @param specification Query conditions - * @param pageRequest Paging parameters + * @param silenceIds Alarm Silence ID + * @param search Search Name + * @param sort Sort field + * @param order Sort mode: asc: ascending, desc: descending + * @param pageIndex List current page + * @param pageSize Number of list pages * @return The query results */ - Page getAlertSilences(Specification specification, PageRequest pageRequest); + Page getAlertSilences(List silenceIds, String search, String sort, String order, int pageIndex, int pageSize); } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertSilenceServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertSilenceServiceImpl.java index a8a347af5b4..9a960863a12 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertSilenceServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertSilenceServiceImpl.java @@ -17,7 +17,11 @@ package org.apache.hertzbeat.alert.service.impl; +import jakarta.persistence.criteria.CriteriaBuilder; +import jakarta.persistence.criteria.Predicate; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; 
import java.util.Set; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.alert.dao.AlertSilenceDao; @@ -29,9 +33,11 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; +import org.springframework.data.domain.Sort; import org.springframework.data.jpa.domain.Specification; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.StringUtils; /** * management interface service implement for alert silence @@ -77,7 +83,30 @@ public void deleteAlertSilences(Set silenceIds) throws RuntimeException { } @Override - public Page getAlertSilences(Specification specification, PageRequest pageRequest) { + public Page getAlertSilences(List silenceIds, String search, String sort, String order, int pageIndex, int pageSize) { + Specification specification = (root, query, criteriaBuilder) -> { + List andList = new ArrayList<>(); + if (silenceIds != null && !silenceIds.isEmpty()) { + CriteriaBuilder.In inPredicate = criteriaBuilder.in(root.get("id")); + for (long id : silenceIds) { + inPredicate.value(id); + } + andList.add(inPredicate); + } + if (StringUtils.hasText(search)) { + Predicate predicate = criteriaBuilder.or( + criteriaBuilder.like( + criteriaBuilder.lower(root.get("name")), + "%" + search.toLowerCase() + "%" + ) + ); + andList.add(predicate); + } + Predicate[] predicates = new Predicate[andList.size()]; + return criteriaBuilder.and(andList.toArray(predicates)); + }; + Sort sortExp = Sort.by(new Sort.Order(Sort.Direction.fromString(order), sort)); + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize, sortExp); return alertSilenceDao.findAll(specification, pageRequest); } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java 
index 93cff97beae..2915b287592 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java @@ -17,9 +17,16 @@ package org.apache.hertzbeat.alert.service; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.Optional; import java.util.Set; - import org.apache.hertzbeat.alert.dao.AlertSilenceDao; import org.apache.hertzbeat.alert.service.impl.AlertSilenceServiceImpl; import org.apache.hertzbeat.common.entity.alerter.AlertSilence; @@ -28,15 +35,9 @@ import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.PageRequest; +import org.springframework.data.jpa.domain.Specification; /** * test case for {@link AlertSilenceServiceImpl} @@ -106,6 +107,15 @@ void testGetAlertSilence() { verify(alertSilenceDao, times(1)).findById(1L); } + @Test + void testGetAlertSilences() { + when(alertSilenceDao.findAll(any(Specification.class), any(PageRequest.class))).thenReturn(Page.empty()); + assertDoesNotThrow(() -> 
alertSilenceService.getAlertSilences(null, null, "id", "desc", 1, 10)); + verify(alertSilenceDao, times(1)).findAll(any(Specification.class), any(PageRequest.class)); + + assertNotNull(alertSilenceService.getAlertSilences(null, null, "id", "desc", 1, 10)); + } + @Test void testDeleteAlertSilences() { From 0c2cc59b558c8f8bc800c308aab1b7f45b7bc552 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Tue, 6 Aug 2024 00:37:55 +0800 Subject: [PATCH 142/257] [chore] add service test tmpl file (#2472) Signed-off-by: yuluo-yx Co-authored-by: Calvin --- .../manager/service/AccountServiceTest.java | 27 ++++++++++++++++++ .../manager/service/AlibabaAiServiceTest.java | 27 ++++++++++++++++++ .../manager/service/KimiAiServiceTest.java | 28 +++++++++++++++++++ .../service/MailGeneralConfigServiceTest.java | 27 ++++++++++++++++++ .../service/ObjectStoreConfigServiceTest.java | 27 ++++++++++++++++++ .../service/ObsObjectStoreServiceTest.java | 27 ++++++++++++++++++ .../service/SmsGeneralConfigServiceTest.java | 27 ++++++++++++++++++ .../service/SparkDeskAiServiceTest.java | 27 ++++++++++++++++++ .../service/StatusPageServiceTest.java | 27 ++++++++++++++++++ .../SystemGeneralConfigServiceTest.java | 27 ++++++++++++++++++ .../service/SystemSecretServiceTest.java | 27 ++++++++++++++++++ .../manager/service/ZhiPuServiceTest.java | 27 ++++++++++++++++++ 12 files changed, 325 insertions(+) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java create mode 100644 
manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java new file mode 100644 index 00000000000..46f55d22bf9 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.AccountServiceImpl; + +/** + * test case for {@link AccountServiceImpl} + */ + +class AccountServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java new file mode 100644 index 00000000000..ac6b7bbccec --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.AlibabaAiServiceImpl; + +/** + * test case for {@link AlibabaAiServiceImpl} + */ + +class AlibabaAiServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java new file mode 100644 index 00000000000..30f0eb790f9 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + + +import org.apache.hertzbeat.manager.service.impl.KimiAiServiceImpl; + +/** + * test case for {@link KimiAiServiceImpl} + */ + +class KimiAiServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java new file mode 100644 index 00000000000..7d9ea39793d --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.MailGeneralConfigServiceImpl; + +/** + * test case for {@link MailGeneralConfigServiceImpl} + */ + +class MailGeneralConfigServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java new file mode 100644 index 00000000000..54c2403db86 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.ObjectStoreConfigServiceImpl; + +/** + * test case for {@link ObjectStoreConfigServiceImpl} + */ + +class ObjectStoreConfigServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java new file mode 100644 index 00000000000..f0e6a0e2e67 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.ObsObjectStoreServiceImpl; + +/** + * test case for {@link ObsObjectStoreServiceImpl} + */ + +class ObsObjectStoreServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java new file mode 100644 index 00000000000..6ba364b853e --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.SmsGeneralConfigServiceImpl; + +/** + * test case for {@link SmsGeneralConfigServiceImpl} + */ + +class SmsGeneralConfigServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java new file mode 100644 index 00000000000..f8dd26a998b --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.SparkDeskAiServiceImpl; + +/** + * test case for {@link SparkDeskAiServiceImpl} + */ + +class SparkDeskAiServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java new file mode 100644 index 00000000000..e3714240a42 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.StatusPageServiceImpl; + +/** + * test case for {@link StatusPageServiceImpl} + */ + +class StatusPageServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java new file mode 100644 index 00000000000..bed51fdc874 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.SystemGeneralConfigServiceImpl; + +/** + * test case for {@link SystemGeneralConfigServiceImpl} + */ + +class SystemGeneralConfigServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java new file mode 100644 index 00000000000..729cd130f61 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.SystemSecretServiceImpl; + +/** + * test case for {@link SystemSecretServiceImpl} + */ + +class SystemSecretServiceTest { +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java new file mode 100644 index 00000000000..68329d472b4 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.manager.service; + +import org.apache.hertzbeat.manager.service.impl.ZhiPuServiceImpl; + +/** + * test case for {@link ZhiPuServiceImpl} + */ + +class ZhiPuServiceTest { +} From 58e6e1a2a7724e4f845fef06e3154a06f514c7a3 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Tue, 6 Aug 2024 10:17:14 +0800 Subject: [PATCH 143/257] [improve] optimize collect thread prevent cpu idling (#2468) Signed-off-by: tomsun28 --- .../dispatch/MetricsCollectorQueue.java | 3 +-- .../redis/RedisClusterCollectImplTest.java | 7 ++++++- .../redis/RedisSingleCollectImplTest.java | 8 +++++++ .../dispatch/MetricsCollectorQueueTest.java | 21 +++++++++++++++---- 4 files changed, 32 insertions(+), 7 deletions(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueue.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueue.java index c927a71faa6..b53f5fdc3a3 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueue.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueue.java @@ -18,7 +18,6 @@ package org.apache.hertzbeat.collector.dispatch; import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Component; @@ -40,7 +39,7 @@ public void addJob(MetricsCollect job) { } public MetricsCollect getJob() throws InterruptedException { - return jobQueue.poll(2, TimeUnit.SECONDS); + return jobQueue.take(); } } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java index e8f9810a899..df41c165f51 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java +++ 
b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java @@ -31,6 +31,7 @@ import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.RedisProtocol; import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -62,7 +63,11 @@ public class RedisClusterCollectImplTest { void setUp() { } - + @AfterEach + void setDown() { + connection.close(); + client.shutdown(); + } @Test void testCollect(){ diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java index 010402aefcb..be6a259667b 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java @@ -29,6 +29,7 @@ import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.RedisProtocol; import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -69,6 +70,12 @@ void setUp() { .pattern("1") .build(); } + + @AfterEach + void setDown() { + connection.close(); + client.shutdown(); + } @Test void getInstance() { @@ -150,5 +157,6 @@ void testCollect() { assertEquals(row.getColumns(1), version); } clientMockedStatic.close(); + client.shutdown(); } } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java 
b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java index 791f7b000a7..ee7181828c5 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java @@ -17,10 +17,11 @@ package org.apache.hertzbeat.collector.dispatch; +import java.util.concurrent.locks.ReentrantLock; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; /** @@ -47,9 +48,21 @@ void testAddJob() throws InterruptedException { } @Test - void testGetJobTimeout() throws InterruptedException { - - assertNull(metricsCollectorQueue.getJob()); + void testGetJobTimeout() { + ReentrantLock lock = new ReentrantLock(); + Thread run = new Thread(() -> { + try { + metricsCollectorQueue.getJob(); + } catch (Exception e) { + assertThrows(InterruptedException.class, () -> { + throw e; + }); + lock.unlock(); + } + }); + run.start(); + run.interrupt(); + lock.lock(); } } From e9da2d29254d735d269c7765e8b91ed3e2ef8bc3 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Tue, 6 Aug 2024 10:57:31 +0800 Subject: [PATCH 144/257] [Improve] add push module controller unit test (#2451) Signed-off-by: yuluo-yx Signed-off-by: YuLuo --- .../alert/reduce/AlarmSilenceReduce.java | 45 +++++---- .../push/controller/PushControllerTest.java | 94 +++++++++++++++++++ .../controller/PushGatewayControllerTest.java | 91 ++++++++++++++++++ 3 files changed, 210 insertions(+), 20 deletions(-) create mode 100644 push/src/test/java/org/apache/hertzbeat/push/controller/PushControllerTest.java create mode 100644 push/src/test/java/org/apache/hertzbeat/push/controller/PushGatewayControllerTest.java diff --git 
a/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduce.java b/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduce.java index 8a807a003cf..5f8a0b0633a 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduce.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduce.java @@ -18,7 +18,6 @@ package org.apache.hertzbeat.alert.reduce; import java.time.LocalDateTime; -import java.time.LocalTime; import java.util.List; import java.util.Map; import java.util.Optional; @@ -84,32 +83,16 @@ public boolean filterSilence(Alert alert) { } } if (match) { - LocalDateTime nowDate = LocalDateTime.now(); if (alertSilence.getType() == 0) { // once time - boolean startMatch = alertSilence.getPeriodStart() == null || nowDate.isAfter(alertSilence.getPeriodStart().toLocalDateTime()); - boolean endMatch = alertSilence.getPeriodEnd() == null || nowDate.isBefore(alertSilence.getPeriodEnd().toLocalDateTime()); - if (startMatch && endMatch) { - int times = Optional.ofNullable(alertSilence.getTimes()).orElse(0); - alertSilence.setTimes(times + 1); - alertSilenceDao.save(alertSilence); - return false; - } + return checkAndSave(LocalDateTime.now(), alertSilence); } else if (alertSilence.getType() == 1) { // cyc time - int currentDayOfWeek = nowDate.toLocalDate().getDayOfWeek().getValue(); + int currentDayOfWeek = LocalDateTime.now().toLocalDate().getDayOfWeek().getValue(); if (alertSilence.getDays() != null && !alertSilence.getDays().isEmpty()) { boolean dayMatch = alertSilence.getDays().stream().anyMatch(item -> item == currentDayOfWeek); if (dayMatch) { - LocalTime nowTime = nowDate.toLocalTime(); - boolean startMatch = alertSilence.getPeriodStart() == null || nowTime.isAfter(alertSilence.getPeriodStart().toLocalTime()); - boolean endMatch = alertSilence.getPeriodEnd() == null || nowTime.isBefore(alertSilence.getPeriodEnd().toLocalTime()); - if (startMatch && endMatch) { - int times = 
Optional.ofNullable(alertSilence.getTimes()).orElse(0); - alertSilence.setTimes(times + 1); - alertSilenceDao.save(alertSilence); - return false; - } + return checkAndSave(LocalDateTime.now(), alertSilence); } } } @@ -117,4 +100,26 @@ public boolean filterSilence(Alert alert) { } return true; } + + /** + * Check AlertSilence start and end match, to save alertSilence obj. + * @param times LocalDateTime. + * @param alertSilence {@link AlertSilence} + * @return boolean + */ + private boolean checkAndSave(LocalDateTime times, AlertSilence alertSilence) { + + boolean startMatch = alertSilence.getPeriodStart() == null || times.isAfter(alertSilence.getPeriodStart().toLocalDateTime()); + boolean endMatch = alertSilence.getPeriodEnd() == null || times.isBefore(alertSilence.getPeriodEnd().toLocalDateTime()); + + if (startMatch && endMatch) { + + int time = Optional.ofNullable(alertSilence.getTimes()).orElse(0); + alertSilence.setTimes(time + 1); + alertSilenceDao.save(alertSilence); + return false; + } + + return true; + } } diff --git a/push/src/test/java/org/apache/hertzbeat/push/controller/PushControllerTest.java b/push/src/test/java/org/apache/hertzbeat/push/controller/PushControllerTest.java new file mode 100644 index 00000000000..11a8e15f7cd --- /dev/null +++ b/push/src/test/java/org/apache/hertzbeat/push/controller/PushControllerTest.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hertzbeat.push.controller; + +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.push.PushMetricsDto; +import org.apache.hertzbeat.common.util.JsonUtil; +import org.apache.hertzbeat.push.service.PushService; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; +import org.springframework.test.web.servlet.request.MockMvcRequestBuilders; + +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; + +/** + * test case for {@link PushController} + */ + +@ExtendWith(MockitoExtension.class) +class PushControllerTest { + + private MockMvc mockMvc; + + @Mock + private PushService pushService; + + @InjectMocks + private PushController pushController; + + private PushMetricsDto mockPushMetricsDto; + + @BeforeEach + void setUp() { + + this.mockMvc = standaloneSetup(this.pushController).build(); + + mockPushMetricsDto = PushMetricsDto.builder().build(); + } + + @Test + void testPushMetrics() throws Exception { + + 
this.mockMvc.perform(MockMvcRequestBuilders.post("/api/push") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(mockPushMetricsDto))) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andReturn(); + } + + @Test + void testGetMetrics() throws Exception { + + Long id = 6565463543L; + Long time = 6565463543L; + + when(pushService.getPushMetricData(id, time)).thenReturn(mockPushMetricsDto); + + this.mockMvc.perform(MockMvcRequestBuilders.get("/api/push") + .contentType(MediaType.APPLICATION_JSON) + .param("id", id.toString()) + .param("time", time.toString())) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andReturn(); + } + +} diff --git a/push/src/test/java/org/apache/hertzbeat/push/controller/PushGatewayControllerTest.java b/push/src/test/java/org/apache/hertzbeat/push/controller/PushGatewayControllerTest.java new file mode 100644 index 00000000000..11238319ef8 --- /dev/null +++ b/push/src/test/java/org/apache/hertzbeat/push/controller/PushGatewayControllerTest.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hertzbeat.push.controller; + +import java.io.InputStream; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.push.service.PushGatewayService; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; +import org.springframework.test.web.servlet.setup.MockMvcBuilders; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; + +/** + * test case for {@link PushGatewayController} + */ + +@ExtendWith(MockitoExtension.class) +class PushGatewayControllerTest { + + private MockMvc mockMvc; + + @Mock + private PushGatewayService pushGatewayService; + + @InjectMocks + private PushGatewayController gatewayController; + + @BeforeEach + void setUp() { + + mockMvc = MockMvcBuilders.standaloneSetup(gatewayController).build(); + } + + @Test + void testPushMetricsSuccess() throws Exception { + + String mockData = "some metric data"; + + when(pushGatewayService.pushMetricsData(any(InputStream.class))).thenReturn(true); + + mockMvc.perform(post("/api/push/pushgateway") + .contentType(MediaType.APPLICATION_JSON) + .content(mockData)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Push success")); + } + + @Test + void testPushMetricsFailure() throws Exception { + + String mockData = "some metric data"; + + 
when(pushGatewayService.pushMetricsData(any(InputStream.class))).thenReturn(false); + + mockMvc.perform(post("/api/push/pushgateway") + .contentType(MediaType.APPLICATION_JSON) + .content(mockData)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.msg").value("Push failed")); + } + +} From c2c00d44c75ffca35e9361c18ac53213f9d472b2 Mon Sep 17 00:00:00 2001 From: aias00 Date: Tue, 6 Aug 2024 19:10:23 +0800 Subject: [PATCH 145/257] [doc] fix md index order and typo (#2480) --- home/blog/2022-06-01-hertzbeat-v1.0.md | 4 ++-- home/blog/2022-06-22-one-step-up.md | 2 +- home/blog/2022-07-10-hertzbeat-v1.1.1.md | 6 +++--- home/blog/2022-09-10-ssl-practice.md | 2 +- home/blog/2022-11-28-hertzbeat-v1.2.2.md | 8 ++++---- home/blog/2022-12-28-hertzbeat-v1.2.3.md | 12 ++++++------ home/docs/help/alert_slack.md | 2 +- .../2022-06-01-hertzbeat-v1.0.md | 2 +- .../2022-06-19-hertzbeat-v1.1.0.md | 2 +- .../2022-06-22-one-step-up.md | 2 +- .../2022-07-10-hertzbeat-v1.1.1.md | 6 +++--- .../2022-11-28-hertzbeat-v1.2.2.md | 8 ++++---- .../2022-12-28-hertzbeat-v1.2.3.md | 12 ++++++------ .../current/help/alert_slack.md | 2 +- .../version-v1.4.x/help/alert_slack.md | 2 +- .../version-v1.5.x/help/alert_slack.md | 2 +- .../version-v1.4.x/help/alert_slack.md | 2 +- .../version-v1.5.x/help/alert_slack.md | 2 +- 18 files changed, 39 insertions(+), 39 deletions(-) diff --git a/home/blog/2022-06-01-hertzbeat-v1.0.md b/home/blog/2022-06-01-hertzbeat-v1.0.md index 30252217861..8350ddb5825 100644 --- a/home/blog/2022-06-01-hertzbeat-v1.0.md +++ b/home/blog/2022-06-01-hertzbeat-v1.0.md @@ -50,7 +50,7 @@ Bug fix. 3. [[monitor]bugfix: fix only one filter label can be set when notification #140](https://github.com/apache/hertzbeat/pull/140). issue by @daqianxiaoyao 4. [[td-engine store]bugfix: fix log #150](https://github.com/apache/hertzbeat/pull/150). contribute by @ChineseTony 5. [[collector]bugfix: fix warehouse data queue consume error #153](https://github.com/apache/hertzbeat/pull/153). 
issue by @daqianxiaoyao -7. [[web-app]bugfix:fix input blocking when input error in dark theme #157](https://github.com/apache/hertzbeat/pull/157). issue by @ConradWen +6. [[web-app]bugfix:fix input blocking when input error in dark theme #157](https://github.com/apache/hertzbeat/pull/157). issue by @ConradWen **Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.0-beta.8...v1.0 @@ -65,7 +65,7 @@ Redis monitor is coming: 2022-05-29 20 24 21 > [HertzBeat](https://github.com/apache/hertzbeat), incubated by [Dromara](https://dromara.org) and open-sourced by [TanCloud](https://tancloud.cn), is an open-source monitoring and alerting project with a user-friendly visual interface that supports monitoring types such as websites, APIs, PING, ports, databases, operating systems, and more. -> Of course, we also provide a corresponding [SAAS cloud monitoring version](https://console.tancloud.cn), so small and medium-sized teams and individuals no longer need to deploy a cumbersome monitoring system to monitor their website resources; you can [log in](https://console.tancloud.cn) to start monitoring for free. +> Of course, we also provide a corresponding [SaaS cloud monitoring version](https://console.tancloud.cn), so small and medium-sized teams and individuals no longer need to deploy a cumbersome monitoring system to monitor their website resources; you can [log in](https://console.tancloud.cn) to start monitoring for free. > HertzBeat supports custom monitoring; by configuring the YML file, we can customize the required monitoring types and metrics to meet common personalized needs. > HertzBeat is modular, with manager, collector, scheduler, warehouse, alerter modules decoupled, making it easy to understand and customize for development. 
diff --git a/home/blog/2022-06-22-one-step-up.md b/home/blog/2022-06-22-one-step-up.md index 1c287803bda..59518f2e679 100644 --- a/home/blog/2022-06-22-one-step-up.md +++ b/home/blog/2022-06-22-one-step-up.md @@ -114,7 +114,7 @@ Have Fun! --- > [HertzBeat](https://github.com/apache/hertzbeat), incubated by [Dromara](https://dromara.org) and open-sourced by [TanCloud](https://tancloud.cn), is an open-source monitoring and alerting project that supports monitoring types such as websites, APIs, PING, ports, databases, operating systems, etc., with a user-friendly and easy-to-use visual interface. -> We also offer a [SAAS cloud monitoring version](https://console.tancloud.cn) for small and medium teams and individuals, eliminating the need to deploy a complicated monitoring system to monitor their web resources. [Sign up to start](https://console.tancloud.cn) your monitoring journey for free. +> We also offer a [SaaS cloud monitoring version](https://console.tancloud.cn) for small and medium teams and individuals, eliminating the need to deploy a complicated monitoring system to monitor their web resources. [Sign up to start](https://console.tancloud.cn) your monitoring journey for free. > HertzBeat supports custom monitoring; by configuring the YML file, we can customize the required monitoring types and metrics to meet common personalized needs. > HertzBeat is modular, with `manager, collector, scheduler, warehouse, alerter` modules decoupled, making it easy to understand and customize development. > HertzBeat supports more flexible alarm configurations (calculation expressions) and alarm notifications, including alarm templates, emails, DingTalk, WeChat, Feishu, etc., for timely delivery of notifications. diff --git a/home/blog/2022-07-10-hertzbeat-v1.1.1.md b/home/blog/2022-07-10-hertzbeat-v1.1.1.md index 0982fed4047..7139cbfd0a4 100644 --- a/home/blog/2022-07-10-hertzbeat-v1.1.1.md +++ b/home/blog/2022-07-10-hertzbeat-v1.1.1.md @@ -36,9 +36,9 @@ Bugfix. 4. 
[[common] bugfix alert status can not update #203](https://github.com/apache/hertzbeat/pull/203) 5. [[manager] bugfix update windows monitor type name #204](https://github.com/apache/hertzbeat/pull/204) 6. [fix time zone todo issue #210](https://github.com/apache/hertzbeat/pull/210) contribute by @djzeng -8. [[common] bugfix SnowFlakeId cannot exceed hexadecimal 0x1FFFFFFFFFFFFFF #211](https://github.com/apache/hertzbeat/pull/211) -9. [[manager] 修改监控页面取消监控功能再启动监控导致多生成jobId,原有监控项目并没有真实取消 #215](https://github.com/apache/hertzbeat/pull/215) contribute by @yangshihui -10. [[warehouse] bugfix exception when tdengine create table SQL contain special char #220](https://github.com/apache/hertzbeat/pull/220) +7. [[common] bugfix SnowFlakeId cannot exceed hexadecimal 0x1FFFFFFFFFFFFFF #211](https://github.com/apache/hertzbeat/pull/211) +8. [[manager] 修改监控页面取消监控功能再启动监控导致多生成jobId,原有监控项目并没有真实取消 #215](https://github.com/apache/hertzbeat/pull/215) contribute by @yangshihui +9. [[warehouse] bugfix exception when tdengine create table SQL contain special char #220](https://github.com/apache/hertzbeat/pull/220) Online https://console.tancloud.cn. diff --git a/home/blog/2022-09-10-ssl-practice.md b/home/blog/2022-09-10-ssl-practice.md index 0963a088c41..e0bd7337753 100644 --- a/home/blog/2022-09-10-ssl-practice.md +++ b/home/blog/2022-09-10-ssl-practice.md @@ -15,7 +15,7 @@ Today's article describes how to use hertzbeat monitoring system to detect the v #### What is HertzBeat? -HertzBeat is a real-time monitoring tool with powerful customizable monitoring capabilities without the need for an agent. Website monitoring, PING connectivity, port availability, database, OS, middleware, API monitoring, threshold alerts, alert notifications (email wechat pinning flybook). +HertzBeat is a real-time monitoring tool with powerful customizable monitoring capabilities without the need for an agent. 
Website monitoring, PING connectivity, port availability, database, OS, middleware, API monitoring, threshold alerts, alert notifications (email weChat pinning flybook). **Official website: https://hertzbeat.com | https://tancloud.cn** diff --git a/home/blog/2022-11-28-hertzbeat-v1.2.2.md b/home/blog/2022-11-28-hertzbeat-v1.2.2.md index 8f3fd6c3316..b546c676773 100644 --- a/home/blog/2022-11-28-hertzbeat-v1.2.2.md +++ b/home/blog/2022-11-28-hertzbeat-v1.2.2.md @@ -26,9 +26,9 @@ Feature: 2. [[home] add DM db document supplement #411](https://github.com/apache/hertzbeat/pull/411) @TJxiaobao 3. [[home] support algolia search #416](https://github.com/apache/hertzbeat/pull/416) 4. [[collector] support trigger and grading multiple subtasks through -_- placeholder expression #418](https://github.com/apache/hertzbeat/pull/418) -5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu -6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) -6. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan +5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu +6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) +7. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan 8. [[#358][manager] Add unit test manager/service/NoticeConfigServiceTest.java #427](https://github.com/apache/hertzbeat/pull/427) @haibo-duan 9. [[#356][manager] unit test case of manager/service/MailServiceTest.java #432](https://github.com/apache/hertzbeat/pull/432) @csyshu 10. 
[[manager,collector] support docker metrics monitor #438](https://github.com/apache/hertzbeat/pull/438) @TJxiaobao @@ -77,7 +77,7 @@ Feature: 4. [[collector] support trigger and grading multiple subtasks through -_- placeholder expression #418](https://github.com/apache/hertzbeat/pull/418) 5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu 6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) -6. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan +7. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan 8. [[#358][manager] Add unit test manager/service/NoticeConfigServiceTest.java #427](https://github.com/apache/hertzbeat/pull/427) @haibo-duan 9. [[#356][manager] unit test case of manager/service/MailServiceTest.java #432](https://github.com/apache/hertzbeat/pull/432) @csyshu 10. [[manager,collector] support docker metrics monitor #438](https://github.com/apache/hertzbeat/pull/438) @TJxiaobao diff --git a/home/blog/2022-12-28-hertzbeat-v1.2.3.md b/home/blog/2022-12-28-hertzbeat-v1.2.3.md index 675020eb61e..f818dc5e878 100644 --- a/home/blog/2022-12-28-hertzbeat-v1.2.3.md +++ b/home/blog/2022-12-28-hertzbeat-v1.2.3.md @@ -28,9 +28,9 @@ Feature: 2. [[doc] fix up:update the environment of hertzbeat to Java version 11 #473](https://github.com/apache/hertzbeat/pull/473) @BKing2020 3. [[docs] update kubernetes.md #478](https://github.com/apache/hertzbeat/pull/478) @wangke6666 4. [[web-app] enable alert define preset true by default #485](https://github.com/apache/hertzbeat/pull/485) -5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) -6. 
[[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) -6. [[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) +5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) +6. [[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) +7. [[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) 8. [[script] support service restart shell #489](https://github.com/apache/hertzbeat/pull/489) @zanglikun 9. [[docs] use rainbond deploy hertzbeat #495](https://github.com/apache/hertzbeat/pull/495) @zzzhangqi 10. [[webapp] upgrade web base angular version to 14 #501](https://github.com/apache/hertzbeat/pull/501) @@ -74,9 +74,9 @@ Feature: 2. [[doc] fix up:update the environment of hertzbeat to Java version 11 #473](https://github.com/apache/hertzbeat/pull/473) @BKing2020 3. [[docs] update kubernetes.md #478](https://github.com/apache/hertzbeat/pull/478) @wangke6666 4. [[web-app] enable alert define preset true by default #485](https://github.com/apache/hertzbeat/pull/485) -5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) -6. [[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) -6. [[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) +5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) +6. [[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) +7. [[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) 8. [[script] support service restart shell #489](https://github.com/apache/hertzbeat/pull/489) @zanglikun 9. 
[[docs] use rainbond deploy hertzbeat #495](https://github.com/apache/hertzbeat/pull/495) @zzzhangqi 10. [[webapp] upgrade web base angular version to 14 #501](https://github.com/apache/hertzbeat/pull/501) diff --git a/home/docs/help/alert_slack.md b/home/docs/help/alert_slack.md index 0de69afa328..2540a27451d 100644 --- a/home/docs/help/alert_slack.md +++ b/home/docs/help/alert_slack.md @@ -19,7 +19,7 @@ Refer to the official website document [Sending messages using Incoming Webhooks ![email](/img/docs/help/slack-bot-1.png) -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** +2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md index bb67ed6c64d..255046201cc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md @@ -50,7 +50,7 @@ Bug修复. 3. [[monitor]bugfix: 修复通知策略过滤标签时只能选择一个 #140](https://github.com/apache/hertzbeat/pull/140). issue by @daqianxiaoyao 4. [[td-engine store]bugfix: 修复tdengine入库指标数据时无table报错日志#150](https://github.com/apache/hertzbeat/pull/150). contribute by @ChineseTony 5. [[collector]bugfix: 修复 warehouse data queue 未消费异常 #153](https://github.com/apache/hertzbeat/pull/153). issue by @daqianxiaoyao -7. [[web-app]bugfix: 修复黑暗主题时页面输入框校验出错时不可见 #157](https://github.com/apache/hertzbeat/pull/157). issue by @ConradWen +6. 
[[web-app]bugfix: 修复黑暗主题时页面输入框校验出错时不可见 #157](https://github.com/apache/hertzbeat/pull/157). issue by @ConradWen **Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.0-beta.8...v1.0 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md index 89209a08958..82c59c3b8ec 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md @@ -24,7 +24,7 @@ Feature: 3. [[manager]支持监控参数的英文国际化,国际化更近一步 #184](https://github.com/apache/hertzbeat/pull/184). 4. [[script]支持了amd64和arm64版本的docker 镜像 #189](https://github.com/apache/hertzbeat/pull/189). 5. [[monitor]feature: 支持采集oracle多表空间指标数据 #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time -7. [[monitor]数据库表统一添加前缀 hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy +6. [[monitor]数据库表统一添加前缀 hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy Bugfix. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md index 8cc1ad48fe9..a949092e375 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md @@ -24,7 +24,7 @@ Feature: 3. [[manager]支持监控参数的英文国际化,国际化更近一步 #184](https://github.com/apache/hertzbeat/pull/184). 4. [[script]支持了amd64和arm64版本的docker 镜像 #189](https://github.com/apache/hertzbeat/pull/189). 5. [[monitor]feature: 支持采集oracle多表空间指标数据 #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time -7. [[monitor]数据库表统一添加前缀 hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy +6. 
[[monitor]数据库表统一添加前缀 hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy Bugfix. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md index 0e6e391b890..b3de59cfbb2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md @@ -36,9 +36,9 @@ Bugfix. 4. [[common] bugfix 告警状态无法页面手动更新问题 #203](https://github.com/apache/hertzbeat/pull/203) 5. [[manager] bugfix windows监控类型名称错误问题 #204](https://github.com/apache/hertzbeat/pull/204) 6. [fix time zone todo issue #210](https://github.com/apache/hertzbeat/pull/210) contribute by @djzeng -8. [[common] bugfix 雪花算法生成ID大小超出 0x1FFFFFFFFFFFFFF 导致前端不识别问题 #211](https://github.com/apache/hertzbeat/pull/211) -9. [[manager] 修改监控页面取消监控功能再启动监控导致多生成jobId,原有监控项目并没有真实取消 #215](https://github.com/apache/hertzbeat/pull/215) contribute by @yangshihui -10. [[warehouse] 修复tdengine对特殊字段建表失败导致数据无法入库问题 #220](https://github.com/apache/hertzbeat/pull/220) +7. [[common] bugfix 雪花算法生成ID大小超出 0x1FFFFFFFFFFFFFF 导致前端不识别问题 #211](https://github.com/apache/hertzbeat/pull/211) +8. [[manager] 修改监控页面取消监控功能再启动监控导致多生成jobId,原有监控项目并没有真实取消 #215](https://github.com/apache/hertzbeat/pull/215) contribute by @yangshihui +9. [[warehouse] 修复tdengine对特殊字段建表失败导致数据无法入库问题 #220](https://github.com/apache/hertzbeat/pull/220) Online https://console.tancloud.cn. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md index 050f6e344ca..f8488941bd4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md @@ -28,7 +28,7 @@ Feature: 4. 
[[collector] support trigger and grading multiple subtasks through -_- placeholder expression #418](https://github.com/apache/hertzbeat/pull/418) 5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu 6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) -6. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan +7. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan 8. [[#358][manager] Add unit test manager/service/NoticeConfigServiceTest.java #427](https://github.com/apache/hertzbeat/pull/427) @haibo-duan 9. [[#356][manager] unit test case of manager/service/MailServiceTest.java #432](https://github.com/apache/hertzbeat/pull/432) @csyshu 10. [[manager,collector] support docker metrics monitor #438](https://github.com/apache/hertzbeat/pull/438) @TJxiaobao @@ -75,9 +75,9 @@ Feature: 2. [[home] add DM db document supplement #411](https://github.com/apache/hertzbeat/pull/411) @TJxiaobao 3. [[home] support algolia search #416](https://github.com/apache/hertzbeat/pull/416) 4. [[collector] support trigger and grading multiple subtasks through -_- placeholder expression #418](https://github.com/apache/hertzbeat/pull/418) -5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu -6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) -6. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan +5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu +6. 
[[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) +7. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan 8. [[#358][manager] Add unit test manager/service/NoticeConfigServiceTest.java #427](https://github.com/apache/hertzbeat/pull/427) @haibo-duan 9. [[#356][manager] unit test case of manager/service/MailServiceTest.java #432](https://github.com/apache/hertzbeat/pull/432) @csyshu 10. [[manager,collector] support docker metrics monitor #438](https://github.com/apache/hertzbeat/pull/438) @TJxiaobao diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md index 2adb7f3362f..79028a22e82 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md @@ -28,9 +28,9 @@ Feature: 2. [[doc] fix up:update the environment of hertzbeat to Java version 11 #473](https://github.com/apache/hertzbeat/pull/473) @BKing2020 3. [[docs] update kubernetes.md #478](https://github.com/apache/hertzbeat/pull/478) @wangke6666 4. [[web-app] enable alert define preset true by default #485](https://github.com/apache/hertzbeat/pull/485) -5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) -6. [[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) -6. [[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) +5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) +6. [[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) +7. 
[[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) 8. [[script] support service restart shell #489](https://github.com/apache/hertzbeat/pull/489) @zanglikun 9. [[docs] use rainbond deploy hertzbeat #495](https://github.com/apache/hertzbeat/pull/495) @zzzhangqi 10. [[webapp] upgrade web base angular version to 14 #501](https://github.com/apache/hertzbeat/pull/501) @@ -74,9 +74,9 @@ Feature: 2. [[doc] fix up:update the environment of hertzbeat to Java version 11 #473](https://github.com/apache/hertzbeat/pull/473) @BKing2020 3. [[docs] update kubernetes.md #478](https://github.com/apache/hertzbeat/pull/478) @wangke6666 4. [[web-app] enable alert define preset true by default #485](https://github.com/apache/hertzbeat/pull/485) -5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) -6. [[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) -6. [[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) +5. [[web-app] support friendly tip when add notice receiver #486](https://github.com/apache/hertzbeat/pull/486) +6. [[web-app] update dashboard category card ui #487](https://github.com/apache/hertzbeat/pull/487) +7. [[collector] limit trigger sub task max num #488](https://github.com/apache/hertzbeat/pull/488) 8. [[script] support service restart shell #489](https://github.com/apache/hertzbeat/pull/489) @zanglikun 9. [[docs] use rainbond deploy hertzbeat #495](https://github.com/apache/hertzbeat/pull/495) @zzzhangqi 10. 
[[webapp] upgrade web base angular version to 14 #501](https://github.com/apache/hertzbeat/pull/501) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md index f2cc7a76673..e137d07e9e2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md @@ -21,7 +21,7 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/slack-bot-1.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md index f2cc7a76673..e137d07e9e2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md @@ -21,7 +21,7 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/slack-bot-1.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md index f2cc7a76673..e137d07e9e2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md @@ -21,7 +21,7 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/slack-bot-1.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 diff --git a/home/versioned_docs/version-v1.4.x/help/alert_slack.md b/home/versioned_docs/version-v1.4.x/help/alert_slack.md index 0de69afa328..2540a27451d 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_slack.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_slack.md @@ -19,7 +19,7 @@ Refer to the official website document [Sending messages using Incoming Webhooks ![email](/img/docs/help/slack-bot-1.png) -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** +2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. diff --git a/home/versioned_docs/version-v1.5.x/help/alert_slack.md b/home/versioned_docs/version-v1.5.x/help/alert_slack.md index 0de69afa328..2540a27451d 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_slack.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_slack.md @@ -19,7 +19,7 @@ Refer to the official website document [Sending messages using Incoming Webhooks ![email](/img/docs/help/slack-bot-1.png) -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** +2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. From d6b19691fb8617938ef1e9114985c52f785c2ccc Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Tue, 6 Aug 2024 21:07:53 +0800 Subject: [PATCH 146/257] [refactor] move code from MetricsDataController to MetricsService (#2437) Co-authored-by: Calvin --- .../controller/MetricsDataController.java | 86 ++----------- .../warehouse/service/MetricsDataService.java | 54 ++++++++ .../service/impl/MetricsDataServiceImpl.java | 118 ++++++++++++++++++ .../{ => impl}/WarehouseServiceImpl.java | 3 +- .../controller/MetricsDataControllerTest.java | 73 +++++------ .../service/MetricsDataServiceTest.java | 118 ++++++++++++++++++ 6 files changed, 335 insertions(+), 117 deletions(-) create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/MetricsDataService.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/impl/MetricsDataServiceImpl.java rename warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/{ => impl}/WarehouseServiceImpl.java (93%) create mode 100644 warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/MetricsDataServiceTest.java diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/controller/MetricsDataController.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/controller/MetricsDataController.java index dd787d6de3a..d95616fe251 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/controller/MetricsDataController.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/controller/MetricsDataController.java @@ -22,22 +22,10 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; 
-import java.util.Optional; -import java.util.stream.Collectors; -import org.apache.hertzbeat.common.constants.CommonConstants; -import org.apache.hertzbeat.common.entity.dto.Field; import org.apache.hertzbeat.common.entity.dto.Message; import org.apache.hertzbeat.common.entity.dto.MetricsData; import org.apache.hertzbeat.common.entity.dto.MetricsHistoryData; -import org.apache.hertzbeat.common.entity.dto.Value; -import org.apache.hertzbeat.common.entity.dto.ValueRow; -import org.apache.hertzbeat.common.entity.message.CollectRep; -import org.apache.hertzbeat.warehouse.store.history.HistoryDataReader; -import org.apache.hertzbeat.warehouse.store.realtime.RealTimeDataReader; +import org.apache.hertzbeat.warehouse.service.MetricsDataService; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; @@ -55,20 +43,17 @@ public class MetricsDataController { private static final Integer METRIC_FULL_LENGTH = 3; - private final RealTimeDataReader realTimeDataReader; - private final Optional historyDataReader; + private final MetricsDataService metricsDataService; - public MetricsDataController(RealTimeDataReader realTimeDataReader, - Optional historyDataReader) { - this.realTimeDataReader = realTimeDataReader; - this.historyDataReader = historyDataReader; + public MetricsDataController(MetricsDataService metricsDataService) { + this.metricsDataService = metricsDataService; } @GetMapping("/api/warehouse/storage/status") @Operation(summary = "Query Warehouse Storage Server Status", description = "Query the availability status of the storage service under the warehouse") public ResponseEntity> getWarehouseStorageServerStatus() { - - if (historyDataReader.isPresent() && historyDataReader.get().isServerAvailable()) { + Boolean status = metricsDataService.getWarehouseStorageServerStatus(); + if (Boolean.TRUE.equals(status)) { return ResponseEntity.ok(Message.success()); } 
@@ -83,47 +68,11 @@ public ResponseEntity> getMetricsData( @PathVariable Long monitorId, @Parameter(description = "Metrics Name", example = "cpu") @PathVariable String metrics) { - boolean available = realTimeDataReader.isServerAvailable(); - if (!available) { - return ResponseEntity.ok(Message.fail(FAIL_CODE, "real time store not available")); - } - CollectRep.MetricsData storageData = realTimeDataReader.getCurrentMetricsData(monitorId, metrics); - if (storageData == null) { + MetricsData metricsData = metricsDataService.getMetricsData(monitorId, metrics); + if (metricsData == null){ return ResponseEntity.ok(Message.success("query metrics data is empty")); } - { - MetricsData.MetricsDataBuilder dataBuilder = MetricsData.builder(); - dataBuilder.id(storageData.getId()).app(storageData.getApp()).metrics(storageData.getMetrics()) - .time(storageData.getTime()); - List fields = storageData.getFieldsList().stream().map(tmpField -> - Field.builder().name(tmpField.getName()) - .type(Integer.valueOf(tmpField.getType()).byteValue()) - .label(tmpField.getLabel()) - .unit(tmpField.getUnit()) - .build()) - .collect(Collectors.toList()); - dataBuilder.fields(fields); - List valueRows = new LinkedList<>(); - for (CollectRep.ValueRow valueRow : storageData.getValuesList()) { - Map labels = new HashMap<>(8); - List values = new LinkedList<>(); - for (int i = 0; i < fields.size(); i++) { - Field field = fields.get(i); - String origin = valueRow.getColumns(i); - if (CommonConstants.NULL_VALUE.equals(origin)) { - values.add(new Value()); - } else { - values.add(new Value(origin)); - if (field.getLabel()) { - labels.put(field.getName(), origin); - } - } - } - valueRows.add(ValueRow.builder().labels(labels).values(values).build()); - } - dataBuilder.valueRows(valueRows); - return ResponseEntity.ok(Message.success(dataBuilder.build())); - } + return ResponseEntity.ok(Message.success(metricsData)); } @GetMapping("/api/monitor/{monitorId}/metric/{metricFull}") @@ -140,8 +89,7 @@ public 
ResponseEntity> getMetricHistoryData( @Parameter(description = "aggregate data calc. off by default; 4-hour window, query limit >1 week", example = "false") @RequestParam(required = false) Boolean interval ) { - - if (historyDataReader.isEmpty() || !historyDataReader.get().isServerAvailable()) { + if (!metricsDataService.getWarehouseStorageServerStatus()) { return ResponseEntity.ok(Message.fail(FAIL_CODE, "time series database not available")); } String[] names = metricFull.split("\\."); @@ -151,19 +99,7 @@ public ResponseEntity> getMetricHistoryData( String app = names[0]; String metrics = names[1]; String metric = names[2]; - if (history == null) { - history = "6h"; - } - Map> instanceValuesMap; - if (interval == null || !interval) { - instanceValuesMap = historyDataReader.get().getHistoryMetricData(monitorId, app, metrics, metric, label, history); - } else { - instanceValuesMap = historyDataReader.get().getHistoryIntervalMetricData(monitorId, app, metrics, metric, label, history); - } - MetricsHistoryData historyData = MetricsHistoryData.builder() - .id(monitorId).metrics(metrics).values(instanceValuesMap) - .field(Field.builder().name(metric).type(CommonConstants.TYPE_NUMBER).build()) - .build(); + MetricsHistoryData historyData = metricsDataService.getMetricHistoryData(monitorId, app, metrics, metric, label, history, interval); return ResponseEntity.ok(Message.success(historyData)); } } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/MetricsDataService.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/MetricsDataService.java new file mode 100644 index 00000000000..c0d7e7788b3 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/MetricsDataService.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.service; + +import org.apache.hertzbeat.common.entity.dto.MetricsData; +import org.apache.hertzbeat.common.entity.dto.MetricsHistoryData; + +/** + * service for metrics data + */ +public interface MetricsDataService { + + /** + * warehouse storage server availability or not + * @return true or false + */ + Boolean getWarehouseStorageServerStatus(); + + /** + * Query Real Time Metrics Data + * @param monitorId Monitor Id + * @param metrics Metrics Name + * @return metrics data + */ + MetricsData getMetricsData(Long monitorId, String metrics); + + /** + * Queries historical data for a specified metric for monitoring + * @param monitorId Monitor Id + * @param app Monitor Type + * @param metrics Metrics Name + * @param metric Metrics Field Name + * @param label Label Filter + * @param history Query Historical Time Period + * @param interval aggregate data calc + * @return metrics history data + */ + MetricsHistoryData getMetricHistoryData(Long monitorId, String app, String metrics, String metric, String label, String history, Boolean interval); +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/impl/MetricsDataServiceImpl.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/impl/MetricsDataServiceImpl.java new file mode 100644 index 00000000000..6c2690a3843 --- /dev/null +++ 
b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/impl/MetricsDataServiceImpl.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.service.impl; + +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.dto.Field; +import org.apache.hertzbeat.common.entity.dto.MetricsData; +import org.apache.hertzbeat.common.entity.dto.MetricsHistoryData; +import org.apache.hertzbeat.common.entity.dto.Value; +import org.apache.hertzbeat.common.entity.dto.ValueRow; +import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.apache.hertzbeat.common.support.exception.CommonException; +import org.apache.hertzbeat.warehouse.service.MetricsDataService; +import org.apache.hertzbeat.warehouse.store.history.HistoryDataReader; +import org.apache.hertzbeat.warehouse.store.realtime.RealTimeDataReader; +import org.springframework.stereotype.Service; + +/** + * Metrics Data Service impl + */ +@Service +public class MetricsDataServiceImpl 
implements MetricsDataService { + + private final RealTimeDataReader realTimeDataReader; + + private final Optional historyDataReader; + + public MetricsDataServiceImpl(RealTimeDataReader realTimeDataReader, Optional historyDataReader) { + this.realTimeDataReader = realTimeDataReader; + this.historyDataReader = historyDataReader; + } + + @Override + public Boolean getWarehouseStorageServerStatus() { + return historyDataReader.isPresent() && historyDataReader.get().isServerAvailable(); + } + + @Override + public MetricsData getMetricsData(Long monitorId, String metrics) { + boolean available = realTimeDataReader.isServerAvailable(); + if (!available) { + throw new CommonException("real time store not available"); + } + CollectRep.MetricsData storageData = realTimeDataReader.getCurrentMetricsData(monitorId, metrics); + if (storageData == null) { + return null; + } + MetricsData.MetricsDataBuilder dataBuilder = MetricsData.builder(); + dataBuilder.id(storageData.getId()).app(storageData.getApp()).metrics(storageData.getMetrics()) + .time(storageData.getTime()); + List fields = storageData.getFieldsList().stream().map(tmpField -> + Field.builder().name(tmpField.getName()) + .type(Integer.valueOf(tmpField.getType()).byteValue()) + .label(tmpField.getLabel()) + .unit(tmpField.getUnit()) + .build()) + .collect(Collectors.toList()); + dataBuilder.fields(fields); + List valueRows = new LinkedList<>(); + for (CollectRep.ValueRow valueRow : storageData.getValuesList()) { + Map labels = new HashMap<>(8); + List values = new LinkedList<>(); + for (int i = 0; i < fields.size(); i++) { + Field field = fields.get(i); + String origin = valueRow.getColumns(i); + if (CommonConstants.NULL_VALUE.equals(origin)) { + values.add(new Value()); + } else { + values.add(new Value(origin)); + if (field.getLabel()) { + labels.put(field.getName(), origin); + } + } + } + valueRows.add(ValueRow.builder().labels(labels).values(values).build()); + } + dataBuilder.valueRows(valueRows); + return 
dataBuilder.build(); + } + + @Override + public MetricsHistoryData getMetricHistoryData(Long monitorId, String app, String metrics, String metric, String label, String history, Boolean interval) { + if (history == null) { + history = "6h"; + } + Map> instanceValuesMap; + if (interval == null || !interval) { + instanceValuesMap = historyDataReader.get().getHistoryMetricData(monitorId, app, metrics, metric, label, history); + } else { + instanceValuesMap = historyDataReader.get().getHistoryIntervalMetricData(monitorId, app, metrics, metric, label, history); + } + return MetricsHistoryData.builder() + .id(monitorId).metrics(metrics).values(instanceValuesMap) + .field(Field.builder().name(metric).type(CommonConstants.TYPE_NUMBER).build()) + .build(); + } +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceImpl.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/impl/WarehouseServiceImpl.java similarity index 93% rename from warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceImpl.java rename to warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/impl/WarehouseServiceImpl.java index 570fd7bba6c..b5cbdcefc5e 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceImpl.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/service/impl/WarehouseServiceImpl.java @@ -15,12 +15,13 @@ * limitations under the License. 
*/ -package org.apache.hertzbeat.warehouse.service; +package org.apache.hertzbeat.warehouse.service.impl; import java.util.Collections; import java.util.List; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.apache.hertzbeat.warehouse.service.WarehouseService; import org.apache.hertzbeat.warehouse.store.realtime.AbstractRealTimeDataStorage; import org.springframework.stereotype.Service; diff --git a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java index 7b4e3f4ba8b..286027a9eb6 100644 --- a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java +++ b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/controller/MetricsDataControllerTest.java @@ -25,16 +25,11 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; import jakarta.servlet.ServletException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; import org.apache.hertzbeat.common.constants.CommonConstants; -import org.apache.hertzbeat.common.entity.dto.Value; -import org.apache.hertzbeat.common.entity.message.CollectRep; -import org.apache.hertzbeat.warehouse.store.history.HistoryDataReader; -import org.apache.hertzbeat.warehouse.store.realtime.RealTimeDataReader; +import org.apache.hertzbeat.common.entity.dto.Field; +import org.apache.hertzbeat.common.entity.dto.MetricsData; +import org.apache.hertzbeat.common.entity.dto.MetricsHistoryData; +import org.apache.hertzbeat.warehouse.service.MetricsDataService; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -59,27 +54,24 @@ class 
MetricsDataControllerTest { MetricsDataController metricsDataController; @Mock - HistoryDataReader historyDataReader; - - @Mock - RealTimeDataReader realTimeDataReader; + MetricsDataService metricsDataService; @BeforeEach void setUp() { - metricsDataController = new MetricsDataController(realTimeDataReader, Optional.of(historyDataReader)); + metricsDataController = new MetricsDataController(metricsDataService); this.mockMvc = MockMvcBuilders.standaloneSetup(metricsDataController).build(); } @Test void getWarehouseStorageServerStatus() throws Exception { - when(historyDataReader.isServerAvailable()).thenReturn(true); + when(metricsDataService.getWarehouseStorageServerStatus()).thenReturn(true); this.mockMvc.perform(MockMvcRequestBuilders.get("/api/warehouse/storage/status")) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andExpect(jsonPath("$.data").isEmpty()) .andExpect(jsonPath("$.msg").isEmpty()) .andReturn(); - when(historyDataReader.isServerAvailable()).thenReturn(false); + when(metricsDataService.getWarehouseStorageServerStatus()).thenReturn(false); this.mockMvc.perform(MockMvcRequestBuilders.get("/api/warehouse/storage/status")) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) @@ -95,8 +87,7 @@ void getMetricsData() throws Exception { final long time = System.currentTimeMillis(); final String getUrl = "/api/monitor/" + monitorId + "/metrics/" + metric; - when(realTimeDataReader.getCurrentMetricsData(eq(monitorId), eq(metric))).thenReturn(null); - when(realTimeDataReader.isServerAvailable()).thenReturn(true); + when(metricsDataService.getMetricsData(eq(monitorId), eq(metric))).thenReturn(null); this.mockMvc.perform(MockMvcRequestBuilders.get(getUrl)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) @@ -104,14 +95,14 @@ void getMetricsData() throws Exception { .andExpect(jsonPath("$.data").isEmpty()) 
.andReturn(); - CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder() - .setId(monitorId) - .setApp(app) - .setMetrics(metric) - .setTime(time) + MetricsData metricsData = MetricsData.builder() + .id(monitorId) + .app(app) + .metrics(metric) + .time(time) .build(); - when(realTimeDataReader.getCurrentMetricsData(eq(monitorId), eq(metric))).thenReturn(metricsData); - when(realTimeDataReader.isServerAvailable()).thenReturn(true); + + when(metricsDataService.getMetricsData(eq(monitorId), eq(metric))).thenReturn(metricsData); this.mockMvc.perform(MockMvcRequestBuilders.get(getUrl)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) @@ -126,33 +117,32 @@ void getMetricsData() throws Exception { @Test void getMetricHistoryData() throws Exception { final long monitorId = 343254354; + final String app = "linux"; final String metrics = "cpu"; final String metric = "usage"; - final String app = "testapp"; final String metricFull = "linux.cpu.usage"; final String metricFullFail = "linux.usage"; - final String instance = "disk2"; + final String label = "disk2"; final String history = "6h"; - final String interval = "false"; + final Boolean interval = false; final String getUrl = "/api/monitor/" + monitorId + "/metric/" + metricFull; final String getUrlFail = "/api/monitor/" + monitorId + "/metric/" + metricFullFail; MultiValueMap params = new LinkedMultiValueMap<>(); params.add("monitorId", String.valueOf(monitorId)); - params.add("metricFull", metricFull); - params.add("instance", instance); + params.add("label", label); params.add("history", history); - params.add("interval", interval); + params.add("interval", String.valueOf(interval)); - when(historyDataReader.isServerAvailable()).thenReturn(false); + when(metricsDataService.getWarehouseStorageServerStatus()).thenReturn(false); this.mockMvc.perform(MockMvcRequestBuilders.get(getUrl).params(params)) .andExpect(status().isOk()) - 
.andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) + .andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) .andExpect(jsonPath("$.msg").value("time series database not available")) .andExpect(jsonPath("$.data").isEmpty()) .andReturn(); - when(historyDataReader.isServerAvailable()).thenReturn(true); + when(metricsDataService.getWarehouseStorageServerStatus()).thenReturn(true); ServletException exception = assertThrows(ServletException.class, () -> this.mockMvc.perform(MockMvcRequestBuilders.get(getUrlFail).params(params)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.FAIL_CODE)) @@ -160,13 +150,14 @@ void getMetricHistoryData() throws Exception { .andReturn()); assertTrue(exception.getMessage().contains("IllegalArgumentException")); - final Map> instanceValuesMap = new HashMap<>(); - List list = new ArrayList<>(); - instanceValuesMap.put(metric, list); - when(historyDataReader.isServerAvailable()).thenReturn(true); - lenient().when(historyDataReader.getHistoryMetricData(eq(monitorId), eq(app), eq(metrics), eq(metric), - eq(instance), eq(history))) - .thenReturn(instanceValuesMap); + MetricsHistoryData metricsHistoryData = MetricsHistoryData.builder() + .id(monitorId) + .metrics(metrics) + .field(Field.builder().name(metric).type(CommonConstants.TYPE_NUMBER).build()) + .build(); + when(metricsDataService.getWarehouseStorageServerStatus()).thenReturn(true); + lenient().when(metricsDataService.getMetricHistoryData(eq(monitorId), eq(app), eq(metrics), eq(metric), eq(label), eq(history), eq(interval))) + .thenReturn(metricsHistoryData); this.mockMvc.perform(MockMvcRequestBuilders.get(getUrl).params(params)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) diff --git a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/MetricsDataServiceTest.java 
b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/MetricsDataServiceTest.java new file mode 100644 index 00000000000..12c1d48e0c7 --- /dev/null +++ b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/MetricsDataServiceTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.warehouse.service; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Optional; +import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.apache.hertzbeat.common.support.exception.CommonException; +import org.apache.hertzbeat.warehouse.service.impl.MetricsDataServiceImpl; +import org.apache.hertzbeat.warehouse.store.history.HistoryDataReader; +import org.apache.hertzbeat.warehouse.store.realtime.RealTimeDataReader; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + + +/** + * Test case for {@link MetricsDataService} + */ + +@ExtendWith(MockitoExtension.class) +public class MetricsDataServiceTest { + + @InjectMocks + private MetricsDataServiceImpl metricsDataService; + + @Mock + private RealTimeDataReader realTimeDataReader; + + @Mock + private HistoryDataReader historyDataReader; + + @BeforeEach + public void setUp(){ + metricsDataService = new MetricsDataServiceImpl(realTimeDataReader, Optional.of(historyDataReader)); + } + + @Test + public void testGetWarehouseStorageServerStatus(){ + when(historyDataReader.isServerAvailable()).thenReturn(false); + assertFalse(metricsDataService.getWarehouseStorageServerStatus()); + + when(historyDataReader.isServerAvailable()).thenReturn(true); + 
assertTrue(metricsDataService.getWarehouseStorageServerStatus()); + } + + @Test + public void testGetMetricsData(){ + Long monitorId = 1L; + String metrics = "disk"; + + when(realTimeDataReader.isServerAvailable()).thenReturn(false); + assertThrows(CommonException.class, ()-> metricsDataService.getMetricsData(monitorId, metrics), "real time store not available"); + + + when(realTimeDataReader.isServerAvailable()).thenReturn(true); + when(realTimeDataReader.getCurrentMetricsData(eq(monitorId), eq(metrics))).thenReturn(null); + assertNull(metricsDataService.getMetricsData(monitorId, metrics)); + + CollectRep.MetricsData storageData = CollectRep.MetricsData.newBuilder() + .setId(monitorId) + .setMetrics(metrics) + .addAllFields(new ArrayList<>()) + .addAllValues(new ArrayList<>()) + .build(); + when(realTimeDataReader.isServerAvailable()).thenReturn(true); + when(realTimeDataReader.getCurrentMetricsData(eq(monitorId), eq(metrics))).thenReturn(storageData); + assertNotNull(metricsDataService.getMetricsData(monitorId, metrics)); + } + + @Test + public void testGetMetricHistoryData(){ + Long monitorId = 1L; + String app = "linux"; + String metrics = "disk"; + String metric = "used"; + String label = "label"; + String history = "6h"; + Boolean intervalFalse = false; + Boolean intervalTrue = true; + + when(historyDataReader.getHistoryMetricData(eq(monitorId), eq(app), eq(metrics), eq(metric), eq(label), eq(history))).thenReturn(new HashMap<>()); + assertNotNull(metricsDataService.getMetricHistoryData(monitorId, app, metrics, metric, label, history, intervalFalse)); + verify(historyDataReader, times(1)).getHistoryMetricData(eq(monitorId), eq(app), eq(metrics), eq(metric), eq(label), eq(history)); + + when(historyDataReader.getHistoryIntervalMetricData(eq(monitorId), eq(app), eq(metrics), eq(metric), eq(label), eq(history))).thenReturn(new HashMap<>()); + assertNotNull(metricsDataService.getMetricHistoryData(monitorId, app, metrics, metric, label, history, intervalTrue)); + 
verify(historyDataReader, times(1)).getHistoryIntervalMetricData(eq(monitorId), eq(app), eq(metrics), eq(metric), eq(label), eq(history)); + } +} From fe403ed8807fc5d0015f757fe821d7a8f070bf74 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Tue, 6 Aug 2024 21:49:31 +0800 Subject: [PATCH 147/257] [improve] add TemplateConfigServiceImpl unit test (#2471) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../impl/TemplateConfigServiceImpl.java | 2 +- .../service/TemplateConfigServiceTest.java | 94 +++++++++++++++++++ 2 files changed, 95 insertions(+), 1 deletion(-) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java index bb21f6d881b..099e794d380 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java @@ -42,7 +42,7 @@ public class TemplateConfigServiceImpl extends AbstractGeneralConfigServiceImpl< * @param generalConfigDao configDao object * @param objectMapper JSON tool object */ - protected TemplateConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper objectMapper) { + public TemplateConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper objectMapper) { super(generalConfigDao, objectMapper); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java new file mode 100644 index 00000000000..81ba4f6dd2b --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * 
contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.service; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.manager.dao.GeneralConfigDao; +import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; +import org.apache.hertzbeat.manager.service.impl.TemplateConfigServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +/** + * test case for {@link TemplateConfigServiceImpl} + */ + +@ExtendWith(MockitoExtension.class) +class TemplateConfigServiceTest { + + @Mock + private GeneralConfigDao generalConfigDao; + + @Mock + private ObjectMapper objectMapper; + + @Mock + private AppService appService; + + @InjectMocks + private TemplateConfigServiceImpl templateConfigServiceImpl; + + @BeforeEach + void setUp() { + + templateConfigServiceImpl 
= new TemplateConfigServiceImpl(generalConfigDao, objectMapper); + ReflectionTestUtils.setField(templateConfigServiceImpl, "appService", appService); + } + + @Test + void testHandlerValidTemplateConfig() { + + TemplateConfig templateConfig = mock(TemplateConfig.class); + templateConfigServiceImpl.handler(templateConfig); + + verify( + appService, + times(1) + ).updateCustomTemplateConfig(templateConfig); + } + + @Test + void testHandlerNullTemplateConfig() { + + templateConfigServiceImpl.handler(null); + + verify( + appService, + times(0) + ).updateCustomTemplateConfig(any()); + } + + @Test + void testType() { + + String type = templateConfigServiceImpl.type(); + assertEquals("template", type); + } + +} From 6ac092dcd5f2757fc132892a86d80b257b63830f Mon Sep 17 00:00:00 2001 From: YuLuo Date: Tue, 6 Aug 2024 22:15:26 +0800 Subject: [PATCH 148/257] [improve] use file constant & extract file utils (#2474) Signed-off-by: yuluo-yx Co-authored-by: Calvin Co-authored-by: tomsun28 --- .../AlertDefineExcelImExportServiceImpl.java | 4 +- .../AlertDefineJsonImExportServiceImpl.java | 4 +- .../service/impl/AlertDefineServiceImpl.java | 21 ++--- .../AlertDefineYamlImExportServiceImpl.java | 8 +- .../common/constants/ExportFileConstants.java | 79 ++++++++++++++++++ .../hertzbeat/common/util/FileUtil.java | 80 +++++++++++++++++++ .../hertzbeat/common/util/FileUtilTest.java | 69 ++++++++++++++++ .../impl/ExcelImExportServiceImpl.java | 4 +- .../service/impl/JsonImExportServiceImpl.java | 4 +- .../service/impl/MonitorServiceImpl.java | 21 ++--- .../service/impl/YamlImExportServiceImpl.java | 4 +- 11 files changed, 254 insertions(+), 44 deletions(-) create mode 100644 common/src/main/java/org/apache/hertzbeat/common/constants/ExportFileConstants.java create mode 100644 common/src/main/java/org/apache/hertzbeat/common/util/FileUtil.java create mode 100644 common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java diff --git 
a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java index b0cace95986..c89eb4d417e 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.alert.service.impl; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.ExcelFile.FILE_SUFFIX; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.ExcelFile.TYPE; import com.fasterxml.jackson.core.type.TypeReference; import java.io.IOException; import java.io.InputStream; @@ -48,8 +50,6 @@ @Slf4j @Service public class AlertDefineExcelImExportServiceImpl extends AlertDefineAbstractImExportServiceImpl { - public static final String TYPE = "EXCEL"; - public static final String FILE_SUFFIX = ".xlsx"; /** * Export file type diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java index 3487b44fc1a..cb705841dd5 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineJsonImExportServiceImpl.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.alert.service.impl; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.JsonFile.FILE_SUFFIX; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.JsonFile.TYPE; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.IOException; @@ -35,8 +37,6 @@ @RequiredArgsConstructor @Service public 
class AlertDefineJsonImExportServiceImpl extends AlertDefineAbstractImExportServiceImpl { - public static final String TYPE = "JSON"; - public static final String FILE_SUFFIX = ".json"; private final ObjectMapper objectMapper; diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java index 624df433407..0923695c17a 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java @@ -37,9 +37,11 @@ import org.apache.hertzbeat.alert.dao.AlertMonitorDao; import org.apache.hertzbeat.alert.service.AlertDefineImExportService; import org.apache.hertzbeat.alert.service.AlertDefineService; +import org.apache.hertzbeat.common.constants.ExportFileConstants; import org.apache.hertzbeat.common.entity.alerter.AlertDefine; import org.apache.hertzbeat.common.entity.alerter.AlertDefineMonitorBind; import org.apache.hertzbeat.common.entity.manager.Monitor; +import org.apache.hertzbeat.common.util.FileUtil; import org.apache.hertzbeat.common.util.JexlExpressionRunner; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; @@ -236,22 +238,11 @@ public void export(List ids, String type, HttpServletResponse res) throws @Override public void importConfig(MultipartFile file) throws Exception { - var fileName = file.getOriginalFilename(); - if (!StringUtils.hasText(fileName)) { - return; - } - var type = ""; - if (fileName.toLowerCase().endsWith(AlertDefineJsonImExportServiceImpl.FILE_SUFFIX)) { - type = AlertDefineJsonImExportServiceImpl.TYPE; - } - if (fileName.toLowerCase().endsWith(AlertDefineExcelImExportServiceImpl.FILE_SUFFIX)) { - type = AlertDefineExcelImExportServiceImpl.TYPE; - } - if 
(fileName.toLowerCase().endsWith(AlertDefineYamlImExportServiceImpl.FILE_SUFFIX)) { - type = AlertDefineYamlImExportServiceImpl.TYPE; - } + + var type = FileUtil.getFileType(file); + var fileName = FileUtil.getFileName(file); if (!alertDefineImExportServiceMap.containsKey(type)) { - throw new RuntimeException("file " + fileName + " is not supported."); + throw new RuntimeException(ExportFileConstants.FILE + " " + fileName + " is not supported."); } var imExportService = alertDefineImExportServiceMap.get(type); imExportService.importConfig(file.getInputStream()); diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java index b58db57b0e2..f0598e8e5d7 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineYamlImExportServiceImpl.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.alert.service.impl; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.YamlFile.FILE_SUFFIX; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.YamlFile.TYPE; import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; @@ -29,15 +31,13 @@ import org.yaml.snakeyaml.Yaml; /** - * Configure the import and export Yaml format + * Configure the import and export Yaml format. 
*/ + @Slf4j @Service public class AlertDefineYamlImExportServiceImpl extends AlertDefineAbstractImExportServiceImpl { - public static final String TYPE = "YAML"; - public static final String FILE_SUFFIX = ".yaml"; - @Override public String type() { return TYPE; diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/ExportFileConstants.java b/common/src/main/java/org/apache/hertzbeat/common/constants/ExportFileConstants.java new file mode 100644 index 00000000000..cc800fdf70a --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/ExportFileConstants.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.constants; + +/** + * Export file type constants + */ + +public interface ExportFileConstants { + + /** + * Export file name constants. + */ + String FILE = "file"; + + /** + * Excel export files constants. + */ + interface ExcelFile { + + /** + * Export file type. + */ + String TYPE = "EXCEL"; + + /** + * Export file suffix. + */ + String FILE_SUFFIX = ".xlsx"; + } + + /** + * Json export file constants. + */ + interface JsonFile { + + /** + * Export file type. + */ + String TYPE = "JSON"; + + /** + * Export file suffix. 
+ */ + String FILE_SUFFIX = ".json"; + } + + /** + * Yaml export file constants. + */ + interface YamlFile { + + /** + * Export file type. + */ + String TYPE = "YAML"; + + /** + * Export file suffix. + */ + String FILE_SUFFIX = ".yaml"; + } + +} diff --git a/common/src/main/java/org/apache/hertzbeat/common/util/FileUtil.java b/common/src/main/java/org/apache/hertzbeat/common/util/FileUtil.java new file mode 100644 index 00000000000..d0ac10b98c7 --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/util/FileUtil.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.util; + +import java.util.Map; +import org.apache.hertzbeat.common.constants.ExportFileConstants; +import org.springframework.util.StringUtils; +import org.springframework.web.multipart.MultipartFile; + +/** + * File utils. 
+ */ + +public final class FileUtil { + + private FileUtil() { + } + + private static final Map fileTypes; + + static { + fileTypes = Map.of( + ExportFileConstants.JsonFile.FILE_SUFFIX, ExportFileConstants.JsonFile.TYPE, + ExportFileConstants.YamlFile.FILE_SUFFIX, ExportFileConstants.YamlFile.TYPE, + ExportFileConstants.ExcelFile.FILE_SUFFIX, ExportFileConstants.ExcelFile.TYPE + ); + } + + /** + * Get file name. + * @param file {@link MultipartFile} + * @return file name + */ + public static String getFileName(MultipartFile file) { + + var fileName = file.getOriginalFilename(); + if (!StringUtils.hasText(fileName)) { + return ""; + } + return fileName; + } + + /** + * Get file type. + * @param file {@link MultipartFile} + * @return file type + */ + public static String getFileType(MultipartFile file) { + + var fileName = getFileName(file); + + if (!StringUtils.hasText(fileName)) { + return ""; + } + + var dotIndex = fileName.lastIndexOf('.'); + if (dotIndex == -1 || dotIndex == fileName.length() - 1) { + return ""; + } + var fileNameExtension = fileName.substring(dotIndex); + + return fileTypes.get(fileNameExtension); + } + +} diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java new file mode 100644 index 00000000000..f6b4f26919f --- /dev/null +++ b/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.util; + +import org.apache.hertzbeat.common.constants.ExportFileConstants; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.springframework.mock.web.MockMultipartFile; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * test case for {@link FileUtil}. + */ + +class FileUtilTest { + + private static final String JSON_TYPE = "application/json"; + private static final String EXCEL_TYPE = "application/vnd.ms-excel"; + private static final String YAML_TYPE = "application/x-yaml"; + + private MockMultipartFile jsonFile; + private MockMultipartFile excelFile; + private MockMultipartFile yamlFile; + private MockMultipartFile emptyFile; + + @BeforeEach + void setUp() { + + jsonFile = new MockMultipartFile("file", "test.json", JSON_TYPE, "test content".getBytes()); + excelFile = new MockMultipartFile("file", "test.xlsx", EXCEL_TYPE, "test content".getBytes()); + yamlFile = new MockMultipartFile("file", "test.yaml", YAML_TYPE, "test content".getBytes()); + emptyFile = new MockMultipartFile("file", "", null, (byte[]) null); + } + + @Test + void testGetFileName() { + + assertEquals("test.json", FileUtil.getFileName(jsonFile)); + assertEquals("test.xlsx", FileUtil.getFileName(excelFile)); + assertEquals("test.yaml", FileUtil.getFileName(yamlFile)); + assertEquals("", FileUtil.getFileName(emptyFile)); + } + + @Test + void testGetFileType() { + + assertEquals(ExportFileConstants.JsonFile.TYPE, FileUtil.getFileType(jsonFile)); + 
assertEquals(ExportFileConstants.ExcelFile.TYPE, FileUtil.getFileType(excelFile)); + assertEquals(ExportFileConstants.YamlFile.TYPE, FileUtil.getFileType(yamlFile)); + assertEquals("", FileUtil.getFileType(emptyFile)); + } + +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java index a2a70ec124c..8672993b70f 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.manager.service.impl; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.ExcelFile.FILE_SUFFIX; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.ExcelFile.TYPE; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -49,8 +51,6 @@ @RequiredArgsConstructor @Service public class ExcelImExportServiceImpl extends AbstractImExportServiceImpl{ - public static final String TYPE = "EXCEL"; - public static final String FILE_SUFFIX = ".xlsx"; /** * Export file type diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java index 5cf6dcc3e2e..c7ebf72ed53 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/JsonImExportServiceImpl.java @@ -15,6 +15,8 @@ package org.apache.hertzbeat.manager.service.impl; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.JsonFile.FILE_SUFFIX; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.JsonFile.TYPE; import com.fasterxml.jackson.core.type.TypeReference; 
import com.fasterxml.jackson.databind.ObjectMapper; import java.io.IOException; @@ -32,8 +34,6 @@ @RequiredArgsConstructor @Service public class JsonImExportServiceImpl extends AbstractImExportServiceImpl { - public static final String TYPE = "JSON"; - public static final String FILE_SUFFIX = ".json"; private final ObjectMapper objectMapper; diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java index 4cd7c888dac..91f3778ceb0 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java @@ -40,6 +40,7 @@ import org.apache.hertzbeat.alert.dao.AlertDefineBindDao; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.constants.ExportFileConstants; import org.apache.hertzbeat.common.entity.job.Configmap; import org.apache.hertzbeat.common.entity.job.Job; import org.apache.hertzbeat.common.entity.job.Metrics; @@ -52,6 +53,7 @@ import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.support.event.MonitorDeletedEvent; import org.apache.hertzbeat.common.util.AesUtil; +import org.apache.hertzbeat.common.util.FileUtil; import org.apache.hertzbeat.common.util.IntervalExpressionUtil; import org.apache.hertzbeat.common.util.IpDomainUtil; import org.apache.hertzbeat.common.util.JsonUtil; @@ -310,22 +312,11 @@ public void export(List ids, String type, HttpServletResponse res) throws @Override public void importConfig(MultipartFile file) throws Exception { - var fileName = file.getOriginalFilename(); - if (!StringUtils.hasText(fileName)) { - return; - } - var type = ""; - if (fileName.toLowerCase().endsWith(JsonImExportServiceImpl.FILE_SUFFIX)) { - type = 
JsonImExportServiceImpl.TYPE; - } - if (fileName.toLowerCase().endsWith(ExcelImExportServiceImpl.FILE_SUFFIX)) { - type = ExcelImExportServiceImpl.TYPE; - } - if (fileName.toLowerCase().endsWith(YamlImExportServiceImpl.FILE_SUFFIX)) { - type = YamlImExportServiceImpl.TYPE; - } + + var fileName = FileUtil.getFileName(file); + var type = FileUtil.getFileType(file); if (!imExportServiceMap.containsKey(type)) { - throw new RuntimeException("file " + fileName + " is not supported."); + throw new RuntimeException(ExportFileConstants.FILE + " " + fileName + " is not supported."); } var imExportService = imExportServiceMap.get(type); imExportService.importConfig(file.getInputStream()); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java index 2986a096b9e..5b537514992 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.manager.service.impl; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.YamlFile.FILE_SUFFIX; +import static org.apache.hertzbeat.common.constants.ExportFileConstants.YamlFile.TYPE; import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; @@ -33,8 +35,6 @@ @Slf4j @Service public class YamlImExportServiceImpl extends AbstractImExportServiceImpl{ - public static final String TYPE = "YAML"; - public static final String FILE_SUFFIX = ".yaml"; /** * Export file type From c4fa738d654f0c873de750ff78a96db11dcfbf8e Mon Sep 17 00:00:00 2001 From: aias00 Date: Wed, 7 Aug 2024 09:09:16 +0800 Subject: [PATCH 149/257] [improve] add some code style improve (#2481) Co-authored-by: Calvin Co-authored-by: tomsun28 --- .../alert/reduce/AlarmConvergeReduce.java | 2 +- 
.../AlertDefineYamlImExportServiceTest.java | 28 +- .../collector/collect/jmx/JmxCollectImpl.java | 2 +- .../prometheus/PrometheusAutoCollectImpl.java | 4 +- .../collect/redfish/RedfishCollectImpl.java | 3 +- .../redfish/RedfishConnectSession.java | 3 +- .../collect/telnet/TelnetCollectImpl.java | 3 +- .../dispatch/timer/HashedWheelTimer.java | 2 +- .../collector/dispatch/unit/DataUnit.java | 2 +- .../dispatch/unit/TimeLengthUnit.java | 2 +- .../exporter/ExporterParserTest.java | 79 +-- .../collect/nginx/NginxCollectImplTest.java | 15 +- .../collector/util/PrivateKeyUtilsTest.java | 55 +- .../common/entity/manager/NoticeTemplate.java | 17 +- .../apache/hertzbeat/common/util/AesUtil.java | 30 +- .../hertzbeat/common/util/CommonUtil.java | 8 +- .../hertzbeat/common/util/IpDomainUtil.java | 4 +- .../impl/FlyBookAlertNotifyHandlerImpl.java | 3 +- .../manager/scheduler/ConsistentHash.java | 5 +- .../impl/AbstractImExportServiceImpl.java | 4 +- .../manager/service/impl/AppServiceImpl.java | 6 +- .../impl/ExcelImExportServiceImpl.java | 3 +- .../service/impl/MonitorServiceImpl.java | 4 +- .../service/impl/NoticeConfigServiceImpl.java | 3 +- .../impl/ObsObjectStoreServiceImpl.java | 3 +- ...ngTalkRobotAlertNotifyHandlerImplTest.java | 17 +- .../DiscordBotAlertNotifyHandlerImplTest.java | 15 +- .../FlyBookAlertNotifyHandlerImplTest.java | 15 +- ...weiCloudSmnAlertNotifyHandlerImplTest.java | 17 +- .../impl/SlackAlertNotifyHandlerImplTest.java | 17 +- ...TelegramBotAlertNotifyHandlerImplTest.java | 17 +- .../WeComRobotAlertNotifyHandlerImplTest.java | 17 +- .../controller/MonitorsControllerTest.java | 22 +- .../NoticeConfigControllerTest.java | 15 +- .../manager/service/MonitorServiceTest.java | 43 +- .../greptime/GreptimeDbDataStorage.java | 646 +++++++++--------- .../store/history/iotdb/IotDbDataStorage.java | 19 +- .../history/tdengine/TdEngineDataStorage.java | 5 +- .../vm/VictoriaMetricsClusterDataStorage.java | 9 +- .../vm/VictoriaMetricsDataStorage.java | 9 +- 40 
files changed, 600 insertions(+), 573 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java b/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java index 2aea1ea7cf1..936cb2d8b95 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java @@ -60,7 +60,7 @@ public boolean filterConverge(Alert currentAlert) { // restored alert boolean isHasIgnore = false; Map tags = currentAlert.getTags(); - if (tags.containsKey(CommonConstants.IGNORE)) { + if (Objects.requireNonNull(tags).containsKey(CommonConstants.IGNORE)) { isHasIgnore = true; tags.remove(CommonConstants.IGNORE); } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java index 693e0316f98..840da444fc5 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java @@ -58,18 +58,20 @@ class AlertDefineYamlImExportServiceTest { private AlertDefineYamlImExportServiceImpl service; private static final String YAML_DATA = - "- alertDefine:\n" + - " app: App1\n" + - " metric: Metric1\n" + - " field: Field1\n" + - " preset: true\n" + - " expr: Expr1\n" + - " priority: 1\n" + - " times: 1\n" + - " tags: []\n" + - " enable: true\n" + - " recoverNotice: true\n" + - " template: Template1\n"; + """ + - alertDefine: + app: App1 + metric: Metric1 + field: Field1 + preset: true + expr: Expr1 + priority: 1 + times: 1 + tags: [] + enable: true + recoverNotice: true + template: Template1 + """; private InputStream inputStream; private List alertDefineList; @@ -99,7 +101,7 @@ public void setup() { } @Test - void testParseImport() throws 
IllegalAccessException { + void testParseImport() { List result = service.parseImport(inputStream); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java index f1c5ef7ea2f..fb553a7dd28 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java @@ -108,7 +108,7 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri attributes = Arrays.stream(attrInfos) .filter(item -> item.isReadable() && attributeNameSet.contains(item.getName())) .map(MBeanFeatureInfo::getName) - .collect(Collectors.toList()).toArray(attributes); + .toList().toArray(attributes); AttributeList attributeList = serverConnection.getAttributes(currentObjectName, attributes); Map attributeValueMap = extractAttributeValue(attributeList); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java index b33c5832bce..d1dc2ad5a32 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java @@ -105,7 +105,7 @@ public List collect(CollectRep.MetricsData.Builder build String resp = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); long collectTime = System.currentTimeMillis(); builder.setTime(collectTime); - if (resp == null || "".equals(resp)) { + if (resp == null || !StringUtils.hasText(resp)) { log.error("http response content is empty, status: {}.", statusCode); builder.setCode(CollectRep.Code.FAIL); builder.setMsg("http response content is empty"); @@ -161,7 +161,7 @@ 
private void validateParams(Metrics metrics) throws Exception { } PrometheusProtocol protocol = metrics.getPrometheus(); if (protocol.getPath() == null - || "".equals(protocol.getPath()) + || !StringUtils.hasText(protocol.getPath()) || !protocol.getPath().startsWith(RIGHT_DASH)) { protocol.setPath(protocol.getPath() == null ? RIGHT_DASH : RIGHT_DASH + protocol.getPath().trim()); } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java index 3462c3ebdb3..a6e30f5c24f 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java @@ -158,8 +158,7 @@ private List parseCollectionResource(String resp) { } String resourceIdPath = "$.Members[*].['@odata.id']"; List resourceIds = JsonPathParser.parseContentWithJsonPath(resp, resourceIdPath); - List res = resourceIds.stream().filter(Objects::nonNull).map(String::valueOf).toList(); - return res; + return resourceIds.stream().filter(Objects::nonNull).map(String::valueOf).toList(); } private List getCollectionResource(String uri, ConnectSession connectSession) { diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java index d29ff20267d..9107c73312e 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java @@ -88,8 +88,7 @@ public String getRedfishResource(String uri) throws Exception { if (statusCode != HttpStatus.SC_OK) { throw new Exception("Http State code: " + statusCode); } - String resp = 
EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - return resp; + return EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); } catch (Exception e) { throw new Exception("Redfish session get resource error:" + e.getMessage()); } finally { diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java index 134e3d54d26..dbbe8bcac58 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java @@ -27,6 +27,7 @@ import java.util.Objects; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.net.telnet.TelnetClient; import org.apache.hertzbeat.collector.collect.AbstractCollect; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; @@ -116,7 +117,7 @@ public String supportProtocol() { } private static Map execCmdAndParseResult(TelnetClient telnetClient, String cmd, String app) throws IOException { - if (cmd == null || cmd.trim().length() == 0) { + if (cmd == null || StringUtils.isEmpty(cmd.trim())) { return new HashMap<>(16); } OutputStream outputStream = telnetClient.getOutputStream(); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/timer/HashedWheelTimer.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/timer/HashedWheelTimer.java index eca634e056a..866fda29860 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/timer/HashedWheelTimer.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/timer/HashedWheelTimer.java @@ -644,7 +644,7 @@ public void expire() { task.run(this); } catch (Throwable t) { if (logger.isWarnEnabled()) { - logger.warn("An exception was thrown by 
" + TimerTask.class.getSimpleName() + '.', t); + logger.warn("An exception was thrown by {}.", TimerTask.class.getSimpleName(), t); } } } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/DataUnit.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/DataUnit.java index 61dc33d632f..8d1068da458 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/DataUnit.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/DataUnit.java @@ -71,7 +71,7 @@ public enum DataUnit { private final String unit; private final long scale; - private DataUnit(String unit, long scale) { + DataUnit(String unit, long scale) { this.unit = unit; this.scale = scale; } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/TimeLengthUnit.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/TimeLengthUnit.java index 1e90c0e2614..b7313a90558 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/TimeLengthUnit.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/TimeLengthUnit.java @@ -53,7 +53,7 @@ public enum TimeLengthUnit { private final String unit; private final long scale; - private TimeLengthUnit(String unit, long scale) { + TimeLengthUnit(String unit, long scale) { this.unit = unit; this.scale = scale; } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/http/promethus/exporter/ExporterParserTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/http/promethus/exporter/ExporterParserTest.java index 69145d23a93..680c245651c 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/http/promethus/exporter/ExporterParserTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/http/promethus/exporter/ExporterParserTest.java @@ -28,45 +28,46 @@ class ExporterParserTest { @Test void textToMetric() { - 
String resp = "# HELP disk_total_bytes Total space for path\n" - + "# TYPE disk_total_bytes gauge\n" - + "disk_total_bytes{path=\"C:\\\\hertzbeat\\\\repo\\\\testpath\",} 4.29496725504E11\n" - + "# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.\n" - + "# TYPE go_gc_cycles_automatic_gc_cycles_total counter\n" - + "go_gc_cycles_automatic_gc_cycles_total 0\n" - + "# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.\n" - + "# TYPE go_gc_cycles_forced_gc_cycles_total counter\n" - + "go_gc_cycles_forced_gc_cycles_total 0\n" - + "# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles.\n" - + "# TYPE go_gc_cycles_total_gc_cycles_total counter\n" - + "go_gc_cycles_total_gc_cycles_total 0\n" - + "# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n" - + "# TYPE go_gc_duration_seconds summary\n" - + "go_gc_duration_seconds{quantile=\"0\"} 0\n" - + "go_gc_duration_seconds{quantile=\"0.25\"} 0\n" - + "go_gc_duration_seconds{quantile=\"0.5\"} 0\n" - + "go_gc_duration_seconds{quantile=\"0.75\"} 0\n" - + "go_gc_duration_seconds{quantile=\"1\"} 0\n" - + "# TYPE jvm info\n" - + "# HELP jvm VM version info\n" - + "jvm_info{runtime=\"OpenJDK Runtime Environment\",vendor=\"Azul Systems, Inc.\",version=\"11.0.13+8-LTS\"} 1.0\n" - + "# TYPE jvm_gc_collection_seconds summary\n" - + "# HELP jvm_gc_collection_seconds Time spent in a given JVM garbage collector in seconds.\n" - + "jvm_gc_collection_seconds_count{gc=\"G1 Young Generation\"} 10.0\n" - + "jvm_gc_collection_seconds_sum{gc=\"G1 Young Generation\"} 0.051\n" - + "jvm_gc_collection_seconds_count{gc=\"G1 Old Generation\"} 0.0\n" - + "jvm_gc_collection_seconds_sum{gc=\"G1 Old Generation\"} 0.0\n" - + "# TYPE resource_group_aggregate_usage_secs summary\n" - + "resource_group_aggregate_usage_secs{cluster=\"standalone\",quantile=\"0.5\"} 2.69245E-4\n" - + 
"resource_group_aggregate_usage_secs{cluster=\"standalone\",quantile=\"0.9\"} 3.49601E-4\n" - + "resource_group_aggregate_usage_secs_count{cluster=\"standalone\"} 13.0\n" - + "resource_group_aggregate_usage_secs_sum{cluster=\"standalone\"} 0.004832498\n" - + "resource_group_aggregate_usage_secs_created{cluster=\"standalone\"} 1.715842140749E9\n" - + "# TYPE metadata_store_ops_latency_ms histogram\n" - + "metadata_store_ops_latency_ms_bucket{cluster=\"standalone\",name=\"metadata-store\",type=\"get\",status=\"success\",le=\"1.0\"} 59.0\n" - + "metadata_store_ops_latency_ms_bucket{cluster=\"standalone\",name=\"metadata-store\",type=\"get\",status=\"success\",le=\"3.0\"} 61.0\n" - + "metadata_store_ops_latency_ms_bucket{cluster=\"standalone\",name=\"metadata-store\",type=\"get\",status=\"success\",le=\"5.0\"} 61.0\n" - + "# EOF"; + String resp = """ + # HELP disk_total_bytes Total space for path + # TYPE disk_total_bytes gauge + disk_total_bytes{path="C:\\\\hertzbeat\\\\repo\\\\testpath",} 4.29496725504E11 + # HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. + # TYPE go_gc_cycles_automatic_gc_cycles_total counter + go_gc_cycles_automatic_gc_cycles_total 0 + # HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. + # TYPE go_gc_cycles_forced_gc_cycles_total counter + go_gc_cycles_forced_gc_cycles_total 0 + # HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. + # TYPE go_gc_cycles_total_gc_cycles_total counter + go_gc_cycles_total_gc_cycles_total 0 + # HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. 
+ # TYPE go_gc_duration_seconds summary + go_gc_duration_seconds{quantile="0"} 0 + go_gc_duration_seconds{quantile="0.25"} 0 + go_gc_duration_seconds{quantile="0.5"} 0 + go_gc_duration_seconds{quantile="0.75"} 0 + go_gc_duration_seconds{quantile="1"} 0 + # TYPE jvm info + # HELP jvm VM version info + jvm_info{runtime="OpenJDK Runtime Environment",vendor="Azul Systems, Inc.",version="11.0.13+8-LTS"} 1.0 + # TYPE jvm_gc_collection_seconds summary + # HELP jvm_gc_collection_seconds Time spent in a given JVM garbage collector in seconds. + jvm_gc_collection_seconds_count{gc="G1 Young Generation"} 10.0 + jvm_gc_collection_seconds_sum{gc="G1 Young Generation"} 0.051 + jvm_gc_collection_seconds_count{gc="G1 Old Generation"} 0.0 + jvm_gc_collection_seconds_sum{gc="G1 Old Generation"} 0.0 + # TYPE resource_group_aggregate_usage_secs summary + resource_group_aggregate_usage_secs{cluster="standalone",quantile="0.5"} 2.69245E-4 + resource_group_aggregate_usage_secs{cluster="standalone",quantile="0.9"} 3.49601E-4 + resource_group_aggregate_usage_secs_count{cluster="standalone"} 13.0 + resource_group_aggregate_usage_secs_sum{cluster="standalone"} 0.004832498 + resource_group_aggregate_usage_secs_created{cluster="standalone"} 1.715842140749E9 + # TYPE metadata_store_ops_latency_ms histogram + metadata_store_ops_latency_ms_bucket{cluster="standalone",name="metadata-store",type="get",status="success",le="1.0"} 59.0 + metadata_store_ops_latency_ms_bucket{cluster="standalone",name="metadata-store",type="get",status="success",le="3.0"} 61.0 + metadata_store_ops_latency_ms_bucket{cluster="standalone",name="metadata-store",type="get",status="success",le="5.0"} 61.0 + # EOF"""; ExporterParser parser = new ExporterParser(); Map metricFamilyMap = parser.textToMetric(resp); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java index 
53b5f081c59..6ca36483eb8 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java @@ -346,13 +346,14 @@ public void testNginxStatusMatch() { @Test public void testReqStatusMatch() { - String urlContent = "zone_name\tkey\tmax_active\tmax_bw\ttraffic\trequests\tactive\tbandwidth\n" + - "server_addr\t172.17.0.3\t2\t 440\t68K\t23\t1\t 0\n" + - "server_name\tlocalhost\t2\t 440\t68K\t23\t1\t 0\n" + - "server_url\tlocalhost/\t1\t 0\t 0\t4\t0\t 0\n" + - "server_url\tlocalhost/index.html\t1\t 104\t27K\t4\t0\t 0\n" + - "server_url\tlocalhost/nginx-status\t1\t 32\t 9896\t5\t0\t 0\n" + - "server_url\tlocalhost/req-status\t1\t 0\t31K\t10\t1\t 0"; + String urlContent = """ + zone_name\tkey\tmax_active\tmax_bw\ttraffic\trequests\tactive\tbandwidth + server_addr\t172.17.0.3\t2\t 440\t68K\t23\t1\t 0 + server_name\tlocalhost\t2\t 440\t68K\t23\t1\t 0 + server_url\tlocalhost/\t1\t 0\t 0\t4\t0\t 0 + server_url\tlocalhost/index.html\t1\t 104\t27K\t4\t0\t 0 + server_url\tlocalhost/nginx-status\t1\t 32\t 9896\t5\t0\t 0 + server_url\tlocalhost/req-status\t1\t 0\t31K\t10\t1\t 0"""; String[] lines = urlContent.split("\\r?\\n"); List zoneNames = new ArrayList<>(); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/util/PrivateKeyUtilsTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/util/PrivateKeyUtilsTest.java index a6bcb56f448..68f611aa2dd 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/util/PrivateKeyUtilsTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/util/PrivateKeyUtilsTest.java @@ -27,33 +27,34 @@ class PrivateKeyUtilsTest { @DisplayName("write key to ~/.ssh") @Test void writePrivateKey() throws IOException { - var key = "-----BEGIN RSA PRIVATE KEY-----\n" - + "MIIEogIBAAKCAQEA4ctFYk/xy89L6/6YFeeMrwCW9lCP/ThXMn+9G63s5bGn4oIN\n" - + 
"8cEf/JYkmGw8vMP41IAP9dyH8ji2wIZSLeTPWucEK6P6jA01iIBQ95ng6RTsnQgL\n" - + "h4pYHxlEaNHcXkjy5GlMdzaWadjdRevpThGR1VOtWFtK3yoC0c/te2Junu04f+11\n" - + "cpk8QvmVfzrBUooVnG0/7oekwUy1c5sSl0qVoLzXOv4XG9w34cyvacFC30zv1Nl8\n" - + "ASi2pmOBVx9njPvqQ7qZrDk0nwn+RZUmGh/PbmHxrBV7ZA5NjZcEnf2VGIfjGUVu\n" - + "qE4VnkbvS4j03afV2rsp1yo74K+k/ZC6GCHB5QIBIwKCAQBG9r4I9I3SVxfcdJYy\n" - + "xR2WFiDREgFeNkdKYqkl9NVsws5dIY9am8g5cQQv54DNnK1KGZ6dulaclXtD0nGZ\n" - + "ZSs505OYr+EHcd2f7dBN0Uavp32QcD4jSLycD0FixZ0HsIbaEnceJxlUd1t8YBYf\n" - + "2aLcpUUbxOulORbUOgjPAa286uDeQYN5IbdruDfvbuFFm7hBoGZoKLJ7FPcJ0U3A\n" - + "14KRK+Z1oCYJIS0ubaHbhaPIVPPQEmTNHpsvxIJXfZtVy9+XIuBGmD3+Aq6SSFPC\n" - + "A8mU1iKzzdRCXZwvPeUiivIIZc6DRXjhtJ2Lya/XndKidOT/QUj8Z+f9pWAonlzM\n" - + "3PMXAoGBAPvzctkkDjUJjLyEuYQq8soYokS4n4ykFTP5oFgnodK/cYocbxTT6Tn9\n" - + "vH7b6lK6ZAf+tZk8rcEeIO650pOvmaa1/OuZSxfcFUGBvOvYXiHF7zmkePh/pQgB\n" - + "7Cl0RYrI52Cjbd9aCUIYK3A82qsUq30INGeOhMNrfaHn2pgx8xlDAoGBAOVsNctw\n" - + "CHnLaIQX8eS+eUcQEm+NZppnDBJavdpP48ZZM/t5v/2fQ5ytbYqk0KEzIGu0dP8g\n" - + "jfB76JbMvStvTfB+TrXsfhGyA3oJrEcG+3IUshsRU2sohT1ScY27z2VMLgilnWvF\n" - + "7t49sQm9uB/yn669n8LIciHxDItOpvqgKdG3AoGBAO2NxA6PtZ+4jAIz/19bsbc7\n" - + "zDIqaovrKe8tMMglXg/ZE0e0aLvdvqRkRAKU1Z51Ob5lLuDwEYoyWZCgk1gL90Vp\n" - + "wpT+P3zlcyCBo39IWMDB8C8IydRbF/GbaaNtoKds92m+qWwwUd87XCf+3M0wvvI6\n" - + "75TW1PLEbyOgFz8Khh8hAoGBAJbDc87Ul9sCAtp2Ip2hvWk2coPR8vfADz9C8cn5\n" - + "/BShBOcVfipSt2b1n8GCP/TnFU4XgBVeiSkA9+4Rg6AzMzejdY1+JvWvfqCnRVM/\n" - + "GkOnMzZb17tyZi+ck8OKC/IcHkAyUYFWL0GWQSOojvBsPQxt+0V8aEIwsHjNSSha\n" - + "nyNpAoGAd0XqdByRxbWgg5ZsvM0tvrpMITpEZsGMG9VeQPGl0wsQvC2zw5QGLvz/\n" - + "57YhofOOr0M3yElcFA9Imvek5CYZsyL8eIWGZyadfRiYvGOUyvDDO3BYRG4DmhyF\n" - + "KVk3URjEuOCC29ORvZ/7HaCO9iuEbvAA/mrAtd7KdCA+3PzfEOw=\n" - + "-----END RSA PRIVATE KEY-----"; + var key = """ + -----BEGIN RSA PRIVATE KEY----- + MIIEogIBAAKCAQEA4ctFYk/xy89L6/6YFeeMrwCW9lCP/ThXMn+9G63s5bGn4oIN + 8cEf/JYkmGw8vMP41IAP9dyH8ji2wIZSLeTPWucEK6P6jA01iIBQ95ng6RTsnQgL + 
h4pYHxlEaNHcXkjy5GlMdzaWadjdRevpThGR1VOtWFtK3yoC0c/te2Junu04f+11 + cpk8QvmVfzrBUooVnG0/7oekwUy1c5sSl0qVoLzXOv4XG9w34cyvacFC30zv1Nl8 + ASi2pmOBVx9njPvqQ7qZrDk0nwn+RZUmGh/PbmHxrBV7ZA5NjZcEnf2VGIfjGUVu + qE4VnkbvS4j03afV2rsp1yo74K+k/ZC6GCHB5QIBIwKCAQBG9r4I9I3SVxfcdJYy + xR2WFiDREgFeNkdKYqkl9NVsws5dIY9am8g5cQQv54DNnK1KGZ6dulaclXtD0nGZ + ZSs505OYr+EHcd2f7dBN0Uavp32QcD4jSLycD0FixZ0HsIbaEnceJxlUd1t8YBYf + 2aLcpUUbxOulORbUOgjPAa286uDeQYN5IbdruDfvbuFFm7hBoGZoKLJ7FPcJ0U3A + 14KRK+Z1oCYJIS0ubaHbhaPIVPPQEmTNHpsvxIJXfZtVy9+XIuBGmD3+Aq6SSFPC + A8mU1iKzzdRCXZwvPeUiivIIZc6DRXjhtJ2Lya/XndKidOT/QUj8Z+f9pWAonlzM + 3PMXAoGBAPvzctkkDjUJjLyEuYQq8soYokS4n4ykFTP5oFgnodK/cYocbxTT6Tn9 + vH7b6lK6ZAf+tZk8rcEeIO650pOvmaa1/OuZSxfcFUGBvOvYXiHF7zmkePh/pQgB + 7Cl0RYrI52Cjbd9aCUIYK3A82qsUq30INGeOhMNrfaHn2pgx8xlDAoGBAOVsNctw + CHnLaIQX8eS+eUcQEm+NZppnDBJavdpP48ZZM/t5v/2fQ5ytbYqk0KEzIGu0dP8g + jfB76JbMvStvTfB+TrXsfhGyA3oJrEcG+3IUshsRU2sohT1ScY27z2VMLgilnWvF + 7t49sQm9uB/yn669n8LIciHxDItOpvqgKdG3AoGBAO2NxA6PtZ+4jAIz/19bsbc7 + zDIqaovrKe8tMMglXg/ZE0e0aLvdvqRkRAKU1Z51Ob5lLuDwEYoyWZCgk1gL90Vp + wpT+P3zlcyCBo39IWMDB8C8IydRbF/GbaaNtoKds92m+qWwwUd87XCf+3M0wvvI6 + 75TW1PLEbyOgFz8Khh8hAoGBAJbDc87Ul9sCAtp2Ip2hvWk2coPR8vfADz9C8cn5 + /BShBOcVfipSt2b1n8GCP/TnFU4XgBVeiSkA9+4Rg6AzMzejdY1+JvWvfqCnRVM/ + GkOnMzZb17tyZi+ck8OKC/IcHkAyUYFWL0GWQSOojvBsPQxt+0V8aEIwsHjNSSha + nyNpAoGAd0XqdByRxbWgg5ZsvM0tvrpMITpEZsGMG9VeQPGl0wsQvC2zw5QGLvz/ + 57YhofOOr0M3yElcFA9Imvek5CYZsyL8eIWGZyadfRiYvGOUyvDDO3BYRG4DmhyF + KVk3URjEuOCC29ORvZ/7HaCO9iuEbvAA/mrAtd7KdCA+3PzfEOw= + -----END RSA PRIVATE KEY-----"""; PrivateKeyUtils.writePrivateKey("127.0.0.1", key); } } diff --git a/common/src/main/java/org/apache/hertzbeat/common/entity/manager/NoticeTemplate.java b/common/src/main/java/org/apache/hertzbeat/common/entity/manager/NoticeTemplate.java index 7f1a99deaab..45788f64521 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/entity/manager/NoticeTemplate.java +++ 
b/common/src/main/java/org/apache/hertzbeat/common/entity/manager/NoticeTemplate.java @@ -89,14 +89,15 @@ public class NoticeTemplate { @Schema(title = "Template content", description = "Template content", - example = "[${title}]\n" - + "${targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if (monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "<#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} \n" - + "${priorityLabel} : ${priority}\n" - + "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}", accessMode = READ_WRITE) + example = """ + [${title}] + ${targetLabel} : ${target} + <#if (monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + <#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}""", accessMode = READ_WRITE) @Size(max = 60000) @Lob @NotBlank diff --git a/common/src/main/java/org/apache/hertzbeat/common/util/AesUtil.java b/common/src/main/java/org/apache/hertzbeat/common/util/AesUtil.java index 2068397d9e0..b5681aac8b5 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/util/AesUtil.java +++ b/common/src/main/java/org/apache/hertzbeat/common/util/AesUtil.java @@ -106,15 +106,7 @@ public static String aesEncode(String content, String encryptKey) { */ public static String aesDecode(String content, String decryptKey) { try { - SecretKeySpec keySpec = new SecretKeySpec(decryptKey.getBytes(StandardCharsets.UTF_8), AES); - // cipher based on the algorithm AES - Cipher cipher = Cipher.getInstance(ALGORITHM_STR); - // init cipher Encrypt_mode or Decrypt_mode operation, the second parameter is the KEY used - cipher.init(Cipher.DECRYPT_MODE, keySpec, new IvParameterSpec(decryptKey.getBytes(StandardCharsets.UTF_8))); - // base64 decode content - byte[] bytesContent = Base64.getDecoder().decode(content); - // decode 
content to byte array - byte[] byteDecode = cipher.doFinal(bytesContent); + byte[] byteDecode = getBytes(content, decryptKey); return new String(byteDecode, StandardCharsets.UTF_8); } catch (BadPaddingException e) { if (!ENCODE_RULES.equals(decryptKey)) { @@ -134,7 +126,19 @@ public static String aesDecode(String content, String decryptKey) { } return content; } - + + private static byte[] getBytes(final String content, final String decryptKey) throws Exception { + SecretKeySpec keySpec = new SecretKeySpec(decryptKey.getBytes(StandardCharsets.UTF_8), AES); + // cipher based on the algorithm AES + Cipher cipher = Cipher.getInstance(ALGORITHM_STR); + // init cipher Encrypt_mode or Decrypt_mode operation, the second parameter is the KEY used + cipher.init(Cipher.DECRYPT_MODE, keySpec, new IvParameterSpec(decryptKey.getBytes(StandardCharsets.UTF_8))); + // base64 decode content + byte[] bytesContent = Base64.getDecoder().decode(content); + // decode content to byte array + return cipher.doFinal(bytesContent); + } + /** * Determine whether it is encrypted * @param text text @@ -145,11 +149,7 @@ public static boolean isCiphertext(String text, String decryptKey) { if (Base64Util.isBase64(text)) { // if it is base64, decrypt directly to determine try { - SecretKeySpec keySpec = new SecretKeySpec(decryptKey.getBytes(StandardCharsets.UTF_8), AES); - Cipher cipher = Cipher.getInstance(ALGORITHM_STR); - cipher.init(Cipher.DECRYPT_MODE, keySpec, new IvParameterSpec(decryptKey.getBytes(StandardCharsets.UTF_8))); - byte[] bytesContent = Base64.getDecoder().decode(text); - byte[] byteDecode = cipher.doFinal(bytesContent); + byte[] byteDecode = getBytes(text, decryptKey); return byteDecode != null; } catch (Exception e) { log.warn("isCiphertext method error: {}", e.getMessage()); diff --git a/common/src/main/java/org/apache/hertzbeat/common/util/CommonUtil.java b/common/src/main/java/org/apache/hertzbeat/common/util/CommonUtil.java index a84206f380f..88c0e716798 100644 --- 
a/common/src/main/java/org/apache/hertzbeat/common/util/CommonUtil.java +++ b/common/src/main/java/org/apache/hertzbeat/common/util/CommonUtil.java @@ -178,16 +178,16 @@ public static String getMessageFromThrowable(Throwable throwable) { if (cause != null) { message = cause.getMessage(); } - if (message == null || "".equals(message)) { + if (message == null || StringUtils.isBlank(message)) { message = throwable.getMessage(); } - if (message == null || "".equals(message)) { + if (message == null || StringUtils.isBlank(message)) { message = throwable.getLocalizedMessage(); } - if (message == null || "".equals(message)) { + if (message == null || StringUtils.isBlank(message)) { message = throwable.toString(); } - if (message == null || "".equals(message)) { + if (message == null || StringUtils.isBlank(message)) { message = "unknown error."; } return message; diff --git a/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java b/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java index 886a5200e18..4c4c1396bc4 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java +++ b/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java @@ -53,7 +53,7 @@ private IpDomainUtil() { * @return true-yes false-no */ public static boolean validateIpDomain(String ipDomain) { - if (ipDomain == null || "".equals(ipDomain)) { + if (ipDomain == null || !StringUtils.hasText(ipDomain)) { return false; } ipDomain = ipDomain.trim(); @@ -75,7 +75,7 @@ public static boolean validateIpDomain(String ipDomain) { * @return true or false */ public static boolean isHasSchema(String domainIp) { - if (domainIp == null || "".equals(domainIp)) { + if (domainIp == null || !StringUtils.hasText(domainIp)) { return false; } return DOMAIN_SCHEMA.matcher(domainIp).matches(); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImpl.java 
b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImpl.java index 0ee227fdb33..6c5c4ceaaae 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImpl.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; import lombok.Data; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -85,7 +84,7 @@ public void send(NoticeReceiver receiver, NoticeTemplate noticeTemplate, Alert a atContent.setUserId(userID); return atContent; }) - .collect(Collectors.toList()); + .toList(); contentList.addAll(atContents); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/ConsistentHash.java b/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/ConsistentHash.java index 7e08b7770fa..44d1707aaff 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/ConsistentHash.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/ConsistentHash.java @@ -30,6 +30,7 @@ import lombok.AllArgsConstructor; import lombok.Getter; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import org.apache.hertzbeat.common.constants.CommonConstants; /** @@ -215,7 +216,7 @@ public List getDispatchJobCache() { * @return collector node */ public Node dispatchJob(String dispatchKey, Long jobId) { - if (dispatchKey == null || "".equals(dispatchKey)) { + if (dispatchKey == null || StringUtils.isBlank(dispatchKey)) { log.error("The dispatch key can not null."); return null; } @@ -230,7 +231,7 @@ public Node dispatchJob(String dispatchKey, Long jobId) { * @return collector node */ public Node preDispatchJob(String dispatchKey) { - if (dispatchKey == null || "".equals(dispatchKey)) { + if (dispatchKey == 
null || StringUtils.isBlank(dispatchKey)) { log.error("The dispatch key can not null."); return null; } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java index 079f67bc276..5c6ee688f04 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AbstractImExportServiceImpl.java @@ -62,7 +62,7 @@ public void importConfig(InputStream is) { var formList = parseImport(is) .stream() .map(this::convert) - .collect(Collectors.toUnmodifiableList()); + .toList(); if (!CollectionUtils.isEmpty(formList)) { formList.forEach(monitorDto -> { monitorService.validate(monitorDto, false); @@ -80,7 +80,7 @@ public void exportConfig(OutputStream os, List configList) { .map(it -> monitorService.getMonitorDto(it)) .filter(Objects::nonNull) .map(this::convert) - .collect(Collectors.toUnmodifiableList()); + .toList(); writeOs(monitorList, os); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java index 8f27b29752b..713172b9c1e 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AppServiceImpl.java @@ -130,7 +130,7 @@ public Job getPushDefine(Long monitorId) throws IllegalArgumentException { List params = paramDao.findParamsByMonitorId(monitorId); List configmaps = params.stream() .map(param -> new Configmap(param.getField(), param.getParamValue(), - param.getType())).collect(Collectors.toList()); + param.getType())).toList(); Map configmap = configmaps.stream().collect(Collectors.toMap(Configmap::getKey, item -> item, (key1, key2) -> key1)); 
CollectUtil.replaceFieldsForPushStyleMonitor(metric, configmap); metricsTmp.add(metric); @@ -197,10 +197,10 @@ public List getAppDefineMetricNames(String app) { if (appDefine == null) { throw new IllegalArgumentException("The app " + app + " not support."); } - metricNames.addAll(appDefine.getMetrics().stream().map(Metrics::getName).collect(Collectors.toList())); + metricNames.addAll(appDefine.getMetrics().stream().map(Metrics::getName).toList()); } else { appDefines.forEach((k, v) -> - metricNames.addAll(v.getMetrics().stream().map(Metrics::getName).collect(Collectors.toList()))); + metricNames.addAll(v.getMetrics().stream().map(Metrics::getName).toList())); } return metricNames; } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java index 8672993b70f..b3802dd7f0f 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ExcelImExportServiceImpl.java @@ -29,6 +29,7 @@ import java.util.stream.Collectors; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import org.apache.poi.ss.usermodel.BorderStyle; import org.apache.poi.ss.usermodel.Cell; import org.apache.poi.ss.usermodel.CellStyle; @@ -302,7 +303,7 @@ public void writeOs(List monitorList, OutputStream os) { valueCell.setCellStyle(cellStyle); } } - if (paramList.size() > 0) { + if (CollectionUtils.isNotEmpty(paramList)) { RegionUtil.setBorderTop(BorderStyle.THICK, new CellRangeAddress(rowIndex - paramList.size(), rowIndex - 1, 0, 10), sheet); RegionUtil.setBorderBottom(BorderStyle.THICK, new CellRangeAddress(rowIndex - paramList.size(), rowIndex - 1, 0, 10), sheet); RegionUtil.setBorderLeft(BorderStyle.THICK, new CellRangeAddress(rowIndex - paramList.size(), rowIndex - 
1, 0, 10), sheet); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java index 91f3778ceb0..fb62c740c38 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java @@ -377,7 +377,7 @@ public void validate(MonitorDto monitorDto, Boolean isModify) throws IllegalArgu if (paramDefine.isRequired() && (param == null || param.getParamValue() == null)) { throw new IllegalArgumentException("Params field " + field + " is required."); } - if (param != null && param.getParamValue() != null && !"".equals(param.getParamValue())) { + if (param != null && param.getParamValue() != null && StringUtils.hasText(param.getParamValue())) { switch (paramDefine.getType()) { case "number": double doubleValue; @@ -748,7 +748,7 @@ public void enableManageMonitors(HashSet ids) { new Configmap(param.getField(), param.getParamValue(), param.getType())).collect(Collectors.toList()); List paramDefaultValue = appDefine.getParams().stream() .filter(item -> StringUtils.hasText(item.getDefaultValue())) - .collect(Collectors.toList()); + .toList(); paramDefaultValue.forEach(defaultVar -> { if (configmaps.stream().noneMatch(item -> item.getKey().equals(defaultVar.getField()))) { Configmap configmap = new Configmap(defaultVar.getField(), defaultVar.getDefaultValue(), (byte) 1); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java index efb3a8a6e13..94e0aeb7cd6 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java @@ -31,6 +31,7 @@ import 
java.util.Optional; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import org.apache.hertzbeat.common.cache.CacheFactory; import org.apache.hertzbeat.common.cache.CommonCacheService; import org.apache.hertzbeat.common.constants.CommonConstants; @@ -100,7 +101,7 @@ public List getNoticeReceivers(String name) { public List getNoticeTemplates(String name) { Specification specification = (root, query, criteriaBuilder) -> { Predicate predicate = criteriaBuilder.conjunction(); - if (name != null && !"".equals(name)) { + if (name != null && StringUtils.isNoneBlank(name)) { Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); predicate = criteriaBuilder.and(predicateName); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java index 3dc01e1cd30..4d839d1d09a 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObsObjectStoreServiceImpl.java @@ -22,7 +22,6 @@ import java.io.InputStream; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.manager.pojo.dto.FileDTO; @@ -86,7 +85,7 @@ public List list(String dir) { return obsClient.listObjects(request).getObjects() .stream() .map(it -> new FileDTO(it.getObjectKey(), it.getObjectContent())) - .collect(Collectors.toUnmodifiableList()); + .toList(); } @Override diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DingTalkRobotAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DingTalkRobotAlertNotifyHandlerImplTest.java 
index 0df9e7bc472..a89afc771d0 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DingTalkRobotAlertNotifyHandlerImplTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DingTalkRobotAlertNotifyHandlerImplTest.java @@ -52,14 +52,15 @@ void send() { NoticeTemplate noticeTemplate = new NoticeTemplate(); noticeTemplate.setId(1L); noticeTemplate.setName("dingding"); - noticeTemplate.setContent("#### [${title}]\n" - + "##### **${targetLabel}** : ${target}\n" - + "<#if (monitorId??)>##### **${monitorIdLabel}** : ${monitorId} \n" - + "<#if (monitorName??)>##### **${monitorNameLabel}** : ${monitorName} \n" - + "<#if (monitorHost??)>##### **${monitorHostLabel}** : ${monitorHost} \n" - + "##### **${priorityLabel}** : ${priority}\n" - + "##### **${triggerTimeLabel}** : ${triggerTime}\n" - + "##### **${contentLabel}** : ${content}"); + noticeTemplate.setContent(""" + #### [${title}] + ##### **${targetLabel}** : ${target} + <#if (monitorId??)>##### **${monitorIdLabel}** : ${monitorId} + <#if (monitorName??)>##### **${monitorNameLabel}** : ${monitorName} + <#if (monitorHost??)>##### **${monitorHostLabel}** : ${monitorHost} + ##### **${priorityLabel}** : ${priority} + ##### **${triggerTimeLabel}** : ${triggerTime} + ##### **${contentLabel}** : ${content}"""); Alert alert = new Alert(); alert.setId(1L); alert.setTarget("Mock Target"); diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DiscordBotAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DiscordBotAlertNotifyHandlerImplTest.java index f3573701496..04d56739907 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DiscordBotAlertNotifyHandlerImplTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/DiscordBotAlertNotifyHandlerImplTest.java @@ -53,13 +53,14 @@ void send() { var noticeTemplate = 
new NoticeTemplate(); noticeTemplate.setId(1L); noticeTemplate.setName("DiscordBot"); - noticeTemplate.setContent("${targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if (monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "<#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} \n" - + "${priorityLabel} : ${priority}\n" - + "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}"); + noticeTemplate.setContent(""" + ${targetLabel} : ${target} + <#if (monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + <#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}"""); var alert = new Alert(); alert.setId(1L); alert.setTarget("Mock Target"); diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImplTest.java index 3d92ae3adb5..f66bc82cec6 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImplTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/FlyBookAlertNotifyHandlerImplTest.java @@ -52,13 +52,14 @@ void send() { NoticeTemplate noticeTemplate = new NoticeTemplate(); noticeTemplate.setId(1L); noticeTemplate.setName("FlyBook"); - noticeTemplate.setContent("{targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if (monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "<#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} \n" - + "${priorityLabel} : ${priority}\n" - + "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}"); + noticeTemplate.setContent(""" + {targetLabel} : ${target} + <#if 
(monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + <#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}"""); Alert alert = new Alert(); alert.setId(1L); alert.setTarget("Mock Target"); diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/HuaweiCloudSmnAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/HuaweiCloudSmnAlertNotifyHandlerImplTest.java index ed25968d88e..fb19e220d20 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/HuaweiCloudSmnAlertNotifyHandlerImplTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/HuaweiCloudSmnAlertNotifyHandlerImplTest.java @@ -73,14 +73,15 @@ void send() throws InterruptedException { var noticeTemplate = new NoticeTemplate(); noticeTemplate.setId(1L); noticeTemplate.setName("HuaWeiCloud"); - noticeTemplate.setContent("[${title}]\n" - + "${targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if (monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "<#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} \n" - + "${priorityLabel} : ${priority}\n" - + "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}"); + noticeTemplate.setContent(""" + [${title}] + ${targetLabel} : ${target} + <#if (monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + <#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}"""); var alert = new Alert(); alert.setId(1L); alert.setTarget("Mock Target"); diff --git 
a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SlackAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SlackAlertNotifyHandlerImplTest.java index d12790e8d66..6bb188f0bfc 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SlackAlertNotifyHandlerImplTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SlackAlertNotifyHandlerImplTest.java @@ -55,14 +55,15 @@ void send() { var noticeTemplate = new NoticeTemplate(); noticeTemplate.setId(1L); noticeTemplate.setName("Slack"); - noticeTemplate.setContent("*[${title}]*\n" - + "${targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if (monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "<#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} \n" - + "${priorityLabel} : ${priority}\n" - + "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}"); + noticeTemplate.setContent(""" + *[${title}]* + ${targetLabel} : ${target} + <#if (monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + <#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}"""); var map = Map.of( CommonConstants.TAG_MONITOR_ID, "Mock monitor id", CommonConstants.TAG_MONITOR_NAME, "Mock monitor name", diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/TelegramBotAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/TelegramBotAlertNotifyHandlerImplTest.java index ec2afb99742..62183b0a8d6 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/TelegramBotAlertNotifyHandlerImplTest.java +++ 
b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/TelegramBotAlertNotifyHandlerImplTest.java @@ -58,14 +58,15 @@ void send() { NoticeTemplate noticeTemplate = new NoticeTemplate(); noticeTemplate.setId(1L); noticeTemplate.setName("Telegram"); - noticeTemplate.setContent("[${title}]\n" - + "${targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if (monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "<#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} \n" - + "${priorityLabel} : ${priority}\n" - + "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}"); + noticeTemplate.setContent(""" + [${title}] + ${targetLabel} : ${target} + <#if (monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + <#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}"""); Map map = new HashMap<>(); map.put(CommonConstants.TAG_MONITOR_ID, "Mock monitor id"); map.put(CommonConstants.TAG_MONITOR_NAME, "Mock monitor name"); diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/WeComRobotAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/WeComRobotAlertNotifyHandlerImplTest.java index 1fde648e2b5..23b8ff110d5 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/WeComRobotAlertNotifyHandlerImplTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/WeComRobotAlertNotifyHandlerImplTest.java @@ -55,14 +55,15 @@ void send() { NoticeTemplate noticeTemplate = new NoticeTemplate(); noticeTemplate.setId(1L); noticeTemplate.setName("WeWork"); - noticeTemplate.setContent("[${title}]\n" - + "${targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if 
(monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "<#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} \n" - + "${priorityLabel} : ${priority}\n" - + "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}"); + noticeTemplate.setContent(""" + [${title}] + ${targetLabel} : ${target} + <#if (monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + <#if (monitorHost??)>${monitorHostLabel} : ${monitorHost} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}"""); Map map = new HashMap<>(); map.put(CommonConstants.TAG_MONITOR_ID, "Mock monitor id"); map.put(CommonConstants.TAG_MONITOR_NAME, "Mock monitor name"); diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/MonitorsControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/MonitorsControllerTest.java index 5c630dcf431..b6fa9139148 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/controller/MonitorsControllerTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/MonitorsControllerTest.java @@ -17,14 +17,6 @@ package org.apache.hertzbeat.manager.controller; -import static org.mockito.Mockito.doNothing; -import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.stream.Collectors; - import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.util.JsonUtil; import org.apache.hertzbeat.manager.service.impl.MonitorServiceImpl; @@ -39,7 +31,15 @@ import org.springframework.test.web.servlet.MockMvc; import org.springframework.test.web.servlet.request.MockMvcRequestBuilders; import org.springframework.test.web.servlet.setup.MockMvcBuilders; -import org.springframework.web.multipart.MultipartFile; + +import java.util.ArrayList; +import 
java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import static org.mockito.Mockito.doNothing; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; /** * Test case for {@link MonitorsController} @@ -128,7 +128,7 @@ void export() throws Exception { String type = "JSON"; this.mockMvc.perform(MockMvcRequestBuilders.get("/api/monitors/export") - .param("ids", String.join(",", ids.stream().map(String::valueOf).collect(Collectors.toList()))) + .param("ids", ids.stream().map(String::valueOf).collect(Collectors.joining(","))) .param("type", type)) .andExpect(status().isOk()) .andReturn(); @@ -137,7 +137,7 @@ void export() throws Exception { @Test void export2() throws Exception { // Mock the behavior of monitorService.importConfig - doNothing().when(monitorService).importConfig((MultipartFile) Mockito.any()); + doNothing().when(monitorService).importConfig(Mockito.any()); // Perform the request and verify the response this.mockMvc.perform(MockMvcRequestBuilders.post("/api/monitors/import") diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java index 8ba3ef29128..1e753d263e1 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java @@ -99,13 +99,14 @@ public NoticeTemplate getNoticeTemplate(){ NoticeTemplate template = new NoticeTemplate(); template.setId(5L); template.setName("Dingding"); - template.setContent("[${title}]\n" - + "${targetLabel} : ${target}\n" - + "<#if (monitorId??)>${monitorIdLabel} : ${monitorId} \n" - + "<#if (monitorName??)>${monitorNameLabel} : ${monitorName} \n" - + "${priorityLabel} : ${priority}\n" - 
+ "${triggerTimeLabel} : ${triggerTime}\n" - + "${contentLabel} : ${content}"); + template.setContent(""" + [${title}] + ${targetLabel} : ${target} + <#if (monitorId??)>${monitorIdLabel} : ${monitorId} + <#if (monitorName??)>${monitorNameLabel} : ${monitorName} + ${priorityLabel} : ${priority} + ${triggerTimeLabel} : ${triggerTime} + ${contentLabel} : ${content}"""); template.setType((byte) 5); return template; diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java index fcb91ae6cbd..fc66ea556d3 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/MonitorServiceTest.java @@ -17,25 +17,6 @@ package org.apache.hertzbeat.manager.service; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; import org.apache.hertzbeat.alert.dao.AlertDefineBindDao; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.alerter.Alert; @@ -72,6 +53,26 @@ import org.springframework.data.domain.PageRequest; import org.springframework.data.jpa.domain.Specification; +import java.util.ArrayList; +import java.util.Collections; +import 
java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + /** * newBranch feature-clickhouse#179 * ... @@ -742,7 +743,7 @@ void addNewMonitorOptionalMetrics() { when(appService.getAppDefine(monitor.getApp())).thenReturn(job); List params = Collections.singletonList(new Param()); - List metrics = Arrays.asList(); + List metrics = List.of(); try { monitorService.addNewMonitorOptionalMetrics(metrics, monitor, params); } catch (MonitorMetricsException e) { @@ -750,7 +751,7 @@ void addNewMonitorOptionalMetrics() { } reset(); when(monitorDao.save(monitor)).thenThrow(RuntimeException.class); - metrics = Arrays.asList("metric-001"); + metrics = List.of("metric-001"); List metricsDefine = new ArrayList<>(); Metrics e = new Metrics(); e.setName("metric-001"); diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java index d0c499563eb..1e039188f25 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeDbDataStorage.java @@ -47,6 +47,7 @@ import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.ObjectUtils; +import 
org.apache.commons.lang3.StringUtils; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.dto.Value; import org.apache.hertzbeat.common.entity.message.CollectRep; @@ -57,374 +58,373 @@ /** * GreptimeDB data storage, only supports GreptimeDB version >= v0.5 - * */ @Component @ConditionalOnProperty(prefix = "warehouse.store.greptime", name = "enabled", havingValue = "true") @Slf4j public class GreptimeDbDataStorage extends AbstractHistoryDataStorage { - + private static final String CONSTANT_DB_TTL = "30d"; - + private static final String QUERY_HISTORY_SQL = "SELECT CAST (ts AS Int64) ts, instance, `%s` FROM `%s` WHERE ts >= now() - interval '%s' and monitor_id = %s order by ts desc;"; - + @SuppressWarnings("checkstyle:LineLength") private static final String QUERY_HISTORY_WITH_INSTANCE_SQL = "SELECT CAST (ts AS Int64) ts, instance, `%s` FROM `%s` WHERE ts >= now() - interval '%s' and monitor_id = %s and instance = '%s' order by ts desc;"; - + private static final String QUERY_INSTANCE_SQL = "SELECT DISTINCT instance FROM `%s` WHERE ts >= now() - interval '1 WEEK'"; - + @SuppressWarnings("checkstyle:LineLength") private static final String QUERY_HISTORY_INTERVAL_WITH_INSTANCE_SQL = "SELECT CAST (ts AS Int64) ts, first_value(`%s`) range '4h' first, avg(`%s`) range '4h' avg, min(`%s`) range '4h' min, max(`%s`) range '4h' max FROM `%s` WHERE instance = '%s' AND ts >= now() - interval '%s' ALIGN '4h'"; - + private static final String TABLE_NOT_EXIST = "not found"; - + private static final String CONSTANTS_CREATE_DATABASE = "CREATE DATABASE IF NOT EXISTS `%s` WITH(ttl='%s')"; - + private static final Runnable INSTANCE_EXCEPTION_PRINT = () -> { - if (log.isErrorEnabled()) { - log.error(""" - \t---------------GreptimeDB Init Failed--------------- - \t--------------Please Config GreptimeDB-------------- - t-----------Can Not Use Metric History Now----------- - """); - } + if (log.isErrorEnabled()) { + log.error(""" + 
\t---------------GreptimeDB Init Failed--------------- + \t--------------Please Config GreptimeDB-------------- + t-----------Can Not Use Metric History Now----------- + """); + } }; - + private HikariDataSource hikariDataSource; - + private GreptimeDB greptimeDb; - + public GreptimeDbDataStorage(GreptimeProperties greptimeProperties) { - if (greptimeProperties == null) { - log.error("init error, please config Warehouse GreptimeDB props in application.yml"); - throw new IllegalArgumentException("please config Warehouse GreptimeDB props"); - } - - serverAvailable = initGreptimeDbClient(greptimeProperties) && initGreptimeDbDataSource(greptimeProperties); + if (greptimeProperties == null) { + log.error("init error, please config Warehouse GreptimeDB props in application.yml"); + throw new IllegalArgumentException("please config Warehouse GreptimeDB props"); + } + + serverAvailable = initGreptimeDbClient(greptimeProperties) && initGreptimeDbDataSource(greptimeProperties); } - + private void initGreptimeDb(final GreptimeProperties greptimeProperties) throws SQLException { - final DriverPropertyInfo[] properties = new Driver().getPropertyInfo(greptimeProperties.url(), null); - final String host = ObjectUtils.requireNonEmpty(properties[0].value); - final String port = ObjectUtils.requireNonEmpty(properties[1].value); - final String dbName = ObjectUtils.requireNonEmpty(properties[2].value); - - String ttl = greptimeProperties.expireTime(); - if (ttl == null || "".equals(ttl.trim())) { - ttl = CONSTANT_DB_TTL; - } - - try (final Connection tempConnection = DriverManager.getConnection("jdbc:mysql://" + host + ":" + port, - greptimeProperties.username(), greptimeProperties.password()); - final PreparedStatement pstmt = tempConnection - .prepareStatement(String.format(CONSTANTS_CREATE_DATABASE, dbName, ttl))) { - log.info("[warehouse greptime] try to create database `{}` if not exists", dbName); - pstmt.execute(); - } + final DriverPropertyInfo[] properties = new 
Driver().getPropertyInfo(greptimeProperties.url(), null); + final String host = ObjectUtils.requireNonEmpty(properties[0].value); + final String port = ObjectUtils.requireNonEmpty(properties[1].value); + final String dbName = ObjectUtils.requireNonEmpty(properties[2].value); + + String ttl = greptimeProperties.expireTime(); + if (ttl == null || StringUtils.isBlank(ttl.trim())) { + ttl = CONSTANT_DB_TTL; + } + + try (final Connection tempConnection = DriverManager.getConnection("jdbc:mysql://" + host + ":" + port, + greptimeProperties.username(), greptimeProperties.password()); + final PreparedStatement pstmt = tempConnection + .prepareStatement(String.format(CONSTANTS_CREATE_DATABASE, dbName, ttl))) { + log.info("[warehouse greptime] try to create database `{}` if not exists", dbName); + pstmt.execute(); + } } - + private boolean initGreptimeDbClient(GreptimeProperties greptimeProperties) { - String endpoints = greptimeProperties.grpcEndpoints(); - try { - final DriverPropertyInfo[] properties = new Driver().getPropertyInfo(greptimeProperties.url(), null); - final String dbName = ObjectUtils.requireNonEmpty(properties[2].value); - - GreptimeOptions opts = GreptimeOptions.newBuilder(endpoints.split(","), dbName) // - .writeMaxRetries(3) // - .authInfo(new AuthInfo(greptimeProperties.username(), greptimeProperties.password())) - .routeTableRefreshPeriodSeconds(30) // - .build(); - - this.greptimeDb = GreptimeDB.create(opts); - } catch (Exception e) { - log.error("[warehouse greptime] Fail to start GreptimeDB client"); - return false; - } - - return true; + String endpoints = greptimeProperties.grpcEndpoints(); + try { + final DriverPropertyInfo[] properties = new Driver().getPropertyInfo(greptimeProperties.url(), null); + final String dbName = ObjectUtils.requireNonEmpty(properties[2].value); + + GreptimeOptions opts = GreptimeOptions.newBuilder(endpoints.split(","), dbName) // + .writeMaxRetries(3) // + .authInfo(new AuthInfo(greptimeProperties.username(), 
greptimeProperties.password())) + .routeTableRefreshPeriodSeconds(30) // + .build(); + + this.greptimeDb = GreptimeDB.create(opts); + } catch (Exception e) { + log.error("[warehouse greptime] Fail to start GreptimeDB client"); + return false; + } + + return true; } - + private boolean initGreptimeDbDataSource(final GreptimeProperties greptimeProperties) { - try { - initGreptimeDb(greptimeProperties); - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error(e.getMessage(), e); - } - - INSTANCE_EXCEPTION_PRINT.run(); - return false; - } - - final HikariConfig config = new HikariConfig(); - // jdbc properties - config.setJdbcUrl(greptimeProperties.url()); - config.setUsername(greptimeProperties.username()); - config.setPassword(greptimeProperties.password()); - config.setDriverClassName(greptimeProperties.driverClassName()); - // minimum number of idle connection - config.setMinimumIdle(10); - // maximum number of connection in the pool - config.setMaximumPoolSize(10); - // maximum wait milliseconds for get connection from pool - config.setConnectionTimeout(30000); - // maximum lifetime for each connection - config.setMaxLifetime(0); - // max idle time for recycle idle connection - config.setIdleTimeout(0); - // validation query - config.setConnectionTestQuery("select 1"); - try { - this.hikariDataSource = new HikariDataSource(config); - } catch (Exception e) { - INSTANCE_EXCEPTION_PRINT.run(); - return false; - } - return true; + try { + initGreptimeDb(greptimeProperties); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error(e.getMessage(), e); + } + + INSTANCE_EXCEPTION_PRINT.run(); + return false; + } + + final HikariConfig config = new HikariConfig(); + // jdbc properties + config.setJdbcUrl(greptimeProperties.url()); + config.setUsername(greptimeProperties.username()); + config.setPassword(greptimeProperties.password()); + config.setDriverClassName(greptimeProperties.driverClassName()); + // minimum number of idle connection + 
config.setMinimumIdle(10); + // maximum number of connection in the pool + config.setMaximumPoolSize(10); + // maximum wait milliseconds for get connection from pool + config.setConnectionTimeout(30000); + // maximum lifetime for each connection + config.setMaxLifetime(0); + // max idle time for recycle idle connection + config.setIdleTimeout(0); + // validation query + config.setConnectionTestQuery("select 1"); + try { + this.hikariDataSource = new HikariDataSource(config); + } catch (Exception e) { + INSTANCE_EXCEPTION_PRINT.run(); + return false; + } + return true; } - + @Override public void saveData(CollectRep.MetricsData metricsData) { - if (!isServerAvailable() || metricsData.getCode() != CollectRep.Code.SUCCESS) { - return; - } - if (metricsData.getValuesList().isEmpty()) { - log.info("[warehouse greptime] flush metrics data {} is null, ignore.", metricsData.getId()); - return; - } - String monitorId = String.valueOf(metricsData.getId()); - String tableName = getTableName(metricsData.getApp(), metricsData.getMetrics()); - TableSchema.Builder tableSchemaBuilder = TableSchema.newBuilder(tableName); - - tableSchemaBuilder.addTag("monitor_id", DataType.String) // - .addTag("instance", DataType.String) // - .addTimestamp("ts", DataType.TimestampMillisecond); - - List fieldsList = metricsData.getFieldsList(); - for (CollectRep.Field field : fieldsList) { - // handle field type - if (field.getType() == CommonConstants.TYPE_NUMBER) { - tableSchemaBuilder.addField(field.getName(), DataType.Float64); - } else if (field.getType() == CommonConstants.TYPE_STRING) { - tableSchemaBuilder.addField(field.getName(), DataType.String); - } - } - Table table = Table.from(tableSchemaBuilder.build()); - - try { - long now = System.currentTimeMillis(); - Object[] values = new Object[3 + fieldsList.size()]; - values[0] = monitorId; - values[2] = now; - for (CollectRep.ValueRow valueRow : metricsData.getValuesList()) { - Map labels = new HashMap<>(8); - for (int i = 0; i < 
fieldsList.size(); i++) { - if (!CommonConstants.NULL_VALUE.equals(valueRow.getColumns(i))) { - CollectRep.Field field = fieldsList.get(i); - if (field.getType() == CommonConstants.TYPE_NUMBER) { - values[3 + i] = Double.parseDouble(valueRow.getColumns(i)); - } else if (field.getType() == CommonConstants.TYPE_STRING) { - values[3 + i] = valueRow.getColumns(i); - } - if (field.getLabel()) { - labels.put(field.getName(), String.valueOf(values[3 + i])); - } - } else { - values[3 + i] = null; - } - } - values[1] = JsonUtil.toJson(labels); - table.addRow(values); - } - - CompletableFuture> writeFuture = greptimeDb.write(table); - try { - Result result = writeFuture.get(10, TimeUnit.SECONDS); - if (result.isOk()) { - log.debug("[warehouse greptime]-Write successful"); - } else { - log.warn("[warehouse greptime]--Write failed: {}", result.getErr()); - } - } catch (Throwable throwable) { - log.error("[warehouse greptime]--Error occurred: {}", throwable.getMessage()); - } - } catch (Exception e) { - log.error("[warehouse greptime]--Error: {}", e.getMessage(), e); - } + if (!isServerAvailable() || metricsData.getCode() != CollectRep.Code.SUCCESS) { + return; + } + if (metricsData.getValuesList().isEmpty()) { + log.info("[warehouse greptime] flush metrics data {} is null, ignore.", metricsData.getId()); + return; + } + String monitorId = String.valueOf(metricsData.getId()); + String tableName = getTableName(metricsData.getApp(), metricsData.getMetrics()); + TableSchema.Builder tableSchemaBuilder = TableSchema.newBuilder(tableName); + + tableSchemaBuilder.addTag("monitor_id", DataType.String) // + .addTag("instance", DataType.String) // + .addTimestamp("ts", DataType.TimestampMillisecond); + + List fieldsList = metricsData.getFieldsList(); + for (CollectRep.Field field : fieldsList) { + // handle field type + if (field.getType() == CommonConstants.TYPE_NUMBER) { + tableSchemaBuilder.addField(field.getName(), DataType.Float64); + } else if (field.getType() == 
CommonConstants.TYPE_STRING) { + tableSchemaBuilder.addField(field.getName(), DataType.String); + } + } + Table table = Table.from(tableSchemaBuilder.build()); + + try { + long now = System.currentTimeMillis(); + Object[] values = new Object[3 + fieldsList.size()]; + values[0] = monitorId; + values[2] = now; + for (CollectRep.ValueRow valueRow : metricsData.getValuesList()) { + Map labels = new HashMap<>(8); + for (int i = 0; i < fieldsList.size(); i++) { + if (!CommonConstants.NULL_VALUE.equals(valueRow.getColumns(i))) { + CollectRep.Field field = fieldsList.get(i); + if (field.getType() == CommonConstants.TYPE_NUMBER) { + values[3 + i] = Double.parseDouble(valueRow.getColumns(i)); + } else if (field.getType() == CommonConstants.TYPE_STRING) { + values[3 + i] = valueRow.getColumns(i); + } + if (field.getLabel()) { + labels.put(field.getName(), String.valueOf(values[3 + i])); + } + } else { + values[3 + i] = null; + } + } + values[1] = JsonUtil.toJson(labels); + table.addRow(values); + } + + CompletableFuture> writeFuture = greptimeDb.write(table); + try { + Result result = writeFuture.get(10, TimeUnit.SECONDS); + if (result.isOk()) { + log.debug("[warehouse greptime]-Write successful"); + } else { + log.warn("[warehouse greptime]--Write failed: {}", result.getErr()); + } + } catch (Throwable throwable) { + log.error("[warehouse greptime]--Error occurred: {}", throwable.getMessage()); + } + } catch (Exception e) { + log.error("[warehouse greptime]--Error: {}", e.getMessage(), e); + } } - + @Override public Map> getHistoryMetricData(Long monitorId, String app, String metrics, String metric, - String label, String history) { - Map> instanceValuesMap = new HashMap<>(8); - if (!isServerAvailable()) { - INSTANCE_EXCEPTION_PRINT.run(); - return instanceValuesMap; - } - - String table = getTableName(app, metrics); - - String interval = history2interval(history); - String selectSql = label == null ? 
String.format(QUERY_HISTORY_SQL, metric, table, interval, monitorId) - : String.format(QUERY_HISTORY_WITH_INSTANCE_SQL, metric, table, interval, monitorId, label); - - if (log.isDebugEnabled()) { - log.debug("[warehouse greptime] getHistoryMetricData SQL: {}", selectSql); - } - - try (Connection connection = hikariDataSource.getConnection(); - Statement statement = connection.createStatement(); - ResultSet resultSet = statement.executeQuery(selectSql)) { - while (resultSet.next()) { - long ts = resultSet.getLong(1); - if (ts == 0) { - if (log.isErrorEnabled()) { - log.error("[warehouse greptime] getHistoryMetricData query result timestamp is 0, ignore. {}.", - selectSql); - } - continue; - } - String instanceValue = resultSet.getString(2); - if (instanceValue == null || "".equals(instanceValue)) { - instanceValue = ""; - } - double value = resultSet.getDouble(3); - String strValue = double2decimalString(value); - - List valueList = instanceValuesMap.computeIfAbsent(instanceValue, k -> new LinkedList<>()); - valueList.add(new Value(strValue, ts)); - } - return instanceValuesMap; - } catch (SQLException sqlException) { - String msg = sqlException.getMessage(); - if (msg != null && !msg.contains(TABLE_NOT_EXIST)) { - if (log.isWarnEnabled()) { - log.warn("[warehouse greptime] failed to getHistoryMetricData: " + sqlException.getMessage()); - } - } - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error("[warehouse greptime] failed to getHistoryMetricData:" + e.getMessage(), e); - } - } - return instanceValuesMap; + String label, String history) { + Map> instanceValuesMap = new HashMap<>(8); + if (!isServerAvailable()) { + INSTANCE_EXCEPTION_PRINT.run(); + return instanceValuesMap; + } + + String table = getTableName(app, metrics); + + String interval = history2interval(history); + String selectSql = label == null ? 
String.format(QUERY_HISTORY_SQL, metric, table, interval, monitorId) + : String.format(QUERY_HISTORY_WITH_INSTANCE_SQL, metric, table, interval, monitorId, label); + + if (log.isDebugEnabled()) { + log.debug("[warehouse greptime] getHistoryMetricData SQL: {}", selectSql); + } + + try (Connection connection = hikariDataSource.getConnection(); + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(selectSql)) { + while (resultSet.next()) { + long ts = resultSet.getLong(1); + if (ts == 0) { + if (log.isErrorEnabled()) { + log.error("[warehouse greptime] getHistoryMetricData query result timestamp is 0, ignore. {}.", + selectSql); + } + continue; + } + String instanceValue = resultSet.getString(2); + if (instanceValue == null || StringUtils.isBlank(instanceValue)) { + instanceValue = ""; + } + double value = resultSet.getDouble(3); + String strValue = double2decimalString(value); + + List valueList = instanceValuesMap.computeIfAbsent(instanceValue, k -> new LinkedList<>()); + valueList.add(new Value(strValue, ts)); + } + return instanceValuesMap; + } catch (SQLException sqlException) { + String msg = sqlException.getMessage(); + if (msg != null && !msg.contains(TABLE_NOT_EXIST)) { + if (log.isWarnEnabled()) { + log.warn("[warehouse greptime] failed to getHistoryMetricData: {}", sqlException.getMessage()); + } + } + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("[warehouse greptime] failed to getHistoryMetricData:{}", e.getMessage(), e); + } + } + return instanceValuesMap; } - + private String getTableName(String app, String metrics) { - return app + "_" + metrics; + return app + "_" + metrics; } - + @Override public Map> getHistoryIntervalMetricData(Long monitorId, String app, String metrics, - String metric, String label, String history) { - if (!isServerAvailable()) { - INSTANCE_EXCEPTION_PRINT.run(); - return Collections.emptyMap(); - } - String table = getTableName(app, metrics); - List instances = 
new LinkedList<>(); - if (label != null && !"".equals(label)) { - instances.add(label); - } - if (instances.isEmpty()) { - String selectSql = String.format(QUERY_INSTANCE_SQL, table); - if (log.isDebugEnabled()) { - log.debug("[warehouse greptime] getHistoryIntervalMetricData sql: {}", selectSql); - } - - try (Connection connection = hikariDataSource.getConnection(); - Statement statement = connection.createStatement(); - ResultSet resultSet = statement.executeQuery(selectSql)) { - while (resultSet.next()) { - String instanceValue = resultSet.getString(1); - if (instanceValue == null || "".equals(instanceValue)) { - instances.add("''"); - } else { - instances.add(instanceValue); - } - } - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error("[warehouse greptime] failed to query instances" + e.getMessage(), e); - } - } - } - - Map> instanceValuesMap = new HashMap<>(instances.size()); - for (String instanceValue : instances) { - String selectSql = String.format(QUERY_HISTORY_INTERVAL_WITH_INSTANCE_SQL, metric, metric, metric, metric, - table, instanceValue, history2interval(history)); - - if (log.isDebugEnabled()) { - log.debug("[warehouse greptime] getHistoryIntervalMetricData sql: {}", selectSql); - } - - List values = instanceValuesMap.computeIfAbsent(instanceValue, k -> new LinkedList<>()); - try (Connection connection = hikariDataSource.getConnection(); - Statement statement = connection.createStatement(); - ResultSet resultSet = statement.executeQuery(selectSql)) { - while (resultSet.next()) { - long ts = resultSet.getLong(1); - if (ts == 0) { - if (log.isErrorEnabled()) { - log.error( - "[warehouse greptime] getHistoryIntervalMetricData query result timestamp is 0, ignore. 
{}.", - selectSql); - } - continue; - } - double origin = resultSet.getDouble(2); - String originStr = double2decimalString(origin); - double avg = resultSet.getDouble(3); - String avgStr = double2decimalString(avg); - double min = resultSet.getDouble(4); - String minStr = double2decimalString(min); - double max = resultSet.getDouble(5); - String maxStr = double2decimalString(max); - Value value = Value.builder().origin(originStr).mean(avgStr).min(minStr).max(maxStr).time(ts) - .build(); - values.add(value); - } - resultSet.close(); - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error("[warehouse greptime] failed to getHistoryIntervalMetricData: " + e.getMessage(), e); - } - } - } - return instanceValuesMap; + String metric, String label, String history) { + if (!isServerAvailable()) { + INSTANCE_EXCEPTION_PRINT.run(); + return Collections.emptyMap(); + } + String table = getTableName(app, metrics); + List instances = new LinkedList<>(); + if (label != null && !StringUtils.isBlank(label)) { + instances.add(label); + } + if (instances.isEmpty()) { + String selectSql = String.format(QUERY_INSTANCE_SQL, table); + if (log.isDebugEnabled()) { + log.debug("[warehouse greptime] getHistoryIntervalMetricData sql: {}", selectSql); + } + + try (Connection connection = hikariDataSource.getConnection(); + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(selectSql)) { + while (resultSet.next()) { + String instanceValue = resultSet.getString(1); + if (instanceValue == null || StringUtils.isBlank(instanceValue)) { + instances.add("''"); + } else { + instances.add(instanceValue); + } + } + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("[warehouse greptime] failed to query instances{}", e.getMessage(), e); + } + } + } + + Map> instanceValuesMap = new HashMap<>(instances.size()); + for (String instanceValue : instances) { + String selectSql = String.format(QUERY_HISTORY_INTERVAL_WITH_INSTANCE_SQL, 
metric, metric, metric, metric, + table, instanceValue, history2interval(history)); + + if (log.isDebugEnabled()) { + log.debug("[warehouse greptime] getHistoryIntervalMetricData sql: {}", selectSql); + } + + List values = instanceValuesMap.computeIfAbsent(instanceValue, k -> new LinkedList<>()); + try (Connection connection = hikariDataSource.getConnection(); + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(selectSql)) { + while (resultSet.next()) { + long ts = resultSet.getLong(1); + if (ts == 0) { + if (log.isErrorEnabled()) { + log.error( + "[warehouse greptime] getHistoryIntervalMetricData query result timestamp is 0, ignore. {}.", + selectSql); + } + continue; + } + double origin = resultSet.getDouble(2); + String originStr = double2decimalString(origin); + double avg = resultSet.getDouble(3); + String avgStr = double2decimalString(avg); + double min = resultSet.getDouble(4); + String minStr = double2decimalString(min); + double max = resultSet.getDouble(5); + String maxStr = double2decimalString(max); + Value value = Value.builder().origin(originStr).mean(avgStr).min(minStr).max(maxStr).time(ts) + .build(); + values.add(value); + } + resultSet.close(); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("[warehouse greptime] failed to getHistoryIntervalMetricData: {}", e.getMessage(), e); + } + } + } + return instanceValuesMap; } - + // TODO(dennis): we can remove it when // https://github.com/GreptimeTeam/greptimedb/issues/4168 is fixed. // default 6h-6 hours: s-seconds, M-minutes, h-hours, d-days, w-weeks private String history2interval(String history) { - if (history == null) { - return null; - } - history = history.trim().toLowerCase(); - - // Be careful, the order matters. 
- return history.replaceAll("d", " day") // - .replaceAll("s", " second") // - .replaceAll("w", " week") // - .replaceAll("h", " hour")// - .replaceAll("m", " minute"); + if (history == null) { + return null; + } + history = history.trim().toLowerCase(); + + // Be careful, the order matters. + return history.replaceAll("d", " day") // + .replaceAll("s", " second") // + .replaceAll("w", " week") // + .replaceAll("h", " hour")// + .replaceAll("m", " minute"); } - + private String double2decimalString(double d) { - return BigDecimal.valueOf(d).setScale(4, RoundingMode.HALF_UP).stripTrailingZeros().toPlainString(); + return BigDecimal.valueOf(d).setScale(4, RoundingMode.HALF_UP).stripTrailingZeros().toPlainString(); } - + @Override public void destroy() { - if (this.greptimeDb != null) { - this.greptimeDb.shutdownGracefully(); - this.greptimeDb = null; - } - if (this.hikariDataSource != null) { - this.hikariDataSource.close(); - hikariDataSource = null; - } + if (this.greptimeDb != null) { + this.greptimeDb.shutdownGracefully(); + this.greptimeDb = null; + } + if (this.hikariDataSource != null) { + this.hikariDataSource.close(); + hikariDataSource = null; + } } } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java index 9792ef5c1d5..29727c34d25 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbDataStorage.java @@ -248,9 +248,12 @@ public Map> getHistoryMetricData(Long monitorId, String app, String label, String history) { Map> instanceValuesMap = new HashMap<>(8); if (!isServerAvailable()) { - log.error("\n\t---------------IotDb Init Failed---------------\n" - + "\t--------------Please Config IotDb--------------\n" - + "\t----------Can Not Use Metric History 
Now----------\n"); + log.error(""" + + \t---------------IotDb Init Failed--------------- + \t--------------Please Config IotDb-------------- + \t----------Can Not Use Metric History Now---------- + """); return instanceValuesMap; } String deviceId = getDeviceId(app, metrics, monitorId, label, true); @@ -309,9 +312,12 @@ public Map> getHistoryIntervalMetricData(Long monitorId, Str String metric, String label, String history) { Map> instanceValuesMap = new HashMap<>(8); if (!isServerAvailable()) { - log.error("\n\t---------------IotDb Init Failed---------------\n" - + "\t--------------Please Config IotDb--------------\n" - + "\t----------Can Not Use Metric History Now----------\n"); + log.error(""" + + \t---------------IotDb Init Failed--------------- + \t--------------Please Config IotDb-------------- + \t----------Can Not Use Metric History Now---------- + """); return instanceValuesMap; } String deviceId = getDeviceId(app, metrics, monitorId, label, true); @@ -408,7 +414,6 @@ private List queryAllDevices(String deviceId) { /** * use ${group}.${app}.${metrics}.${monitor}.${labels} to get device id if there is a way to get instanceId * otherwise use ${group}.${app}.${metrics}.${monitor} - * * Use ${group}.${app}.${metrics}.${monitor}.* to get all instance data when you tend to query */ private String getDeviceId(String app, String metrics, Long monitorId, String labels, boolean useQuote) { diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineDataStorage.java index ead5737f019..8a16182b1ce 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineDataStorage.java @@ -37,6 +37,7 @@ import java.util.regex.Pattern; import lombok.extern.slf4j.Slf4j; import 
org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.dto.Value; import org.apache.hertzbeat.common.entity.message.CollectRep; @@ -337,7 +338,7 @@ public Map> getHistoryMetricData(Long monitorId, String app, continue; } String instanceValue = resultSet.getString(2); - if (instanceValue == null || "".equals(instanceValue)) { + if (instanceValue == null || StringUtils.isBlank(instanceValue)) { instanceValue = ""; } double value = resultSet.getDouble(3); @@ -385,7 +386,7 @@ public Map> getHistoryIntervalMetricData(Long monitorId, Str ResultSet resultSet = statement.executeQuery(queryInstanceSql); while (resultSet.next()) { String instanceValue = resultSet.getString(1); - if (instanceValue == null || "".equals(instanceValue)) { + if (instanceValue == null || StringUtils.isBlank(instanceValue)) { instances.add("''"); } else { instances.add(instanceValue); diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterDataStorage.java index cd25dd1f748..18b1c57d409 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterDataStorage.java @@ -274,9 +274,12 @@ public Map> getHistoryMetricData(Long monitorId, String app, public Map> getHistoryIntervalMetricData(Long monitorId, String app, String metrics, String metric, String label, String history) { if (!serverAvailable) { - log.error("\n\t---------------VictoriaMetrics Init Failed---------------\n" - + "\t--------------Please Config VictoriaMetrics--------------\n" - + "\t----------Can Not Use Metric History Now----------\n"); + log.error(""" + + 
\t---------------VictoriaMetrics Init Failed--------------- + \t--------------Please Config VictoriaMetrics-------------- + \t----------Can Not Use Metric History Now---------- + """); return Collections.emptyMap(); } long endTime = ZonedDateTime.now().toEpochSecond(); diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsDataStorage.java index 2770956a35e..01757f266d2 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsDataStorage.java @@ -281,9 +281,12 @@ public Map> getHistoryMetricData(Long monitorId, String app, public Map> getHistoryIntervalMetricData(Long monitorId, String app, String metrics, String metric, String label, String history) { if (!serverAvailable) { - log.error("\n\t---------------VictoriaMetrics Init Failed---------------\n" - + "\t--------------Please Config VictoriaMetrics--------------\n" - + "\t----------Can Not Use Metric History Now----------\n"); + log.error(""" + + \t---------------VictoriaMetrics Init Failed--------------- + \t--------------Please Config VictoriaMetrics-------------- + \t----------Can Not Use Metric History Now---------- + """); return Collections.emptyMap(); } long endTime = ZonedDateTime.now().toEpochSecond(); From e8dc6482ffb7343abcc960b523335b259e18f969 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Wed, 7 Aug 2024 09:18:36 +0800 Subject: [PATCH 150/257] [improve] add YamlImExportServiceImpl unit test (#2470) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../service/impl/YamlImExportServiceImpl.java | 4 +- .../service/ExcelImExportServiceTest.java | 2 +- .../service/YamlImExportServiceTest.java | 118 ++++++++++++++++++ 3 files changed, 121 insertions(+), 3 deletions(-) create mode 100644 
manager/src/test/java/org/apache/hertzbeat/manager/service/YamlImExportServiceTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java index 5b537514992..7ff529a787d 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/YamlImExportServiceImpl.java @@ -60,7 +60,7 @@ public String getFileName() { * @return form */ @Override - List parseImport(InputStream is) { + public List parseImport(InputStream is) { // todo now disable this, will enable it in the future. // upgrade to snakeyaml 2.2 and springboot3.x to fix the issue Yaml yaml = new Yaml(); @@ -73,7 +73,7 @@ List parseImport(InputStream is) { * @param os output stream */ @Override - void writeOs(List monitorList, OutputStream os) { + public void writeOs(List monitorList, OutputStream os) { DumperOptions options = new DumperOptions(); options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); options.setIndent(2); diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java index 97fa7c11c97..5e125c0d123 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ExcelImExportServiceTest.java @@ -38,7 +38,7 @@ * Test case for {@link ExcelImExportServiceImpl} */ -class ExcelImExportServiceImplTest { +class ExcelImExportServiceTest { @InjectMocks private ExcelImExportServiceImpl excelImExportService; diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/YamlImExportServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/YamlImExportServiceTest.java new file mode 100644 
index 00000000000..a8bb3e09e98 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/YamlImExportServiceTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.service; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; +import org.apache.hertzbeat.manager.service.impl.AbstractImExportServiceImpl; +import org.apache.hertzbeat.manager.service.impl.YamlImExportServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Test case for {@link YamlImExportServiceImpl} + */ + +@ExtendWith(MockitoExtension.class) +class YamlImExportServiceTest { 
+ + @InjectMocks + private YamlImExportServiceImpl yamlImExportService; + + @BeforeEach + void setUp() { + + yamlImExportService = new YamlImExportServiceImpl(); + } + + @Test + void testType() { + + assertEquals("YAML", yamlImExportService.type()); + } + + @Test + void testParseImport() { + + String yamlContent = "- id: 1\n name: Monitor1\n- id: 2\n name: Monitor2"; + InputStream is = new ByteArrayInputStream(yamlContent.getBytes(StandardCharsets.UTF_8)); + + List result = yamlImExportService.parseImport(is); + + assertNotNull(result); + assertEquals(2, result.size()); + assertEquals("[{id=1, name=Monitor1}, {id=2, name=Monitor2}]", result.toString()); + } + + @Test + void testParseImportNull() { + + InputStream is = new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8)); + + List result = yamlImExportService.parseImport(is); + + assertNull(result); + } + + @Test + void testWriteOS() { + + AbstractImExportServiceImpl.ParamDTO paramDTO = new AbstractImExportServiceImpl.ParamDTO(); + paramDTO.setType((byte) 1); + paramDTO.setField("Test"); + paramDTO.setValue("Test"); + AbstractImExportServiceImpl.MonitorDTO monitorDTO = new AbstractImExportServiceImpl.MonitorDTO(); + monitorDTO.setTags(List.of(1L, 2L)); + monitorDTO.setIntervals(1); + monitorDTO.setStatus((byte) 1); + AbstractImExportServiceImpl.ExportMonitorDTO exportMonitorDTO1 = new AbstractImExportServiceImpl.ExportMonitorDTO(); + exportMonitorDTO1.setParams(List.of(paramDTO)); + exportMonitorDTO1.setMonitor(monitorDTO); + exportMonitorDTO1.setMetrics(List.of("Test1", "Test2")); + AbstractImExportServiceImpl.ExportMonitorDTO exportMonitorDTO2 = new AbstractImExportServiceImpl.ExportMonitorDTO(); + exportMonitorDTO2.setParams(List.of(paramDTO)); + exportMonitorDTO2.setMonitor(monitorDTO); + exportMonitorDTO2.setMetrics(List.of("Test1", "Test2")); + + List monitorList = Arrays.asList( + exportMonitorDTO1, + exportMonitorDTO2 + ); + OutputStream os = new ByteArrayOutputStream(); + + 
yamlImExportService.writeOs(monitorList, os); + + String output = os.toString(); + assertTrue(output.contains("metrics:\n - Test1")); + assertTrue(output.contains(" params:\n - &id002\n field: Test")); + } + +} From 9b328ce34c8bc01debb142da06305774c84e7c9e Mon Sep 17 00:00:00 2001 From: YuLuo Date: Wed, 7 Aug 2024 16:32:38 +0800 Subject: [PATCH 151/257] [Improve] add unit test & fix some bugs (#2482) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../impl/MailGeneralConfigServiceImpl.java | 2 +- .../impl/ObjectStoreConfigServiceImpl.java | 13 +++--- .../service/MailGeneralConfigServiceTest.java | 41 +++++++++++++++++++ 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java index 0e49bba9c34..aac05ad6b88 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java @@ -54,7 +54,7 @@ public String type() { * a TypeReference of NoticeSender type */ @Override - protected TypeReference getTypeReference() { + public TypeReference getTypeReference() { return new TypeReference<>() { @Override public Type getType() { diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java index ed7dbef1d59..021b5debcd8 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java @@ -26,7 +26,6 @@ import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreConfigChangeEvent; 
import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreDTO; -import org.apache.poi.ss.formula.functions.T; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.support.DefaultListableBeanFactory; import org.springframework.context.ApplicationContext; @@ -41,7 +40,9 @@ @Order(Ordered.HIGHEST_PRECEDENCE) @Slf4j @Service -public class ObjectStoreConfigServiceImpl extends AbstractGeneralConfigServiceImpl> implements InitializingBean { +public class ObjectStoreConfigServiceImpl extends + AbstractGeneralConfigServiceImpl> implements InitializingBean { + @Resource private DefaultListableBeanFactory beanFactory; @@ -56,7 +57,7 @@ public class ObjectStoreConfigServiceImpl extends AbstractGeneralConfigServiceIm * @param generalConfigDao configDao object * @param objectMapper JSON tool object */ - protected ObjectStoreConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper objectMapper) { + public ObjectStoreConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper objectMapper) { super(generalConfigDao, objectMapper); } @@ -66,7 +67,7 @@ public String type() { } @Override - protected TypeReference> getTypeReference() { + public TypeReference> getTypeReference() { return new TypeReference<>() { @Override public Type getType() { @@ -76,7 +77,7 @@ public Type getType() { } @Override - public void handler(ObjectStoreDTO config) { + public void handler(ObjectStoreDTO config) { // initialize file storage service if (config != null) { if (config.getType() == ObjectStoreDTO.Type.OBS) { @@ -90,7 +91,7 @@ public void handler(ObjectStoreDTO config) { /** * init Huawei Cloud OBS */ - private void initObs(ObjectStoreDTO config) { + private void initObs(ObjectStoreDTO config) { var obsConfig = objectMapper.convertValue(config.getConfig(), ObjectStoreDTO.ObsConfig.class); Assert.hasText(obsConfig.getAccessKey(), "cannot find obs accessKey"); Assert.hasText(obsConfig.getSecretKey(), "cannot find obs secretKey"); diff --git 
a/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java index 7d9ea39793d..5a9fc345458 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java @@ -17,11 +17,52 @@ package org.apache.hertzbeat.manager.service; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.manager.dao.GeneralConfigDao; +import org.apache.hertzbeat.manager.pojo.dto.EmailNoticeSender; import org.apache.hertzbeat.manager.service.impl.MailGeneralConfigServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; /** * test case for {@link MailGeneralConfigServiceImpl} */ +@ExtendWith(MockitoExtension.class) class MailGeneralConfigServiceTest { + + @Mock + private GeneralConfigDao generalConfigDao; + + @Mock + private ObjectMapper objectMapper; + + private MailGeneralConfigServiceImpl mailGeneralConfigService; + + @BeforeEach + void setUp() { + + mailGeneralConfigService = new MailGeneralConfigServiceImpl(generalConfigDao, objectMapper); + } + + @Test + void testType() { + + assertEquals("email", mailGeneralConfigService.type()); + } + + @Test + void testGetTypeReference() { + + TypeReference typeReference = mailGeneralConfigService.getTypeReference(); + + assertEquals(EmailNoticeSender.class, typeReference.getType()); + } + } From e3a0fb45373476f79fffa85ea7ca1b5b068fcb09 Mon Sep 17 00:00:00 2001 From: aias00 Date: Wed, 7 Aug 2024 23:16:46 +0800 Subject: [PATCH 152/257] [improve] add vastbase template (#2487) --- 
.../main/resources/define/app-vastbase.yml | 753 ++++++++++++++++++ 1 file changed, 753 insertions(+) create mode 100644 manager/src/main/resources/define/app-vastbase.yml diff --git a/manager/src/main/resources/define/app-vastbase.yml b/manager/src/main/resources/define/app-vastbase.yml new file mode 100644 index 00000000000..951615ed95c --- /dev/null +++ b/manager/src/main/resources/define/app-vastbase.yml @@ -0,0 +1,753 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: db +# The monitoring type eg: linux windows tomcat mysql aws... +app: vastbase +# The monitoring i18n name +name: + zh-CN: Vastbase数据库 + en-US: Vastbase DB +# The description and help of this monitoring type +help: + zh-CN: HertzBeat 使用 JDBC 协议 通过配置 SQL 对 Vastbase 数据库的通用性能指标 (basic、state、activity etc) 进行采集监控,支持版本为 Vastbase 9.2.4+。
您可以点击“新建 Vastbase 数据库”并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: HertzBeat uses JDBC Protocol to configure SQL for collecting general metrics of Vastbase database (basic、state、activity etc). Supported version is Vastbase 9.2.4+.
You can click "New Vastbase Database" and configure it, or select "More Action" to import the existing configuration. + zh-TW: HertzBeat 使用 JDBC 協議 通過配置 SQL 對 Vastbase 數據庫的通用性能指標 (basic、state、activity etc)進行采集監控,支持版本爲 Vastbase 9.2.4+。
您可以點擊“新建 Vastbase 數據庫”並進行配置,或者選擇“更多操作”,導入已有配置。 +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/vastbase + en-US: https://hertzbeat.apache.org/docs/help/vastbase +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 5432 + - field: timeout + name: + zh-CN: 查询超时时间(ms) + en-US: Query Timeout(ms) + type: number + range: '[400,200000]' + required: false + hide: true + defaultValue: 6000 + - field: database + name: + zh-CN: 数据库名称 + en-US: Database Name + type: text + defaultValue: postgres + required: false + - field: username + name: + zh-CN: 用户名 + en-US: Username + type: text + limit: 50 + required: false + - field: password + name: + zh-CN: 密码 + en-US: Password + type: password + required: false + - field: url + name: + zh-CN: URL + en-US: URL + type: text + required: false + hide: true + +# collect metrics config list +metrics: + # metrics - basic + - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic Info + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: 
server_version + type: 1 + label: true + i18n: + zh-CN: 服务器版本 + en-US: Server Version + - field: port + type: 1 + i18n: + zh-CN: 端口 + en-US: Port + - field: server_encoding + type: 1 + i18n: + zh-CN: 服务器编码 + en-US: Server Encoding + - field: data_directory + type: 1 + i18n: + zh-CN: 数据目录 + en-US: Data Directory + - field: max_connections + type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: jdbc + # the config content when protocol is jdbc + jdbc: + # host: ipv4 ipv6 host + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + # database platform name + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + # SQL Query Method:oneRow, multiRow, columns + queryType: columns + # sql + sql: select name, setting as value from pg_settings where name = 'max_connections' or name = 'server_version' or name = 'server_encoding' or name = 'port' or name = 'data_directory'; + # JDBC url + url: ^_^url^_^ + + - name: state + i18n: + zh-CN: 状态信息 + en-US: State Info + priority: 1 + fields: + - field: db_name + type: 1 + label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name + - field: conflicts + type: 0 + unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts + - field: deadlocks + type: 0 + unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks + - field: blks_read + type: 0 + unit: blocks per second + i18n: + zh-CN: 读取块 + en-US: Blocks Read + - field: blks_hit + type: 0 + unit: blocks per second + i18n: + zh-CN: 命中块 + en-US: Blocks Hit + - field: blk_read_time + type: 0 + unit: ms + i18n: + zh-CN: 读取时间 + en-US: Read Time + - field: blk_write_time + type: 0 + unit: ms + i18n: + zh-CN: 写入时间 + en-US: Write Time + - field: stats_reset + type: 1 + i18n: + zh-CN: 统计重置 + en-US: Stats Reset + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + 
password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: SELECT COALESCE(datname,'shared-object') as db_name, conflicts, deadlocks, blks_read, blks_hit, blk_read_time, blk_write_time, stats_reset from pg_stat_database where (datname != 'template1' and datname != 'template0') or datname is null; + url: ^_^url^_^ + + - name: activity + i18n: + zh-CN: 活动信息 + en-US: Activity Info + priority: 2 + fields: + - field: running + type: 0 + unit: sbc + i18n: + zh-CN: 运行中 + en-US: Running + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: SELECT count(*) as running FROM pg_stat_activity WHERE NOT pid=pg_backend_pid(); + url: ^_^url^_^ + + - name: resource_config + i18n: + zh-CN: 资源配置 + en-US: Resource Config + priority: 1 + fields: + - field: work_mem + type: 0 + unit: MB + i18n: + zh-CN: 工作内存 + en-US: Work Memory + - field: shared_buffers + type: 0 + unit: MB + i18n: + zh-CN: 共享缓冲区 + en-US: Shared Buffers + - field: autovacuum + type: 1 + i18n: + zh-CN: 自动清理 + en-US: Auto Vacuum + - field: max_connections + type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections + - field: effective_cache_size + type: 0 + unit: MB + i18n: + zh-CN: 有效缓存大小 + en-US: Effective Cache Size + - field: wal_buffers + type: 0 + unit: MB + i18n: + zh-CN: WAL缓冲区 + en-US: WAL Buffers + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: columns + sql: show all; + url: ^_^url^_^ + + - name: connection + i18n: + zh-CN: 连接信息 + en-US: Connection Info + priority: 1 + fields: + - field: active + type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: 
^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: select count(1) as active from pg_stat_activity; + url: ^_^url^_^ + + - name: connection_state + i18n: + zh-CN: 连接状态 + en-US: Connection State + priority: 1 + fields: + - field: state + type: 1 + label: true + i18n: + zh-CN: 状态 + en-US: State + - field: num + type: 0 + i18n: + zh-CN: 数量 + en-US: Num + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select COALESCE(state, 'other') as state, count(*) as num from pg_stat_activity group by state; + url: ^_^url^_^ + + - name: connection_db + i18n: + zh-CN: 连接数据库 + en-US: Connection Db + priority: 1 + fields: + - field: db_name + type: 1 + label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name + - field: active + type: 0 + i18n: + zh-CN: 活动连接 + en-US: Active Connection + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select count(*) as active, COALESCE(datname, 'other') as db_name from pg_stat_activity group by datname; + url: ^_^url^_^ + + - name: tuple + i18n: + zh-CN: 元组信息 + en-US: Tuple Info + priority: 1 + fields: + - field: fetched + type: 0 + i18n: + zh-CN: 获取次数 + en-US: Fetched + - field: returned + type: 0 + i18n: + zh-CN: 返回次数 + en-US: Returned + - field: inserted + type: 0 + i18n: + zh-CN: 插入次数 + en-US: Inserted + - field: updated + type: 0 + i18n: + zh-CN: 更新次数 + en-US: Updated + - field: deleted + type: 0 + i18n: + zh-CN: 删除次数 + en-US: Deleted + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select 
sum(tup_fetched) as fetched, sum(tup_updated) as updated, sum(tup_deleted) as deleted, sum(tup_inserted) as inserted, sum(tup_returned) as returned from pg_stat_database; + url: ^_^url^_^ + + - name: temp_file + i18n: + zh-CN: 临时文件 + en-US: Temp File + priority: 1 + fields: + - field: db_name + type: 1 + label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name + - field: num + type: 0 + i18n: + zh-CN: 次数 + en-US: Num + - field: size + type: 0 + unit: B + i18n: + zh-CN: 大小 + en-US: Size + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select COALESCE(datname, 'other') as db_name, sum(temp_files) as num, sum(temp_bytes) as size from pg_stat_database group by datname; + url: ^_^url^_^ + + - name: lock + i18n: + zh-CN: 锁信息 + en-US: Lock Info + priority: 1 + fields: + - field: db_name + type: 1 + label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name + - field: conflicts + type: 0 + unit: times + i18n: + zh-CN: 冲突次数 + en-US: Conflicts + - field: deadlocks + type: 0 + unit: times + i18n: + zh-CN: 死锁次数 + en-US: Deadlocks + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: SELECT COALESCE(datname,'shared-object') as db_name, conflicts, deadlocks from pg_stat_database where (datname != 'template1' and datname != 'template0') or datname is null; + url: ^_^url^_^ + + - name: slow_sql + i18n: + zh-CN: 慢查询 + en-US: Slow Sql + priority: 1 + fields: + - field: sql_text + type: 1 + label: true + i18n: + zh-CN: SQL语句 + en-US: SQL Text + - field: calls + type: 0 + i18n: + zh-CN: 调用次数 + en-US: Calls + - field: rows + type: 0 + i18n: + zh-CN: 行数 + en-US: Rows + - field: avg_time + type: 0 + unit: ms + i18n: + zh-CN: 平均时间 + en-US: Avg Time + - 
field: total_time + type: 0 + unit: ms + i18n: + zh-CN: 总时间 + en-US: Total Time + aliasFields: + - query + - calls + - rows + - total_exec_time + - mean_exec_time + calculates: + - sql_text=query + - avg_time=mean_exec_time + - total_time=total_exec_time + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select * from pg_stat_statements; + url: ^_^url^_^ + + - name: transaction + i18n: + zh-CN: 事务信息 + en-US: Transaction Info + priority: 2 + fields: + - field: db_name + type: 1 + label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name + - field: commits + type: 0 + unit: times + i18n: + zh-CN: 提交次数 + en-US: Commits + - field: rollbacks + type: 0 + unit: times + i18n: + zh-CN: 回滚次数 + en-US: Rollbacks + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select COALESCE(datname, 'other') as db_name, sum(xact_commit) as commits, sum(xact_rollback) as rollbacks from pg_stat_database group by datname; + url: ^_^url^_^ + + - name: conflicts + i18n: + zh-CN: 冲突信息 + en-US: Conflicts Info + priority: 2 + fields: + - field: db_name + type: 1 + label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name + - field: tablespace + type: 0 + i18n: + zh-CN: 表空间 + en-US: Tablespace + - field: lock + type: 0 + i18n: + zh-CN: 锁 + en-US: Lock + - field: snapshot + type: 0 + i18n: + zh-CN: 快照 + en-US: Snapshot + - field: bufferpin + type: 0 + i18n: + zh-CN: 缓冲区 + en-US: Bufferpin + - field: deadlock + type: 0 + i18n: + zh-CN: 死锁 + en-US: Deadlock + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow 
+ sql: select datname as db_name, confl_tablespace as tablespace, confl_lock as lock, confl_snapshot as snapshot, confl_bufferpin as bufferpin, confl_deadlock as deadlock from pg_stat_database_conflicts; + url: ^_^url^_^ + + - name: cache_hit_ratio + i18n: + zh-CN: 缓存命中率 + en-US: Cache Hit Ratio + priority: 2 + fields: + - field: db_name + type: 1 + label: true + i18n: + zh-CN: 数据库名称 + en-US: Database Name + - field: ratio + type: 0 + unit: '%' + i18n: + zh-CN: 命中率 + en-US: Hit Ratio + aliasFields: + - blks_hit + - blks_read + - db_name + calculates: + - ratio=(blks_hit + 1) / (blks_read + blks_hit + 1) * 100 + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: multiRow + sql: select datname as db_name, blks_hit, blks_read from pg_stat_database; + url: ^_^url^_^ + + - name: checkpoint + i18n: + zh-CN: Checkpoint信息 + en-US: Checkpoint Info + priority: 2 + fields: + - field: checkpoint_sync_time + type: 0 + unit: ms + i18n: + zh-CN: Checkpoint同步时间 + en-US: Checkpoint Sync Time + - field: checkpoint_write_time + type: 0 + unit: ms + i18n: + zh-CN: Checkpoint写入时间 + en-US: Checkpoint Write Time + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: select checkpoint_sync_time, checkpoint_write_time from pg_stat_bgwriter; + url: ^_^url^_^ + + - name: buffer + i18n: + zh-CN: Buffer信息 + en-US: Buffer Info + priority: 2 + fields: + - field: allocated + type: 0 + i18n: + zh-CN: 已分配 + en-US: Allocated + - field: fsync_calls_by_backend + type: 0 + i18n: + zh-CN: 后端进程直接执行的文件同步调用次数 + en-US: Fsync Calls By Backend + - field: written_directly_by_backend + type: 0 + i18n: + zh-CN: 后台写入到数据文件 + en-US: Written Directly By Backend + - field: written_by_background_writer + 
type: 0 + i18n: + zh-CN: 后台写入 + en-US: Written By Background Writer + - field: written_during_checkpoints + type: 0 + i18n: + zh-CN: 检查点期间写入 + en-US: Written During Checkpoints + protocol: jdbc + jdbc: + host: ^_^host^_^ + port: ^_^port^_^ + timeout: ^_^timeout^_^ + platform: postgresql + username: ^_^username^_^ + password: ^_^password^_^ + database: ^_^database^_^ + queryType: oneRow + sql: select buffers_alloc as allocated, buffers_backend_fsync as fsync_calls_by_backend, buffers_backend as written_directly_by_backend, buffers_clean as written_by_background_writer, buffers_checkpoint as written_during_checkpoints from pg_stat_bgwriter; + url: ^_^url^_^ From 339a33a0bd6d6041f082a1a9c8bca7518d0795f5 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Wed, 7 Aug 2024 23:18:24 +0800 Subject: [PATCH 153/257] [Improve] add ObsObjectStoreServiceImpl unit test (#2488) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../service/ObsObjectStoreServiceTest.java | 119 ++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java index f0e6a0e2e67..89c76edb6f5 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObsObjectStoreServiceTest.java @@ -17,11 +17,130 @@ package org.apache.hertzbeat.manager.service; +import java.io.InputStream; +import java.util.List; +import com.obs.services.ObsClient; +import com.obs.services.model.ListObjectsRequest; +import com.obs.services.model.ObjectListing; +import com.obs.services.model.PutObjectResult; +import com.obs.services.model.ObsObject; +import org.apache.hertzbeat.manager.pojo.dto.FileDTO; import org.apache.hertzbeat.manager.service.impl.ObsObjectStoreServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; 
+import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * test case for {@link ObsObjectStoreServiceImpl} */ class ObsObjectStoreServiceTest { + + @Mock + private ObsClient obsClient; + + private ObsObjectStoreServiceImpl service; + + private final String bucketName = "test-bucket"; + private final String rootPath = "root/path"; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + this.service = new ObsObjectStoreServiceImpl( + obsClient, + bucketName, + rootPath + ); + } + + @Test + void testUpload() { + + String filePath = "file.txt"; + InputStream is = mock(InputStream.class); + var response = mock(PutObjectResult.class); + + when(obsClient.putObject(eq(bucketName), anyString(), eq(is))).thenReturn(response); + when(response.getStatusCode()).thenReturn(200); + + boolean result = service.upload(filePath, is); + + assertTrue(result); + verify(obsClient, times(1)).putObject(eq(bucketName), anyString(), eq(is)); + } + + @Test + void testRemove() { + + String filePath = "file.txt"; + + service.remove(filePath); + + verify(obsClient, times(1)).deleteObject(eq(bucketName), anyString()); + } + + @Test + void testIsExist() { + + String filePath = "file.txt"; + + when(obsClient.doesObjectExist(eq(bucketName), anyString())).thenReturn(true); + + boolean result = service.isExist(filePath); + + assertTrue(result); + verify(obsClient, times(1)).doesObjectExist(eq(bucketName), 
anyString()); + } + + @Test + void testDownload() { + + String filePath = "file.txt"; + var obsObject = mock(ObsObject.class); + + when(obsClient.getObject(eq(bucketName), anyString())).thenReturn(obsObject); + when(obsObject.getObjectContent()).thenReturn(mock(InputStream.class)); + + FileDTO result = service.download(filePath); + + assertNotNull(result); + assertEquals(filePath, result.getName()); + verify(obsClient, times(1)).getObject(eq(bucketName), anyString()); + } + + @Test + void testList() { + + String dir = "some/dir"; + var listObjectsResponse = mock(ObjectListing.class); + var objectSummary = mock(ObsObject.class); + + when(obsClient.listObjects(any(ListObjectsRequest.class))).thenReturn(listObjectsResponse); + when(listObjectsResponse.getObjects()).thenReturn(List.of(objectSummary)); + when(objectSummary.getObjectKey()).thenReturn("some/dir/file.txt"); + when(objectSummary.getObjectContent()).thenReturn(mock(InputStream.class)); + + List result = service.list(dir); + + assertNotNull(result); + assertFalse(result.isEmpty()); + verify(obsClient, times(1)).listObjects(any(ListObjectsRequest.class)); + } + } From 12fee7fb0188c41152bb4998b9bbf08ae6239284 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Wed, 7 Aug 2024 23:21:03 +0800 Subject: [PATCH 154/257] [improve] add StatusPageServiceImpl unit test (#2491) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../service/StatusPageServiceTest.java | 131 +++++++++++++++++- 1 file changed, 130 insertions(+), 1 deletion(-) diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java index e3714240a42..822531cbe58 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java @@ -17,11 +17,140 @@ package org.apache.hertzbeat.manager.service; +import 
java.util.List; +import org.apache.hertzbeat.common.entity.manager.StatusPageComponent; +import org.apache.hertzbeat.common.entity.manager.StatusPageIncident; +import org.apache.hertzbeat.common.entity.manager.StatusPageOrg; +import org.apache.hertzbeat.manager.component.status.CalculateStatus; +import org.apache.hertzbeat.manager.dao.StatusPageComponentDao; +import org.apache.hertzbeat.manager.dao.StatusPageHistoryDao; +import org.apache.hertzbeat.manager.dao.StatusPageIncidentComponentBindDao; +import org.apache.hertzbeat.manager.dao.StatusPageIncidentDao; +import org.apache.hertzbeat.manager.dao.StatusPageOrgDao; import org.apache.hertzbeat.manager.service.impl.StatusPageServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.springframework.data.domain.Sort; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * test case for {@link StatusPageServiceImpl} */ -class StatusPageServiceTest { +class StatusPageServiceImplTest { + + @Mock + private StatusPageOrgDao statusPageOrgDao; + + @Mock + private StatusPageComponentDao statusPageComponentDao; + + @Mock + private StatusPageHistoryDao statusPageHistoryDao; + + @Mock + private StatusPageIncidentDao statusPageIncidentDao; + + @Mock + private StatusPageIncidentComponentBindDao statusPageIncidentComponentBindDao; + + @Mock + private CalculateStatus calculateStatus; + + @InjectMocks + private StatusPageServiceImpl statusPageService = new StatusPageServiceImpl(statusPageIncidentComponentBindDao); + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + } + + @Test + void testQueryStatusPageOrg() { + + StatusPageOrg expectedOrg = new StatusPageOrg(); + 
when(statusPageOrgDao.findAll()).thenReturn(List.of(expectedOrg)); + + StatusPageOrg actualOrg = statusPageService.queryStatusPageOrg(); + + assertEquals(expectedOrg, actualOrg); + } + + @Test + void testSaveStatusPageOrg() { + + StatusPageOrg inputOrg = new StatusPageOrg(); + when(statusPageOrgDao.save(inputOrg)).thenReturn(inputOrg); + + StatusPageOrg savedOrg = statusPageService.saveStatusPageOrg(inputOrg); + + assertEquals(inputOrg, savedOrg); + } + + @Test + void testQueryStatusPageComponents() { + + StatusPageComponent component = new StatusPageComponent(); + when(statusPageComponentDao.findAll()).thenReturn(List.of(component)); + + List components = statusPageService.queryStatusPageComponents(); + + assertEquals(1, components.size()); + assertEquals(component, components.get(0)); + } + + @Test + void testSaveStatusPageComponent() { + + StatusPageComponent component = new StatusPageComponent(); + when(statusPageComponentDao.save(component)).thenReturn(component); + + statusPageService.newStatusPageComponent(component); + + verify(statusPageComponentDao, times(1)).save(component); + } + + @Test + void testQueryStatusPageIncidents() { + + StatusPageIncident incident = new StatusPageIncident(); + when(statusPageIncidentDao.findAll(Sort.by(Sort.Direction.DESC, "startTime"))).thenReturn(List.of(incident)); + + List incidents = statusPageService.queryStatusPageIncidents(); + + assertEquals(1, incidents.size()); + assertEquals(incident, incidents.get(0)); + } + + @Test + void testSaveStatusPageIncident() { + + StatusPageIncident incident = new StatusPageIncident(); + when(statusPageIncidentDao.save(incident)).thenReturn(incident); + + statusPageService.newStatusPageIncident(incident); + + verify(statusPageIncidentDao, times(1)).save(incident); + } + + @Test + void testDeleteStatusPageIncident() { + + Long incidentId = 1L; + doNothing().when(statusPageIncidentDao).deleteById(incidentId); + + statusPageService.deleteStatusPageIncident(incidentId); + + 
verify(statusPageIncidentDao, times(1)).deleteById(incidentId); + } + } From ec8cc517d17761be74aa9fbeca378aa8447ca34d Mon Sep 17 00:00:00 2001 From: YuLuo Date: Wed, 7 Aug 2024 23:31:24 +0800 Subject: [PATCH 155/257] [Improve] add SystemGeneralConfigServiceImpl & SmsGeneralConfigServiceImpl unit test (#2490) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../impl/SmsGeneralConfigServiceImpl.java | 2 +- .../impl/SystemGeneralConfigServiceImpl.java | 4 +- .../service/SmsGeneralConfigServiceTest.java | 47 +++++++++++++++++++ .../SystemGeneralConfigServiceTest.java | 45 ++++++++++++++++++ 4 files changed, 95 insertions(+), 3 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java index b80ef9fa4cd..ebd8da36578 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java @@ -54,7 +54,7 @@ public String type() { * a TypeReference of NoticeSender type */ @Override - protected TypeReference getTypeReference() { + public TypeReference getTypeReference() { return new TypeReference<>() { @Override public Type getType() { diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java index 9a87eec4a13..5d2b629a92f 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java @@ -44,7 +44,7 @@ public class SystemGeneralConfigServiceImpl extends AbstractGeneralConfigService * @param generalConfigDao ConfigDao object * @param objectMapper JSON tool object */ 
- protected SystemGeneralConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper objectMapper) { + public SystemGeneralConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper objectMapper) { super(generalConfigDao, objectMapper); } @@ -64,7 +64,7 @@ public String type() { } @Override - protected TypeReference getTypeReference() { + public TypeReference getTypeReference() { return new TypeReference<>() { @Override public Type getType() { diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java index 6ba364b853e..ab98888a968 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java @@ -17,11 +17,58 @@ package org.apache.hertzbeat.manager.service; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.manager.dao.GeneralConfigDao; +import org.apache.hertzbeat.manager.pojo.dto.SmsNoticeSender; import org.apache.hertzbeat.manager.service.impl.SmsGeneralConfigServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * test case for {@link SmsGeneralConfigServiceImpl} */ class SmsGeneralConfigServiceTest { + + @Mock + private GeneralConfigDao generalConfigDao; + + @Mock + private ObjectMapper objectMapper; + + @InjectMocks + private SmsGeneralConfigServiceImpl service; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + service = new SmsGeneralConfigServiceImpl( + generalConfigDao, + objectMapper + ); + } + 
+ @Test + void testType() { + String result = service.type(); + assertEquals("sms", result); + } + + @Test + void testGetTypeReference() { + + TypeReference typeReference = service.getTypeReference(); + assertNotNull(typeReference); + assertEquals(SmsNoticeSender.class, typeReference.getType()); + } + } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java index bed51fdc874..942928f3f4b 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java @@ -17,11 +17,56 @@ package org.apache.hertzbeat.manager.service; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.manager.dao.GeneralConfigDao; +import org.apache.hertzbeat.manager.pojo.dto.SystemConfig; import org.apache.hertzbeat.manager.service.impl.SystemGeneralConfigServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * test case for {@link SystemGeneralConfigServiceImpl} */ class SystemGeneralConfigServiceTest { + + @Mock + private GeneralConfigDao generalConfigDao; + + @Mock + private ObjectMapper objectMapper; + + @InjectMocks + private SystemGeneralConfigServiceImpl service; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + service = new SystemGeneralConfigServiceImpl(generalConfigDao, objectMapper); + } + + @Test + void testType() { + + String result = service.type(); + assertEquals("system", result); + } + + @Test + void 
testGetTypeReference() { + + TypeReference typeReference = service.getTypeReference(); + assertNotNull(typeReference); + assertEquals(SystemConfig.class, typeReference.getType()); + } + } From 7a7d4792cd29bf2f9aa6e242f720a0053fe93604 Mon Sep 17 00:00:00 2001 From: liutianyou Date: Thu, 8 Aug 2024 20:41:08 +0800 Subject: [PATCH 156/257] [improve] remove demo plugin (#2492) Co-authored-by: tomsun28 --- .../hertzbeat/plugin/impl/DemoPluginImpl.java | 40 ------------------- .../org.apache.hertzbeat.plugin.Plugin | 1 - 2 files changed, 41 deletions(-) delete mode 100644 plugin/src/main/java/org/apache/hertzbeat/plugin/impl/DemoPluginImpl.java diff --git a/plugin/src/main/java/org/apache/hertzbeat/plugin/impl/DemoPluginImpl.java b/plugin/src/main/java/org/apache/hertzbeat/plugin/impl/DemoPluginImpl.java deleted file mode 100644 index 7fca0b75844..00000000000 --- a/plugin/src/main/java/org/apache/hertzbeat/plugin/impl/DemoPluginImpl.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hertzbeat.plugin.impl; - -import lombok.extern.slf4j.Slf4j; -import org.apache.hertzbeat.common.entity.alerter.Alert; -import org.apache.hertzbeat.plugin.Plugin; - -/** - * DemoPlugin - */ -@Slf4j -public class DemoPluginImpl implements Plugin { - - /** - * execute when alert - */ - @Override - public void alert(Alert alert) { - if (log.isDebugEnabled()) { - log.debug("DemoPluginImpl alert: {}", alert); - } - } - -} diff --git a/plugin/src/main/resources/META-INF/services/org.apache.hertzbeat.plugin.Plugin b/plugin/src/main/resources/META-INF/services/org.apache.hertzbeat.plugin.Plugin index 8da17400556..e69de29bb2d 100644 --- a/plugin/src/main/resources/META-INF/services/org.apache.hertzbeat.plugin.Plugin +++ b/plugin/src/main/resources/META-INF/services/org.apache.hertzbeat.plugin.Plugin @@ -1 +0,0 @@ -org.apache.hertzbeat.plugin.impl.DemoPluginImpl From 31134871921740f1ca6cfdc05d9b69f9aadd4849 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Thu, 8 Aug 2024 22:35:51 +0800 Subject: [PATCH 157/257] [refactor] refactor connect common cache (#2469) Signed-off-by: tomsun28 Co-authored-by: crossoverJie --- .../common/cache/ConnectionCommonCache.java | 72 ++++--------------- 1 file changed, 13 insertions(+), 59 deletions(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/cache/ConnectionCommonCache.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/cache/ConnectionCommonCache.java index dd2a97010de..d36941d9fee 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/cache/ConnectionCommonCache.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/common/cache/ConnectionCommonCache.java @@ -21,11 +21,9 @@ import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap; import java.util.Map; import java.util.Optional; -import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ConcurrentHashMap; import 
java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; @@ -34,16 +32,11 @@ */ @Slf4j public class ConnectionCommonCache> { - - /** - * default cache time 200s - */ - private static final long DEFAULT_CACHE_TIMEOUT = 200 * 1000L; - + /** - * default max cache num + * default cache time 600s */ - private static final int DEFAULT_MAX_CAPACITY = 10000; + private static final long DEFAULT_CACHE_TIMEOUT = 600 * 1000L; /** * cacheTime length @@ -60,19 +53,14 @@ public class ConnectionCommonCache> { */ private ConcurrentLinkedHashMap cacheMap; - /** - * the executor who clean cache when timeout - */ - private ThreadPoolExecutor timeoutCleanerExecutor; - public ConnectionCommonCache() { - init(); + initCache(); } - private void init() { + private void initCache() { cacheMap = new ConcurrentLinkedHashMap .Builder() - .maximumWeightedCapacity(DEFAULT_MAX_CAPACITY) + .maximumWeightedCapacity(Integer.MAX_VALUE) .listener((key, value) -> { timeoutMap.remove(key); try { @@ -82,70 +70,37 @@ private void init() { } log.info("connection common cache discard key: {}, value: {}.", key, value); }).build(); - timeoutMap = new ConcurrentHashMap<>(DEFAULT_MAX_CAPACITY >> 6); - // last-first-coverage algorithm, run the first and last thread, discard mid - timeoutCleanerExecutor = new ThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(1), - r -> new Thread(r, "connection-cache-timeout-cleaner"), - new ThreadPoolExecutor.DiscardOldestPolicy()); + timeoutMap = new ConcurrentHashMap<>(16); // init monitor available detector cyc task ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat("connection-cache-ava-detector-%d") + .setNameFormat("connection-cache-timout-detector-%d") .setDaemon(true) .build(); ScheduledThreadPoolExecutor scheduledExecutor = new ScheduledThreadPoolExecutor(1, 
threadFactory); - scheduledExecutor.scheduleWithFixedDelay(this::detectCacheAvailable, 2, 20, TimeUnit.MINUTES); + scheduledExecutor.scheduleWithFixedDelay(this::cleanTimeoutOrUnHealthCache, 2, 100, TimeUnit.SECONDS); } /** - * detect all cache available, cleanup not ava connection + * clean and remove timeout cache */ - private void detectCacheAvailable() { + private void cleanTimeoutOrUnHealthCache() { try { cacheMap.forEach((key, value) -> { + // index 0 is startTime, 1 is timeDiff Long[] cacheTime = timeoutMap.get(key); long currentTime = System.currentTimeMillis(); if (cacheTime == null || cacheTime.length != CACHE_TIME_LENGTH || cacheTime[0] + cacheTime[1] < currentTime) { - cacheMap.remove(key); - timeoutMap.remove(key); - try { - value.close(); - } catch (Exception e) { - log.error("connection close error: {}.", e.getMessage(), e); - } - - } - }); - } catch (Exception e) { - log.error("connection common cache detect cache available error: {}.", e.getMessage(), e); - } - } - - /** - * clean timeout cache - */ - private void cleanTimeoutCache() { - try { - cacheMap.forEach((key, value) -> { - // index 0 is startTime, 1 is timeDiff - Long[] cacheTime = timeoutMap.get(key); - long currentTime = System.currentTimeMillis(); - if (cacheTime == null || cacheTime.length != CACHE_TIME_LENGTH) { - timeoutMap.put(key, new Long[]{currentTime, DEFAULT_CACHE_TIMEOUT}); - } else if (cacheTime[0] + cacheTime[1] < currentTime) { - // timeout, remove this object cache log.warn("[connection common cache] clean the timeout cache, key {}", key); timeoutMap.remove(key); cacheMap.remove(key); try { value.close(); } catch (Exception e) { - log.error("connection close error: {}.", e.getMessage(), e); + log.error("clean connection close error: {}.", e.getMessage(), e); } } }); - Thread.sleep(20 * 1000); } catch (Exception e) { log.error("[connection common cache] clean timeout cache error: {}.", e.getMessage(), e); } @@ -165,7 +120,6 @@ public void addCache(T key, C value, Long 
timeDiff) { } cacheMap.put(key, value); timeoutMap.put(key, new Long[]{System.currentTimeMillis(), timeDiff}); - timeoutCleanerExecutor.execute(this::cleanTimeoutCache); } /** From 8faa13e7b47ce60c6c4542a92becec4cfc7c78fb Mon Sep 17 00:00:00 2001 From: YuLuo Date: Thu, 8 Aug 2024 23:03:24 +0800 Subject: [PATCH 158/257] [Improve] add AvailableAlertDefineIni & SystemSecretServiceImpl unit test (#2496) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../service/impl/SystemSecretServiceImpl.java | 4 +- .../service/AvailableAlertDefineInitTest.java | 103 ++++++++++++++++++ .../service/StatusPageServiceTest.java | 2 +- .../service/SystemSecretServiceTest.java | 40 +++++++ 4 files changed, 146 insertions(+), 3 deletions(-) create mode 100644 manager/src/test/java/org/apache/hertzbeat/manager/service/AvailableAlertDefineInitTest.java diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java index 4e3eb812d09..4c25b12adc3 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java @@ -37,7 +37,7 @@ public class SystemSecretServiceImpl extends AbstractGeneralConfigServiceImpl getTypeReference() { + public TypeReference getTypeReference() { return new TypeReference<>() { @Override public Type getType() { diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/AvailableAlertDefineInitTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/AvailableAlertDefineInitTest.java new file mode 100644 index 00000000000..e2524e7c901 --- /dev/null +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/AvailableAlertDefineInitTest.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.manager.service; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.apache.hertzbeat.alert.dao.AlertDefineDao; +import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.entity.alerter.AlertDefine; +import org.apache.hertzbeat.common.entity.job.Job; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link AvailableAlertDefineInit} + */ + +class AvailableAlertDefineInitTest { + + @Mock + private AlertDefineDao alertDefineDao; + + @Mock + private AppService appService; + + @InjectMocks + private AvailableAlertDefineInit availableAlertDefineInit; + + private Map map; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + map = new HashMap<>(); + map.put("testApp", new Job()); + } + + @Test + void testRunAlertDefineIsEmpty() throws Exception { + + 
+ when(appService.getAllAppDefines()).thenReturn(map); + + when(alertDefineDao.queryAlertDefineByAppAndMetric("testApp", CommonConstants.AVAILABILITY)) + .thenReturn(Collections.emptyList()); + + availableAlertDefineInit.run(); + + verify(alertDefineDao, times(1)).save(any(AlertDefine.class)); + } + + @Test + void testRunAlertDefineExists() throws Exception { + + when(appService.getAllAppDefines()).thenReturn(map); + when(alertDefineDao.queryAlertDefineByAppAndMetric("testApp", CommonConstants.AVAILABILITY)) + .thenReturn(List.of(new AlertDefine())); + availableAlertDefineInit.run(); + + verify(alertDefineDao, never()).save(any(AlertDefine.class)); + } + + @Test + void testRunExceptionHandling() throws Exception { + + when(appService.getAllAppDefines()).thenReturn(map); + when(alertDefineDao.queryAlertDefineByAppAndMetric("testApp", CommonConstants.AVAILABILITY)) + .thenThrow(new RuntimeException("Database error")); + + availableAlertDefineInit.run(); + + verify(alertDefineDao, never()).save(any(AlertDefine.class)); + } + +} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java index 822531cbe58..864c4a0e2fb 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/StatusPageServiceTest.java @@ -45,7 +45,7 @@ * test case for {@link StatusPageServiceImpl} */ -class StatusPageServiceImplTest { +class StatusPageServiceTest { @Mock private StatusPageOrgDao statusPageOrgDao; diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java index 729cd130f61..688847e3c2b 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java +++ 
b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java @@ -17,11 +17,51 @@ package org.apache.hertzbeat.manager.service; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.manager.dao.GeneralConfigDao; +import org.apache.hertzbeat.manager.pojo.dto.SystemSecret; import org.apache.hertzbeat.manager.service.impl.SystemSecretServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; /** * test case for {@link SystemSecretServiceImpl} */ class SystemSecretServiceTest { + + @Mock + private GeneralConfigDao generalConfigDao; + + @Mock + private ObjectMapper objectMapper; + + private SystemSecretServiceImpl systemSecretService; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + + this.systemSecretService = new SystemSecretServiceImpl(generalConfigDao, objectMapper); + } + + @Test + void testType() { + + assertEquals("secret", systemSecretService.type()); + } + + @Test + void testGetTypeReference() { + + TypeReference typeReference = systemSecretService.getTypeReference(); + assertEquals(SystemSecret.class, typeReference.getType()); + } + } From bac607f6dd702cf3774a0a05172b4d130385eef2 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Thu, 8 Aug 2024 23:55:45 +0800 Subject: [PATCH 159/257] [Improve] add WarehouseServiceImpl unit test (#2497) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../service/WarehouseServiceTest.java | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java diff --git a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java 
b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java new file mode 100644 index 00000000000..ec771f121d4 --- /dev/null +++ b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.warehouse.service; + +import java.util.Collections; +import java.util.List; + +import org.apache.hertzbeat.common.entity.message.CollectRep; +import org.apache.hertzbeat.warehouse.service.impl.WarehouseServiceImpl; +import org.apache.hertzbeat.warehouse.store.realtime.AbstractRealTimeDataStorage; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.springframework.test.context.junit.jupiter.SpringExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * test case for {@link WarehouseServiceImpl} + */ + +@ExtendWith(SpringExtension.class) +class WarehouseServiceImplTest { + + @Mock + private AbstractRealTimeDataStorage realTimeDataStorage; + + @InjectMocks + private WarehouseServiceImpl warehouseService; + + @BeforeEach + void setUp() { + + MockitoAnnotations.openMocks(this); + } + + @Test + void testQueryMonitorMetricsData() { + + Long monitorId = 1L; + List expectedData = Collections.emptyList(); + + when(realTimeDataStorage.isServerAvailable()).thenReturn(true); + when(realTimeDataStorage.getCurrentMetricsData(monitorId)).thenReturn(expectedData); + + List result = warehouseService.queryMonitorMetricsData(monitorId); + + assertEquals(expectedData, result); + verify(realTimeDataStorage, never()).isServerAvailable(); + } + + @Test + void testQueryMonitorMetricsDataNotAvailable() { + + Long monitorId = 1L; + + when(realTimeDataStorage.isServerAvailable()).thenReturn(false); + + List result = warehouseService.queryMonitorMetricsData(monitorId); + + assertTrue(result.isEmpty()); + 
verify(realTimeDataStorage, never()).getCurrentMetricsData(anyLong()); + } +} From aeac1ffed4de917f46a98e8945bbd72af6630dae Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Fri, 9 Aug 2024 17:09:31 +0800 Subject: [PATCH 160/257] [bugfix] fix alarm threshold ava can not modify (#2494) Signed-off-by: tomsun28 Co-authored-by: Kerwin Bryant --- .../app/routes/alert/alert-setting/alert-setting.component.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts index 7ad32d237a9..beb75d5e8b9 100644 --- a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts +++ b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts @@ -669,7 +669,9 @@ export class AlertSettingComponent implements OnInit { } onManageModalOk() { - this.defineForm.form.addControl('ruleset', this.qbFormCtrl); + if (this.cascadeValues.length == 3) { + this.defineForm.form.addControl('ruleset', this.qbFormCtrl); + } if (this.defineForm?.invalid) { Object.values(this.defineForm.controls).forEach(control => { if (control.invalid) { From 23d16ae92180f5b536fa13b5cdc777e0d2871b3f Mon Sep 17 00:00:00 2001 From: Chandrakant Vankayalapati <104664857+ceekay47@users.noreply.github.com> Date: Fri, 9 Aug 2024 08:42:12 -0700 Subject: [PATCH 161/257] [refactor] move code from CollectorController to CollectorService (#2433) Co-authored-by: ceekay Co-authored-by: Calvin --- .../controller/CollectorController.java | 46 ++----------------- .../manager/service/CollectorService.java | 31 ++++++++++--- .../service/impl/CollectorServiceImpl.java | 45 +++++++++++++++++- .../manager/service/CollectorServiceTest.java | 9 ++-- 4 files changed, 78 insertions(+), 53 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/CollectorController.java 
b/manager/src/main/java/org/apache/hertzbeat/manager/controller/CollectorController.java index 349191c2546..df31ef9d43e 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/CollectorController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/CollectorController.java @@ -21,21 +21,13 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.persistence.criteria.Predicate; -import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.dto.CollectorSummary; import org.apache.hertzbeat.common.entity.dto.Message; -import org.apache.hertzbeat.common.entity.manager.Collector; -import org.apache.hertzbeat.common.util.IpDomainUtil; -import org.apache.hertzbeat.manager.scheduler.netty.ManageServer; import org.apache.hertzbeat.manager.service.CollectorService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; @@ -56,9 +48,6 @@ public class CollectorController { @Autowired private CollectorService collectorService; - - @Autowired(required = false) - private ManageServer manageServer; @GetMapping @Operation(summary = "Get a list of collectors based on query filter items", @@ -67,21 +56,8 @@ public ResponseEntity>> getCollectors( @Parameter(description = "collector name", example = "tom") @RequestParam(required = false) final String name, @Parameter(description = "List current page", example = "0") @RequestParam(defaultValue = "0") int pageIndex, @Parameter(description = "Number of list 
pagination", example = "8") @RequestParam(required = false) Integer pageSize) { - if (pageSize == null) { - pageSize = Integer.MAX_VALUE; - } - Specification specification = (root, query, criteriaBuilder) -> { - Predicate predicate = criteriaBuilder.conjunction(); - if (name != null && !name.isEmpty()) { - Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); - predicate = criteriaBuilder.and(predicateName); - } - return predicate; - }; - PageRequest pageRequest = PageRequest.of(pageIndex, pageSize); - Page receivers = collectorService.getCollectors(specification, pageRequest); - Message> message = Message.success(receivers); - return ResponseEntity.ok(message); + Page receivers = collectorService.getCollectors(name, pageIndex, pageSize); + return ResponseEntity.ok(Message.success(receivers)); } @PutMapping("/online") @@ -89,10 +65,7 @@ public ResponseEntity>> getCollectors( public ResponseEntity> onlineCollector( @Parameter(description = "collector name", example = "demo-collector") @RequestParam(required = false) List collectors) { - if (collectors != null) { - collectors.forEach(collector -> - this.manageServer.getCollectorAndJobScheduler().onlineCollector(collector)); - } + collectorService.makeCollectorsOnline(collectors); return ResponseEntity.ok(Message.success("Online success")); } @@ -101,9 +74,7 @@ public ResponseEntity> onlineCollector( public ResponseEntity> offlineCollector( @Parameter(description = "collector name", example = "demo-collector") @RequestParam(required = false) List collectors) { - if (collectors != null) { - collectors.forEach(collector -> this.manageServer.getCollectorAndJobScheduler().offlineCollector(collector)); - } + collectorService.makeCollectorsOffline(collectors); return ResponseEntity.ok(Message.success("Offline success")); } @@ -121,14 +92,7 @@ public ResponseEntity> deleteCollector( public ResponseEntity>> generateCollectorDeployInfo( @Parameter(description = "collector name", example = 
"demo-collector") @PathVariable() String collector) { - if (this.collectorService.hasCollector(collector)) { - return ResponseEntity.ok(Message.fail(CommonConstants.FAIL_CODE, "There already has same collector name.")); - } - String host = IpDomainUtil.getLocalhostIp(); - Map maps = new HashMap<>(6); - maps.put("identity", collector); - maps.put("host", host); - return ResponseEntity.ok(Message.success(maps)); + return ResponseEntity.ok(Message.success(collectorService.generateCollectorDeployInfo(collector))); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/CollectorService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/CollectorService.java index 81d995af829..74a7ad3a171 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/CollectorService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/CollectorService.java @@ -18,11 +18,9 @@ package org.apache.hertzbeat.manager.service; import java.util.List; +import java.util.Map; import org.apache.hertzbeat.common.entity.dto.CollectorSummary; -import org.apache.hertzbeat.common.entity.manager.Collector; import org.springframework.data.domain.Page; -import org.springframework.data.domain.PageRequest; -import org.springframework.data.jpa.domain.Specification; /** * collector service @@ -31,11 +29,13 @@ public interface CollectorService { /** * Dynamic conditional query - * @param specification Query conditions - * @param pageRequest pageIndex pageSize + * @param name Collector Name + * @param pageIndex current pageIndex + * @param pageSize Number of list pagination + * * @return Search result */ - Page getCollectors(Specification specification, PageRequest pageRequest); + Page getCollectors(String name, int pageIndex, Integer pageSize); /** * delete registered collectors @@ -49,4 +49,23 @@ public interface CollectorService { * @return return true if it has */ boolean hasCollector(String collector); + + /** + * Generate Collector Deploy Info + 
* @param collector collector name + */ + Map generateCollectorDeployInfo(String collector); + + /** + * Makes Collectors Offline + * @param collectors collector names + */ + void makeCollectorsOffline(List collectors); + + /** + * Makes Collectors Online + * @param collectors collector names + */ + void makeCollectorsOnline(List collectors); + } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/CollectorServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/CollectorServiceImpl.java index 003c5f99e55..519cec7d58c 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/CollectorServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/CollectorServiceImpl.java @@ -17,12 +17,16 @@ package org.apache.hertzbeat.manager.service.impl; +import jakarta.persistence.criteria.Predicate; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import org.apache.hertzbeat.common.entity.dto.CollectorSummary; import org.apache.hertzbeat.common.entity.manager.Collector; import org.apache.hertzbeat.common.entity.manager.CollectorMonitorBind; import org.apache.hertzbeat.common.support.exception.CommonException; +import org.apache.hertzbeat.common.util.IpDomainUtil; import org.apache.hertzbeat.manager.dao.CollectorDao; import org.apache.hertzbeat.manager.dao.CollectorMonitorBindDao; import org.apache.hertzbeat.manager.scheduler.AssignJobs; @@ -58,7 +62,19 @@ public class CollectorServiceImpl implements CollectorService { @Override @Transactional(readOnly = true) - public Page getCollectors(Specification specification, PageRequest pageRequest) { + public Page getCollectors(String name, int pageIndex, Integer pageSize) { + if (pageSize == null) { + pageSize = Integer.MAX_VALUE; + } + Specification specification = (root, query, criteriaBuilder) -> { + Predicate predicate = criteriaBuilder.conjunction(); + if (name != null && 
!name.isEmpty()) { + Predicate predicateName = criteriaBuilder.like(root.get("name"), "%" + name + "%"); + predicate = criteriaBuilder.and(predicateName); + } + return predicate; + }; + PageRequest pageRequest = PageRequest.of(pageIndex, pageSize); Page collectors = collectorDao.findAll(specification, pageRequest); List collectorSummaryList = new LinkedList<>(); for (Collector collector : collectors.getContent()) { @@ -97,4 +113,31 @@ public void deleteRegisteredCollector(List collectors) { public boolean hasCollector(String collector) { return this.collectorDao.findCollectorByName(collector).isPresent(); } + + @Override + public Map generateCollectorDeployInfo(String collector) { + if (hasCollector(collector)) { + throw new CommonException("There already exists a collector with same name."); + } + String host = IpDomainUtil.getLocalhostIp(); + Map maps = new HashMap<>(6); + maps.put("identity", collector); + maps.put("host", host); + return maps; + } + + @Override + public void makeCollectorsOffline(List collectors) { + if (collectors != null) { + collectors.forEach(collector -> this.manageServer.getCollectorAndJobScheduler().offlineCollector(collector)); + } + } + + @Override + public void makeCollectorsOnline(List collectors) { + if (collectors != null) { + collectors.forEach(collector -> + this.manageServer.getCollectorAndJobScheduler().onlineCollector(collector)); + } + } } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java index 8e011fe0303..1277e769c8f 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/CollectorServiceTest.java @@ -17,7 +17,6 @@ package org.apache.hertzbeat.manager.service; -import org.apache.hertzbeat.common.entity.manager.Collector; import org.apache.hertzbeat.manager.dao.CollectorDao; import 
org.apache.hertzbeat.manager.dao.CollectorMonitorBindDao; import org.apache.hertzbeat.manager.scheduler.ConsistentHash; @@ -37,7 +36,8 @@ import java.util.List; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.mockito.Mockito.mock; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; /** @@ -65,9 +65,8 @@ public class CollectorServiceTest { @Test public void getCollectors() { - Specification specification = mock(Specification.class); - when(collectorDao.findAll(specification, PageRequest.of(1, 1))).thenReturn(Page.empty()); - assertDoesNotThrow(() -> collectorService.getCollectors(specification, PageRequest.of(1, 1))); + when(collectorDao.findAll(any(Specification.class), eq(PageRequest.of(1, 1)))).thenReturn(Page.empty()); + assertDoesNotThrow(() -> collectorService.getCollectors("test", 1, 1)); } @Test From df4272a2f8c653cb626c517a5484bd0983502d8c Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 10 Aug 2024 09:16:14 +0800 Subject: [PATCH 162/257] [bugfix] fix unit test errors in different languages. 
(#2498) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../component/alerter/impl/SmsAlertNotifyHandlerImplTest.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImplTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImplTest.java index ce6e38dd4a4..a0e5b4f530a 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImplTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/component/alerter/impl/SmsAlertNotifyHandlerImplTest.java @@ -17,6 +17,7 @@ package org.apache.hertzbeat.manager.component.alerter.impl; +import java.util.Locale; import java.util.Map; import java.util.ResourceBundle; @@ -73,6 +74,8 @@ public void setUp() { bundle = mock(ResourceBundle.class); when(bundle.getString(anyString())).thenReturn("High"); + Locale.setDefault(Locale.ENGLISH); + notifyHandler = new SmsAlertNotifyHandlerImpl(tencentSmsClient); } From c650972f3d9cfa4b22dd42ad9eb6bd98a3cbaee6 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sun, 11 Aug 2024 11:21:15 +0800 Subject: [PATCH 163/257] [improve] add AccountServiceImpl unit test (#2501) Signed-off-by: yuluo-yx --- .../manager/service/AccountServiceTest.java | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java index 46f55d22bf9..2698123f17a 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/AccountServiceTest.java @@ -17,11 +17,138 @@ package org.apache.hertzbeat.manager.service; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import javax.naming.AuthenticationException; + +import 
com.usthe.sureness.provider.DefaultAccount; +import com.usthe.sureness.provider.SurenessAccount; +import com.usthe.sureness.provider.SurenessAccountProvider; +import com.usthe.sureness.provider.ducument.DocumentAccountProvider; +import com.usthe.sureness.util.JsonWebTokenUtil; +import com.usthe.sureness.util.Md5Util; +import io.jsonwebtoken.MalformedJwtException; +import org.apache.hertzbeat.common.util.JsonUtil; +import org.apache.hertzbeat.manager.pojo.dto.LoginDto; +import org.apache.hertzbeat.manager.pojo.dto.RefreshTokenResponse; import org.apache.hertzbeat.manager.service.impl.AccountServiceImpl; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * test case for {@link AccountServiceImpl} */ class AccountServiceTest { + + private AccountServiceImpl accountService; + + private SurenessAccountProvider accountProvider; + + private final String identifier = "admin"; + private final String password = "hertzbeat"; + private final String salt = "salt1"; + private final List roles = List.of("admin"); + + private final String jwt = """ + CyaFv0bwq2Eik0jdrKUtsA6bx3sDJeFV643R + LnfKefTjsIfJLBa2YkhEqEGtcHDTNe4CU6+9 + 8tVt4bisXQ13rbN0oxhUZR73M6EByXIO+SV5 + dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp + """; + @BeforeEach + void setUp() { + + accountProvider = mock(DocumentAccountProvider.class); + accountService = new AccountServiceImpl(); + + JsonWebTokenUtil.setDefaultSecretKey(jwt); + } + + @Test + void testAuthGetTokenWithValidAccount() throws AuthenticationException { + + SurenessAccount account = DefaultAccount.builder("app1") + .setPassword(Md5Util.md5(password + salt)) + .setSalt(salt) + .setOwnRoles(roles) + .setDisabledAccount(Boolean.FALSE) + .setExcessiveAttempts(Boolean.FALSE) + .build(); + 
LoginDto loginDto = LoginDto.builder() + .credential(password) + .identifier(identifier) + .build(); + + when(accountProvider.loadAccount(identifier)).thenReturn(account); + + Map response = accountService.authGetToken(loginDto); + + assertNotNull(response); + assertNotNull(response.get("token")); + assertNotNull(response.get("refreshToken")); + assertNotNull(response.get("role")); + assertEquals(JsonUtil.toJson(roles), response.get("role")); + + } + + @Test + void testAuthGetTokenWithInvalidAccount() { + + String identifier = "user1"; + String password = "wrongPassword"; + LoginDto loginDto = LoginDto.builder() + .credential(password) + .identifier(identifier) + .build(); + + when(accountProvider.loadAccount(identifier)).thenReturn(null); + + Assertions.assertThrows( + AuthenticationException.class, + () -> accountService.authGetToken(loginDto) + ); + } + + @Test + void testRefreshTokenWithValidToken() throws AuthenticationException { + + String userId = "admin"; + String refreshToken = JsonWebTokenUtil.issueJwt(userId, 3600L, Collections.singletonMap("refresh", true)); + + SurenessAccount account = DefaultAccount.builder("app1") + .setPassword(Md5Util.md5(password + salt)) + .setSalt(salt) + .setOwnRoles(roles) + .setDisabledAccount(Boolean.FALSE) + .setExcessiveAttempts(Boolean.FALSE) + .build(); + when(accountProvider.loadAccount(userId)).thenReturn(account); + + RefreshTokenResponse response = accountService.refreshToken(refreshToken); + + assertNotNull(response); + assertNotNull(response.getToken()); + assertNotNull(response.getRefreshToken()); + } + + @Test + void testRefreshTokenWithInvalidToken() { + + String refreshToken = "invalidToken"; + + Assertions.assertThrows( + MalformedJwtException.class, + () -> accountService.refreshToken(refreshToken) + ); + } + } From 3c01298e865c93eff7e5a7089065e945bb9db389 Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 11 Aug 2024 16:00:48 +0800 Subject: [PATCH 164/257] [feature] add iceberg template (#2503) --- 
.../src/main/resources/define/app-iceberg.yml | 266 ++++++++++++++++++ 1 file changed, 266 insertions(+) create mode 100644 manager/src/main/resources/define/app-iceberg.yml diff --git a/manager/src/main/resources/define/app-iceberg.yml b/manager/src/main/resources/define/app-iceberg.yml new file mode 100644 index 00000000000..baf932f9b7f --- /dev/null +++ b/manager/src/main/resources/define/app-iceberg.yml @@ -0,0 +1,266 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: bigdata +# The monitoring type eg: springboot2 linux windows tomcat mysql aws... +app: iceberg +# The monitoring i18n name +name: + zh-CN: Apache Iceberg + en-US: Apache Iceberg +# The description and help of this monitoring type +help: + zh-CN: HertzBeat 对 HServer2 暴露的通用性能指标(basic、environment、thread、code_cache)进行采集监控。⚠️注意:如果要监控 Apache Iceberg 中的信息,需要您的 Apache Iceberg 应用集成并开启 Hive Server2, 点击查看具体步骤 + en-US: HertzBeat collects and monitors Apache Iceberg through general performance metric(health, environment, threads, memory_used) that exposed by the Hive Server2.
⚠️Note:You should make sure that your Apache Iceberg application have already integrated and enabled the Hive Server2, click here to see the specific steps. + zh-TW: HertzBeat 對 HServer2 暴露的通用性能指標(basic、environment、thread、code_cache)進行採集監控。< span class='help_module_span'> ⚠️ 注意:如果要監控Apache Iceberg 中的指標,需要您的Apache Iceberg 應用集成並開啟 Hive Server2,點擊查看具體步驟。 +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/iceberg + en-US: https://hertzbeat.apache.org/docs/help/iceberg +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 10002 + # field-param field key + - field: ssl + # name-param field display i18n name + name: + zh-CN: 启动SSL + en-US: SSL + # When the type is boolean, the frontend will display a switch for it. + type: boolean + # required-true or false + required: false + # field-param field key + - field: base_path + # name-param field display i18n name + name: + zh-CN: Base Path + en-US: Base Path + # type-param field type(most mapping the html input type) The type "text" belongs to a text input field. 
+ type: text + # default value + defaultValue: /jmx + # required-true or false + required: true + # hide-true or false + hide: true +# collect metrics config list +metrics: + # metrics - available + - name: available + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: responseTime + type: 0 + unit: ms + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: http + # Specific collection configuration when protocol is http protocol + http: + # http host: ipv4 ipv6 domain + host: ^_^host^_^ + # http port + port: ^_^port^_^ + # http url + url: ^_^base_path^_^ + # http request method GET POST PUT DELETE PATCH + method: GET + # enable SSL/TLS, that is, whether it is http or https, the default is false + ssl: ^_^ssl^_^ + # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-api availability monitoring + parseType: default + + - name: basic + priority: 1 + fields: + - field: vm_name + type: 1 + - field: vm_vendor + type: 1 + - field: vm_version + type: 1 + - field: up_time + type: 0 + unit: ms + aliasFields: + - $.beans[?(@.name == 'java.lang:type=Runtime')].VmName + - $.beans[?(@.name == 'java.lang:type=Runtime')].VmVendor + - $.beans[?(@.name == 'java.lang:type=Runtime')].VmVersion + - $.beans[?(@.name == 'java.lang:type=Runtime')].Uptime + calculates: + - vm_name=$.beans[?(@.name == 'java.lang:type=Runtime')].VmName + - vm_vendor=$.beans[?(@.name == 'java.lang:type=Runtime')].VmVendor + - vm_version=$.beans[?(@.name == 'java.lang:type=Runtime')].VmVersion + - up_time=$.beans[?(@.name == 
'java.lang:type=Runtime')].Uptime + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: ^_^base_path^_^ + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$' + + # metrics - environment + - name: environment + priority: 2 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: https_proxyPort + type: 0 + - field: os_name + type: 1 + - field: os_version + type: 1 + - field: os_arch + type: 1 + - field: java_runtime_name + type: 1 + - field: java_runtime_version + type: 1 + # metric alias list, used to identify metrics in query results + aliasFields: + - $.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'os.name')].value + - $.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'os.version')].value + - $.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'os.arch')].value + - $.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'java.runtime.name')].value + - $.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'java.runtime.version')].value + - $.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'https.proxyPort')].value + # A list of calculation scripts for metric values. 
+ calculates: + - https_proxyPort=$.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'https.proxyPort')].value + - os_name=$.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'os.name')].value + - os_version=$.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'os.version')].value + - os_arch=$.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'os.arch')].value + - java_runtime_name=$.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'java.runtime.name')].value + - java_runtime_version=$.beans[?(@.name == 'java.lang:type=Runtime')].SystemProperties[?(@.key == 'java.runtime.version')].value + # The protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: http + # Specific collection configuration when protocol is http protocol + http: + # http host: ipv4 ipv6 domain + host: ^_^host^_^ + # http port + port: ^_^port^_^ + # http url + url: ^_^base_path^_^ + # http request method GET POST PUT DELETE PATCH + method: GET + # enable SSL/TLS, that is, whether it is http or https, the default is false + ssl: ^_^ssl^_^ + # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-api availability monitoring + parseType: jsonPath + # http response data parse script + parseScript: '$' + + - name: thread + priority: 4 + fields: + - field: thread_count + type: 0 + - field: total_started_thread + type: 0 + - field: peak_thread_count + type: 0 + - field: daemon_thread_count + type: 0 + aliasFields: + - $.beans[?(@.name == 'java.lang:type=Threading')].ThreadCount + - $.beans[?(@.name == 'java.lang:type=Threading')].TotalStartedThreadCount + - $.beans[?(@.name == 'java.lang:type=Threading')].PeakThreadCount + - $.beans[?(@.name == 'java.lang:type=Threading')].DaemonThreadCount + calculates: + - thread_count=$.beans[?(@.name == 'java.lang:type=Threading')].ThreadCount + - total_started_thread=$.beans[?(@.name == 
'java.lang:type=Threading')].TotalStartedThreadCount + - peak_thread_count=$.beans[?(@.name == 'java.lang:type=Threading')].PeakThreadCount + - daemon_thread_count=$.beans[?(@.name == 'java.lang:type=Threading')].DaemonThreadCount + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: ^_^base_path^_^ + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$' + + - name: code_cache + priority: 5 + fields: + - field: committed + type: 1 + unit: MB + - field: init + type: 0 + unit: MB + - field: max + type: 0 + unit: MB + - field: used + type: 0 + unit: MB + aliasFields: + - $.beans[?(@.Name == 'Code Cache')].Usage.committed + - $.beans[?(@.Name == 'Code Cache')].Usage.init + - $.beans[?(@.Name == 'Code Cache')].Usage.max + - $.beans[?(@.Name == 'Code Cache')].Usage.used + calculates: + - committed=$.beans[?(@.Name == 'Code Cache')].Usage.committed + - init=$.beans[?(@.Name == 'Code Cache')].Usage.init + - max=$.beans[?(@.Name == 'Code Cache')].Usage.max + - used=$.beans[?(@.Name == 'Code Cache')].Usage.used + units: + - committed=B->MB + - init=B->MB + - max=B->MB + - used=B->MB + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: ^_^base_path^_^ + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$' From 3d3d881722a9194af10a9da6f4152437bea95446 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sun, 11 Aug 2024 16:44:53 +0800 Subject: [PATCH 165/257] [improve] add ObjectStoreConfigServiceImpl unit test (#2502) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../impl/ObjectStoreConfigServiceImpl.java | 1 + .../service/ObjectStoreConfigServiceTest.java | 69 +++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java index 021b5debcd8..5a0dd1424b9 100644 --- 
a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java @@ -86,6 +86,7 @@ public void handler(ObjectStoreDTO config) { } ctx.publishEvent(new ObjectStoreConfigChangeEvent(config)); } + log.warn("object store config is null, please check the configuration file."); } /** diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java index 54c2403db86..95f123ffbf8 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java @@ -17,11 +17,80 @@ package org.apache.hertzbeat.manager.service; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreConfigChangeEvent; +import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreDTO; import org.apache.hertzbeat.manager.service.impl.ObjectStoreConfigServiceImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Spy; +import org.springframework.beans.factory.support.DefaultListableBeanFactory; +import org.springframework.context.ApplicationContext; +import org.springframework.test.context.junit.jupiter.SpringExtension; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; /** * test case for {@link ObjectStoreConfigServiceImpl} */ +@ExtendWith(SpringExtension.class) class ObjectStoreConfigServiceTest 
{ + + private final DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory(); + + @Mock + private ApplicationContext ctx; + + @Spy + private ObjectMapper objectMapper = new ObjectMapper(); + + @InjectMocks + private ObjectStoreConfigServiceImpl objectStoreConfigService; + + @BeforeEach + void setUp() { + + ReflectionTestUtils.setField(objectStoreConfigService, "beanFactory", beanFactory); + ReflectionTestUtils.setField(objectStoreConfigService, "ctx", ctx); + } + + @Test + void testGetType() { + + String type = objectStoreConfigService.type(); + assertEquals("oss", type); + } + + @Test + void testHandlerNullConfig() { + + objectStoreConfigService.handler(null); + verify(ctx, never()).publishEvent(any()); + } + + @Test + void testHandlerObsConfig() { + + ObjectStoreDTO config = new ObjectStoreDTO<>(); + config.setType(ObjectStoreDTO.Type.OBS); + ObjectStoreDTO.ObsConfig obsConfig = new ObjectStoreDTO.ObsConfig(); + obsConfig.setAccessKey("access-key"); + obsConfig.setSecretKey("secret-key"); + obsConfig.setEndpoint("endpoint"); + obsConfig.setBucketName("bucket-name"); + config.setConfig(obsConfig); + + objectStoreConfigService.handler(config); + + verify(ctx).publishEvent(any(ObjectStoreConfigChangeEvent.class)); + } + } From 3250bc714d894e6c2c5cf063695beb2551b70dae Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 11 Aug 2024 22:06:03 +0800 Subject: [PATCH 166/257] [improve] add hive i18n (#2504) Co-authored-by: tomsun28 --- .../src/main/resources/define/app-hive.yml | 74 ++++++++++++++++++- .../src/main/resources/define/app-iceberg.yml | 69 +++++++++++++++++ 2 files changed, 142 insertions(+), 1 deletion(-) diff --git a/manager/src/main/resources/define/app-hive.yml b/manager/src/main/resources/define/app-hive.yml index 840b0784a1e..9b30ec4bd0d 100644 --- a/manager/src/main/resources/define/app-hive.yml +++ b/manager/src/main/resources/define/app-hive.yml @@ -35,7 +35,7 @@ params: - field: host # name-param field display i18n name name: - zh-CN: 
目标Host + zh-CN: 目标 Host en-US: Target Host # type-param field type(most mapping the html input type) type: host @@ -83,6 +83,9 @@ params: metrics: # metrics - available - name: available + i18n: + zh-CN: 可用性 + en-US: Availability # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -90,6 +93,9 @@ metrics: fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: responseTime + i18n: + zh-CN: 响应时间 + en-US: Response Time type: 0 unit: ms # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk @@ -110,15 +116,30 @@ metrics: parseType: default - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic priority: 1 fields: - field: vm_name + i18n: + zh-CN: 虚拟机名称 + en-US: VM Name type: 1 - field: vm_vendor + i18n: + zh-CN: 虚拟机供应商 + en-US: VM Vendor type: 1 - field: vm_version + i18n: + zh-CN: 虚拟机版本 + en-US: VM Version type: 1 - field: up_time + i18n: + zh-CN: 运行时间 + en-US: Uptime type: 0 unit: ms aliasFields: @@ -143,21 +164,42 @@ metrics: # metrics - environment - name: environment + i18n: + zh-CN: 环境信息 + en-US: Environment priority: 2 # collect metrics content fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: https_proxyPort + i18n: + zh-CN: https 代理端口 + en-US: https Proxy Port type: 0 - field: os_name + i18n: + zh-CN: os 名称 + en-US: OS Name type: 1 - field: os_version + i18n: + zh-CN: os 版本 + en-US: OS Version type: 1 - field: os_arch + i18n: + zh-CN: os 架构 + en-US: OS Arch type: 1 - field: java_runtime_name + i18n: + zh-CN: java 运行时名称 + en-US: Java Runtime Name type: 1 - field: java_runtime_version + i18n: + zh-CN: java 运行时版本 + en-US: Java Runtime 
Version type: 1 # metric alias list, used to identify metrics in query results aliasFields: @@ -195,15 +237,30 @@ metrics: parseScript: '$' - name: thread + i18n: + zh-CN: 线程 + en-US: Thread priority: 4 fields: - field: thread_count + i18n: + zh-CN: 线程数 + en-US: Thread Count type: 0 - field: total_started_thread + i18n: + zh-CN: 启动线程数 + en-US: Total Started Thread type: 0 - field: peak_thread_count + i18n: + zh-CN: 峰值线程数 + en-US: Peak Thread Count type: 0 - field: daemon_thread_count + i18n: + zh-CN: 守护线程数 + en-US: Daemon Thread Count type: 0 aliasFields: - $.beans[?(@.name == 'java.lang:type=Threading')].ThreadCount @@ -226,18 +283,33 @@ metrics: parseScript: '$' - name: code_cache + i18n: + zh-CN: 代码缓存 + en-US: Code Cache priority: 5 fields: - field: committed + i18n: + zh-CN: 已提交 + en-US: Committed type: 1 unit: MB - field: init + i18n: + zh-CN: 初始化 + en-US: Init type: 0 unit: MB - field: max + i18n: + zh-CN: 最大 + en-US: Max type: 0 unit: MB - field: used + i18n: + zh-CN: 已使用 + en-US: Used type: 0 unit: MB aliasFields: diff --git a/manager/src/main/resources/define/app-iceberg.yml b/manager/src/main/resources/define/app-iceberg.yml index baf932f9b7f..e46e02efb95 100644 --- a/manager/src/main/resources/define/app-iceberg.yml +++ b/manager/src/main/resources/define/app-iceberg.yml @@ -90,6 +90,9 @@ metrics: fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: responseTime + i18n: + zh-CN: 响应时间 + en-US: Response Time type: 0 unit: ms # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk @@ -110,15 +113,30 @@ metrics: parseType: default - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic priority: 1 fields: - field: vm_name + i18n: + zh-CN: 虚拟机名称 + en-US: VM Name type: 1 - field: vm_vendor + i18n: + zh-CN: 虚拟机供应商 + en-US: VM Vendor type: 1 - field: vm_version + i18n: + zh-CN: 虚拟机版本 + en-US: VM Version type: 1 - field: up_time + i18n: + zh-CN: 运行时间 
+ en-US: Uptime type: 0 unit: ms aliasFields: @@ -143,21 +161,42 @@ metrics: # metrics - environment - name: environment + i18n: + zh-CN: 环境信息 + en-US: Environment priority: 2 # collect metrics content fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: https_proxyPort + i18n: + zh-CN: https 代理端口 + en-US: https Proxy Port type: 0 - field: os_name + i18n: + zh-CN: os 名称 + en-US: OS Name type: 1 - field: os_version + i18n: + zh-CN: os 版本 + en-US: OS Version type: 1 - field: os_arch + i18n: + zh-CN: os 架构 + en-US: OS Arch type: 1 - field: java_runtime_name + i18n: + zh-CN: java 运行时名称 + en-US: Java Runtime Name type: 1 - field: java_runtime_version + i18n: + zh-CN: java 运行时版本 + en-US: Java Runtime Version type: 1 # metric alias list, used to identify metrics in query results aliasFields: @@ -195,15 +234,30 @@ metrics: parseScript: '$' - name: thread + i18n: + zh-CN: 线程 + en-US: Thread priority: 4 fields: - field: thread_count + i18n: + zh-CN: 线程数 + en-US: Thread Count type: 0 - field: total_started_thread + i18n: + zh-CN: 启动线程数 + en-US: Total Started Thread type: 0 - field: peak_thread_count + i18n: + zh-CN: 峰值线程数 + en-US: Peak Thread Count type: 0 - field: daemon_thread_count + i18n: + zh-CN: 守护线程数 + en-US: Daemon Thread Count type: 0 aliasFields: - $.beans[?(@.name == 'java.lang:type=Threading')].ThreadCount @@ -226,18 +280,33 @@ metrics: parseScript: '$' - name: code_cache + i18n: + zh-CN: 代码缓存 + en-US: Code Cache priority: 5 fields: - field: committed + i18n: + zh-CN: 已提交 + en-US: Committed type: 1 unit: MB - field: init + i18n: + zh-CN: 初始化 + en-US: Init type: 0 unit: MB - field: max + i18n: + zh-CN: 最大 + en-US: Max type: 0 unit: MB - field: used + i18n: + zh-CN: 已使用 + en-US: Used type: 0 unit: MB aliasFields: From 9f7be0292a2b6584d08101367a50c27491ca66bd Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 11 Aug 2024 22:12:27 +0800 Subject: [PATCH 167/257] [improve] add 
hertzbeat i18n (#2506) Co-authored-by: tomsun28 --- .../main/resources/define/app-dynamic_tp.yml | 2 +- .../src/main/resources/define/app-emqx.yml | 111 ++++++++++++++++++ .../src/main/resources/define/app-euleros.yml | 2 +- .../main/resources/define/app-hertzbeat.yml | 66 +++++++++++ .../resources/define/app-hertzbeat_token.yml | 75 ++++++++++++ 5 files changed, 254 insertions(+), 2 deletions(-) diff --git a/manager/src/main/resources/define/app-dynamic_tp.yml b/manager/src/main/resources/define/app-dynamic_tp.yml index f2be6b2613d..c57f1b00e9c 100644 --- a/manager/src/main/resources/define/app-dynamic_tp.yml +++ b/manager/src/main/resources/define/app-dynamic_tp.yml @@ -19,7 +19,7 @@ category: program app: dynamic_tp # The monitoring i18n name name: - zh-CN: DynamicTp线程池 + zh-CN: DynamicTp 线程池 en-US: DynamicTp Pool # The description and help of this monitoring type help: diff --git a/manager/src/main/resources/define/app-emqx.yml b/manager/src/main/resources/define/app-emqx.yml index 12d93a90a5f..e15695a26c0 100644 --- a/manager/src/main/resources/define/app-emqx.yml +++ b/manager/src/main/resources/define/app-emqx.yml @@ -162,6 +162,9 @@ metrics: parseScript: '$' - name: metrics + i18n: + zh-CN: 指标 + en-US: Metrics # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -170,30 +173,69 @@ metrics: # metrics content contains field-metric name, type-metric type:0-number,1-string, instance-if is metrics, unit-metric unit('%','ms','MB') - field: client_connected type: 0 + i18n: + zh-CN: 客户端连接 + en-US: Client Connected - field: client_disconnected type: 0 + i18n: + zh-CN: 客户端断开 + en-US: Client Disconnected - field: packets_sent type: 0 + i18n: + zh-CN: 发送数据包 + en-US: Packets Sent - field: packets_received type: 0 + i18n: + zh-CN: 接收数据包 + en-US: Packets 
Received - field: bytes_sent type: 0 + i18n: + zh-CN: 发送字节 + en-US: Bytes Sent - field: bytes_received type: 0 + i18n: + zh-CN: 接收字节 + en-US: Bytes Received - field: messages_sent type: 0 + i18n: + zh-CN: 发送消息 + en-US: Messages Sent - field: messages_acked type: 0 + i18n: + zh-CN: 消息确认 + en-US: Messages Acked - field: messages_delayed type: 0 + i18n: + zh-CN: 延迟消息 + en-US: Messages Delayed - field: authorization_deny type: 0 + i18n: + zh-CN: 授权拒绝 + en-US: Authorization Deny - field: client_authorize type: 0 + i18n: + zh-CN: 客户端授权 + en-US: Client Authorize - field: session_created type: 0 + i18n: + zh-CN: 会话创建 + en-US: Session Created - field: session_discarded type: 0 + i18n: + zh-CN: 会话丢弃 + en-US: Session Discarded # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field aliasFields: - client.connected @@ -251,53 +293,122 @@ metrics: parseScript: '$' - name: stats + i18n: + zh-CN: 统计 + en-US: Stats priority: 2 fields: # metrics content contains field-metric name, type-metric type:0-number,1-string, instance-if is metrics, unit-metric unit('%','ms','MB') - field: channels_count type: 0 + i18n: + zh-CN: 通道数 + en-US: Channels Count - field: channels_max type: 0 + i18n: + zh-CN: 通道最大数 + en-US: Channels Max - field: connections_count type: 0 + i18n: + zh-CN: 连接数 + en-US: Connections Count - field: connections_max type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Connections Max - field: delayed_count type: 0 + i18n: + zh-CN: 延迟数 + en-US: Delayed Count - field: delayed_max type: 0 + i18n: + zh-CN: 最大延迟数 + en-US: Delayed Max - field: live_connections_count type: 0 + i18n: + zh-CN: 活动连接数 + en-US: Live Connections Count - field: live_connections_max type: 0 + i18n: + zh-CN: 最大活动连接数 + en-US: Live Connections Max - field: retained_count type: 0 + i18n: + zh-CN: 保留数 + en-US: Retained Count - field: retained_max type: 0 + i18n: + zh-CN: 最大保留数 + en-US: Retained Max - field: sessions_count type: 0 + i18n: + zh-CN: 会话数 + 
en-US: Sessions Count - field: sessions_max type: 0 + i18n: + zh-CN: 最大会话数 + en-US: Sessions Max - field: suboptions_count type: 0 + i18n: + zh-CN: 订阅选项数 + en-US: Suboptions Count - field: suboptions_max type: 0 + i18n: + zh-CN: 最大订阅选项数 + en-US: Suboptions Max - field: subscribers_count type: 0 + i18n: + zh-CN: 订阅者数 + en-US: Subscribers Count - field: subscribers_max type: 0 + i18n: + zh-CN: 最大订阅者数 + en-US: Subscribers Max - field: subscriptions_count type: 0 + i18n: + zh-CN: 订阅数 + en-US: Subscriptions Count - field: subscriptions_max type: 0 + i18n: + zh-CN: 最大订阅数 + en-US: Subscriptions Max - field: subscriptions_shared_count type: 0 + i18n: + zh-CN: 共享订阅数 + en-US: Subscriptions Shared Count - field: subscriptions_shared_max type: 0 + i18n: + zh-CN: 最大共享订阅数 + en-US: Subscriptions Shared Max - field: topics_count type: 0 + i18n: + zh-CN: 主题数 + en-US: Topics Count - field: topics_max type: 0 + i18n: + zh-CN: 最大主题数 + en-US: Topics Max # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field aliasFields: - channels.count diff --git a/manager/src/main/resources/define/app-euleros.yml b/manager/src/main/resources/define/app-euleros.yml index 761830403dc..8bcbb4d52a3 100644 --- a/manager/src/main/resources/define/app-euleros.yml +++ b/manager/src/main/resources/define/app-euleros.yml @@ -19,7 +19,7 @@ category: os app: euleros # The monitoring i18n name name: - zh-CN: EulerOS操作系统 + zh-CN: EulerOS 操作系统 en-US: EulerOS # The description and help of this monitoring type help: diff --git a/manager/src/main/resources/define/app-hertzbeat.yml b/manager/src/main/resources/define/app-hertzbeat.yml index 79b2577a256..fc67c6f92b5 100644 --- a/manager/src/main/resources/define/app-hertzbeat.yml +++ b/manager/src/main/resources/define/app-hertzbeat.yml @@ -87,6 +87,9 @@ metrics: # the first metrics summary # attention: Built-in monitoring metrics contains (responseTime - Response time) - name: summary + i18n: + zh-CN: 
概要信息 + en-US: Summary # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -96,14 +99,29 @@ metrics: - field: app type: 1 label: true + i18n: + zh-CN: 应用 + en-US: App - field: category type: 1 + i18n: + zh-CN: 类别 + en-US: Category - field: status type: 0 + i18n: + zh-CN: 状态 + en-US: Status - field: size type: 0 + i18n: + zh-CN: 数量 + en-US: Size - field: availableSize type: 0 + i18n: + zh-CN: 可用数量 + en-US: Available Size # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk, we use HTTP protocol here protocol: http # the config content when protocol is http @@ -132,19 +150,37 @@ metrics: parseScript: '$.data.apps.*' - name: inner_queue + i18n: + zh-CN: 内部队列 + en-US: Inner Queue priority: 1 fields: - field: responseTime type: 0 unit: ms + i18n: + zh-CN: 响应时间 + en-US: Response Time - field: alertDataQueue type: 0 + i18n: + zh-CN: 变更数据队列 + en-US: Data Change Queue - field: metricsDataToAlertQueue type: 0 + i18n: + zh-CN: 指标数据到变更队列 + en-US: Metrics Data to Change Queue - field: metricsDataToPersistentStorageQueue type: 0 + i18n: + zh-CN: 指标数据到持久化队列 + en-US: Metrics Data to Persistent Queue - field: metricsDataToMemoryStorageQueue type: 0 + i18n: + zh-CN: 指标数据到内存存储队列 + en-US: Metrics Data to Memory Storage Queue protocol: http http: host: ^_^host^_^ @@ -163,11 +199,17 @@ metrics: parseScript: '$.data' - name: thread_state + i18n: + zh-CN: 线程状态 + en-US: Thread State visible: false priority: 1 fields: - field: state type: 2 + i18n: + zh-CN: 状态 + en-US: State protocol: http http: host: ^_^host^_^ @@ -185,12 +227,21 @@ metrics: parseScript: '$.availableTags[?(@.tag == "state")].values[*]' - name: threads + i18n: + zh-CN: 线程 + en-US: Threads priority: 3 fields: - field: state type: 1 + i18n: + zh-CN: 状态 + en-US: State - field: number 
type: 0 + i18n: + zh-CN: 数量 + en-US: Number aliasFields: - $.measurements[?(@.statistic == "VALUE")].value calculates: @@ -213,11 +264,17 @@ metrics: parseScript: '$' - name: space_name + i18n: + zh-CN: 空间名称 + en-US: Space Name visible: false priority: 4 fields: - field: id type: 1 + i18n: + zh-CN: ID + en-US: ID protocol: http http: host: ^_^host^_^ @@ -235,13 +292,22 @@ metrics: parseScript: '$.availableTags[?(@.tag == "id")].values[*]' - name: memory_used + i18n: + zh-CN: 内存使用 + en-US: Memory Used priority: 5 fields: - field: space type: 1 + i18n: + zh-CN: 空间 + en-US: Space - field: mem_used type: 0 unit: MB + i18n: + zh-CN: 已使用内存 + en-US: Used Memory aliasFields: - $.measurements[?(@.statistic == "VALUE")].value calculates: diff --git a/manager/src/main/resources/define/app-hertzbeat_token.yml b/manager/src/main/resources/define/app-hertzbeat_token.yml index 1bbbf78825a..3c817b63370 100644 --- a/manager/src/main/resources/define/app-hertzbeat_token.yml +++ b/manager/src/main/resources/define/app-hertzbeat_token.yml @@ -78,6 +78,9 @@ params: metrics: # metrics - auth - name: auth + i18n: + zh-CN: 认证 + en-US: Auth # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -86,8 +89,14 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: token type: 1 + i18n: + zh-CN: 令牌 + en-US: Token - field: refreshToken type: 1 + i18n: + zh-CN: 刷新令牌 + en-US: Refresh Token # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http # the config content when protocol is http @@ -116,19 +125,37 @@ metrics: - name: summary + i18n: + zh-CN: 概要信息 + en-US: Summary priority: 1 fields: - field: app type: 1 label: true + i18n: + zh-CN: 应用 + 
en-US: App - field: category type: 1 + i18n: + zh-CN: 类别 + en-US: Category - field: status type: 0 + i18n: + zh-CN: 状态 + en-US: Status - field: size type: 0 + i18n: + zh-CN: 数量 + en-US: Size - field: availableSize type: 0 + i18n: + zh-CN: 可用数量 + en-US: Available Size protocol: http http: host: ^_^host^_^ @@ -144,19 +171,37 @@ metrics: parseScript: '$.data.apps.*' - name: inner_queue + i18n: + zh-CN: 内部队列 + en-US: Inner Queue priority: 1 fields: - field: responseTime type: 0 unit: ms + i18n: + zh-CN: 响应时间 + en-US: Response Time - field: alertDataQueue type: 0 + i18n: + zh-CN: 变更数据队列 + en-US: Alert Data Queue - field: metricsDataToAlertQueue type: 0 + i18n: + zh-CN: 指标数据到变更队列 + en-US: Metrics Data to Alert Queue - field: metricsDataToPersistentStorageQueue type: 0 + i18n: + zh-CN: 指标数据到持久化队列 + en-US: Metrics Data to Persistent Storage Queue - field: metricsDataToMemoryStorageQueue type: 0 + i18n: + zh-CN: 指标数据到内存存储队列 + en-US: Metrics Data to Memory Storage Queue protocol: http http: host: ^_^host^_^ @@ -173,11 +218,17 @@ metrics: parseScript: '$.data' - name: thread_state + i18n: + zh-CN: 指标数据到内存存储队列 + en-US: Metrics Data to Memory Storage Queue visible: false priority: 2 fields: - field: state type: 1 + i18n: + zh-CN: 状态 + en-US: State protocol: http http: host: ^_^host^_^ @@ -193,12 +244,21 @@ metrics: parseScript: '$.availableTags[?(@.tag == "state")].values[*]' - name: threads + i18n: + zh-CN: 线程 + en-US: Threads priority: 3 fields: - field: state type: 1 + i18n: + zh-CN: 状态 + en-US: State - field: number type: 0 + i18n: + zh-CN: 数量 + en-US: Number aliasFields: - $.measurements[?(@.statistic == "VALUE")].value calculates: @@ -219,11 +279,17 @@ metrics: parseScript: '$' - name: space_name + i18n: + zh-CN: 空间名称 + en-US: Space Name visible: false priority: 4 fields: - field: id type: 1 + i18n: + zh-CN: ID + en-US: ID protocol: http http: host: ^_^host^_^ @@ -239,13 +305,22 @@ metrics: parseScript: '$.availableTags[?(@.tag == "id")].values[*]' - name: memory_used + 
i18n: + zh-CN: 内存使用 + en-US: Memory Used priority: 5 fields: - field: space type: 1 + i18n: + zh-CN: 空间 + en-US: Space - field: mem_used type: 0 unit: MB + i18n: + zh-CN: 内存使用量 + en-US: Memory Used aliasFields: - $.measurements[?(@.statistic == "VALUE")].value calculates: From a8c1c483900503c56df1241268e001dd309c0678 Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 12 Aug 2024 07:46:24 +0800 Subject: [PATCH 168/257] [improve] add more yml i18n (#2505) Co-authored-by: liutianyou --- .../main/resources/define/app-activemq.yml | 121 +++++++++++++++++- .../src/main/resources/define/app-airflow.yml | 14 +- .../main/resources/define/app-almalinux.yml | 6 +- .../main/resources/define/app-doris_be.yml | 74 ++++++++++- .../main/resources/define/app-doris_fe.yml | 44 ++++++- 5 files changed, 245 insertions(+), 14 deletions(-) diff --git a/manager/src/main/resources/define/app-activemq.yml b/manager/src/main/resources/define/app-activemq.yml index 6c1878631eb..62e3157b80b 100644 --- a/manager/src/main/resources/define/app-activemq.yml +++ b/manager/src/main/resources/define/app-activemq.yml @@ -35,7 +35,7 @@ params: - field: host # name-param field display i18n name name: - zh-CN: 目标Host + zh-CN: 目标 Host en-US: Target Host # type-param field type(most mapping the html input type) type: host @@ -45,7 +45,7 @@ params: - field: port # name-param field display i18n name name: - zh-CN: JMX端口 + zh-CN: JMX 端口 en-US: JMX Port # type-param field type(most mapping the html input type) type: number @@ -148,50 +148,110 @@ metrics: url: ^_^url^_^ - name: broker + i18n: + zh-CN: Broker 信息 + en-US: Broker Info priority: 1 fields: - field: BrokerName type: 1 label: true + i18n: + zh-CN: Broker 名称 + en-US: Broker Name - field: BrokerVersion + i18n: + zh-CN: Broker 版本 + en-US: Broker Version type: 1 - field: Uptime + i18n: + zh-CN: 启动时长 + en-US: Uptime type: 1 - field: UptimeMillis + i18n: + zh-CN: 启动时长(毫秒) + en-US: Uptime Millis type: 0 unit: ms - field: Persistent + i18n: + zh-CN: 持久化 + 
en-US: Persistent type: 1 - field: DataDirectory + i18n: + zh-CN: 数据目录 + en-US: Data Directory type: 1 - field: MemoryPercentUsage + i18n: + zh-CN: 内存使用率 + en-US: Memory Percent Usage type: 0 unit: '%' - field: StorePercentUsage + i18n: + zh-CN: 存储使用率 + en-US: Store Percent Usage type: 0 unit: '%' - field: TempPercentUsage + i18n: + zh-CN: 临时使用率 + en-US: Temp Percent Usage type: 0 unit: '%' - field: CurrentConnectionsCount + i18n: + zh-CN: 当前连接数 + en-US: Current Connections Count type: 0 - field: TotalConnectionsCount + i18n: + zh-CN: 总连接数 + en-US: Total Connections Count type: 0 - field: TotalEnqueueCount + i18n: + zh-CN: 总入队数 + en-US: Total Enqueue Count type: 0 - field: TotalDequeueCount + i18n: + zh-CN: 总出队数 + en-US: Total Dequeue Count type: 0 - field: TotalConsumerCount + i18n: + zh-CN: 消费者总数 + en-US: Total Consumer Count type: 0 - field: TotalProducerCount + i18n: + zh-CN: 生产者总数 + en-US: Total Producer Count type: 0 - field: TotalMessageCount + i18n: + zh-CN: 总消息数 + en-US: Total Message Count type: 0 - field: AverageMessageSize + i18n: + zh-CN: 平均消息大小 + en-US: Average Message Size type: 0 - field: MaxMessageSize + i18n: + zh-CN: 最大消息大小 + en-US: Max Message Size type: 0 - field: MinMessageSize + i18n: + zh-CN: 最小消息大小 + en-US: Min Message Size type: 0 protocol: jmx jmx: @@ -208,52 +268,109 @@ metrics: - field: Name type: 1 label: true + i18n: + zh-CN: Topic 名称 + en-US: Topic Name - field: MemoryLimit type: 0 unit: MB + i18n: + zh-CN: 内存限制 + en-US: Memory Limit - field: MemoryPercentUsage type: 0 unit: '%' + i18n: + zh-CN: 内存使用率 + en-US: Memory Percent Usage - field: ProducerCount type: 0 + i18n: + zh-CN: 生产者数量 + en-US: Producer Count - field: ConsumerCount type: 0 + i18n: + zh-CN: 消费者数量 + en-US: Consumer Count - field: EnqueueCount type: 0 + i18n: + zh-CN: 入队数量 + en-US: Enqueue Count - field: DequeueCount type: 0 + i18n: + zh-CN: 出队数量 + en-US: Dequeue Count - field: ForwardCount type: 0 + i18n: + zh-CN: 转发数量 + en-US: Forward Count - field: InFlightCount type: 
0 + i18n: + zh-CN: 进行中数量 + en-US: In Flight Count - field: DispatchCount type: 0 + i18n: + zh-CN: 分发数量 + en-US: Dispatch Count - field: ExpiredCount type: 0 + i18n: + zh-CN: 过期数量 + en-US: Expired Count - field: StoreMessageSize type: 0 unit: B + i18n: + zh-CN: 存储消息大小 + en-US: Store Message Size - field: AverageEnqueueTime type: 0 unit: ms + i18n: + zh-CN: 平均入队时间 + en-US: Average Enqueue Time - field: MaxEnqueueTime type: 0 unit: ms + i18n: + zh-CN: 最大入队时间 + en-US: Max Enqueue Time - field: MinEnqueueTime type: 0 unit: ms + i18n: + zh-CN: 最小入队时间 + en-US: Min Enqueue Time - field: TotalBlockedTime type: 0 unit: ms + i18n: + zh-CN: 总阻塞时间 + en-US: Total Blocked Time - field: AverageMessageSize type: 0 unit: B + i18n: + zh-CN: 平均消息大小 + en-US: Average Message Size - field: MaxMessageSize type: 0 unit: B + i18n: + zh-CN: 最大消息大小 + en-US: Max Message Size - field: MinMessageSize type: 0 unit: B + i18n: + zh-CN: 最小消息大小 + en-US: Min Message Size units: - MemoryLimit=B->MB protocol: jmx diff --git a/manager/src/main/resources/define/app-airflow.yml b/manager/src/main/resources/define/app-airflow.yml index beffb1d415e..0d2c92e62ec 100644 --- a/manager/src/main/resources/define/app-airflow.yml +++ b/manager/src/main/resources/define/app-airflow.yml @@ -35,7 +35,7 @@ params: - field: host # name-param field display i18n name name: - zh-CN: 服务器Host + zh-CN: 服务器 Host en-US: Target Host # type-param field type(most mapping the html input type) type: host @@ -76,23 +76,23 @@ params: metrics: - name: airflow_health i18n: - zh-CN: airflow健康状态 + zh-CN: airflow 健康状态 en-US: airflow health status priority: 0 fields: - field: metadatabase i18n: - zh-CN: metadatabase健康情况 + zh-CN: metadatabase 健康情况 en-US: metadatabase status type: 1 - field: scheduler i18n: - zh-CN: scheduler健康情况 + zh-CN: scheduler 健康情况 en-US: scheduler status type: 1 - field: triggerer i18n: - zh-CN: triggerer健康情况 + zh-CN: triggerer 健康情况 en-US: triggerer status type: 1 aliasFields: @@ -120,13 +120,13 @@ metrics: - name: 
airflow_version i18n: - zh-CN: Airflow版本 + zh-CN: Airflow 版本 en-US: airflow version priority: 1 fields: - field: version i18n: - zh-CN: Airflow版本 + zh-CN: Airflow 版本 en-US: Airflow version type: 1 - field: git_version diff --git a/manager/src/main/resources/define/app-almalinux.yml b/manager/src/main/resources/define/app-almalinux.yml index 2452a389d5c..101e0de905b 100644 --- a/manager/src/main/resources/define/app-almalinux.yml +++ b/manager/src/main/resources/define/app-almalinux.yml @@ -35,7 +35,7 @@ params: - field: host # name-param field display i18n name name: - zh-CN: 目标Host + zh-CN: 目标 Host en-US: Target Host # type-param field type(most mapping the html input type) type: host @@ -444,7 +444,7 @@ metrics: type: 0 unit: '%' i18n: - zh-CN: CPU占用率 + zh-CN: CPU 占用率 en-US: CPU Usage - field: mem_usage type: 0 @@ -491,7 +491,7 @@ metrics: type: 0 unit: '%' i18n: - zh-CN: CPU占用率 + zh-CN: CPU 占用率 en-US: CPU Usage - field: command type: 1 diff --git a/manager/src/main/resources/define/app-doris_be.yml b/manager/src/main/resources/define/app-doris_be.yml index 658b339f504..6dbbe22fc80 100644 --- a/manager/src/main/resources/define/app-doris_be.yml +++ b/manager/src/main/resources/define/app-doris_be.yml @@ -34,7 +34,7 @@ params: - field: host # name-param field display i18n name name: - zh-CN: 目标Host + zh-CN: 目标 Host en-US: Target Host # type-param field type(most mapping the html input type) type: host @@ -62,6 +62,9 @@ params: defaultValue: 6000 metrics: - name: doris_be_load_channel_count + i18n: + zh-CN: 加载通道数 + en-US: Load Channel Count priority: 0 fields: - field: value @@ -76,6 +79,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_memtable_flush_total + i18n: + zh-CN: Memtable 刷新总数 + en-US: Memtable Flush Total priority: 0 fields: - field: value @@ -90,6 +96,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_plan_fragment_count + i18n: + zh-CN: Plan Fragment 数量 + en-US: Plan Fragment Count priority: 0 fields: - field: value @@ 
-104,6 +113,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_process_thread_num + i18n: + zh-CN: 进程线程数 + en-US: Process Thread Num priority: 0 fields: - field: value @@ -118,6 +130,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_query_scan_rows + i18n: + zh-CN: 查询扫描行数 + en-US: Query Scan Rows priority: 0 fields: - field: value @@ -132,6 +147,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_result_buffer_block_count + i18n: + zh-CN: 结果缓冲区块数 + en-US: Result Buffer Block Count priority: 0 fields: - field: value @@ -146,6 +164,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_send_batch_thread_pool_queue_size + i18n: + zh-CN: 批量发送线程池队列大小 + en-US: Send Batch Thread Pool Queue Size priority: 0 fields: - field: value @@ -160,6 +181,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_tablet_base_max_compaction_score + i18n: + zh-CN: Tablet Base 最大压缩分数 + en-US: Tablet Base Max Compaction Score priority: 0 fields: - field: value @@ -174,6 +198,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_timeout_canceled_fragment_count + i18n: + zh-CN: 超时取消的 Fragment 数量 + en-US: Timeout Canceled Fragment Count priority: 0 fields: - field: value @@ -188,6 +215,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_load_rows + i18n: + zh-CN: 加载行数 + en-US: Load Rows priority: 0 fields: - field: value @@ -202,6 +232,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_all_rowsets_num + i18n: + zh-CN: Rowset 总数 + en-US: All Rowsets Num priority: 0 fields: - field: value @@ -216,6 +249,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_all_segments_num + i18n: + zh-CN: Segment 总数 + en-US: All Segments Num priority: 0 fields: - field: value @@ -230,6 +266,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_heavy_work_max_threads + i18n: + zh-CN: 繁重工作最大线程数 + en-US: Heavy Work Max Threads priority: 0 fields: - field: value @@ -244,6 +283,9 @@ metrics: 
method: GET parseType: prometheus - name: doris_be_light_work_max_threads + i18n: + zh-CN: 轻量工作最大线程数 + en-US: Light Work Max Threads priority: 0 fields: - field: value @@ -258,6 +300,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_heavy_work_pool_queue_size + i18n: + zh-CN: 繁重工作池队列大小 + en-US: Heavy Work Pool Queue Size priority: 0 fields: - field: value @@ -272,6 +317,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_light_work_pool_queue_size + i18n: + zh-CN: 轻量工作池队列大小 + en-US: Light Work Pool Queue Size priority: 0 fields: - field: value @@ -286,6 +334,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_heavy_work_active_threads + i18n: + zh-CN: 繁重工作活跃线程数 + en-US: Heavy Work Active Threads priority: 0 fields: - field: value @@ -300,6 +351,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_light_work_active_threads + i18n: + zh-CN: 轻量工作活跃线程数 + en-US: Light Work Active Threads priority: 0 fields: - field: value @@ -314,6 +368,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_compaction_bytes_total + i18n: + zh-CN: 压缩字节总数 + en-US: Compaction Bytes Total priority: 0 fields: - field: type @@ -331,6 +388,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_disks_avail_capacity + i18n: + zh-CN: 磁盘可用容量 + en-US: Disks Avail Capacity priority: 0 fields: - field: path @@ -348,6 +408,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_disks_total_capacity + i18n: + zh-CN: 磁盘总容量 + en-US: Disks Total Capacity priority: 0 fields: - field: path @@ -365,6 +428,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_local_bytes_read_total + i18n: + zh-CN: 本地读取字节总数 + en-US: Local Bytes Read Total priority: 0 fields: - field: value @@ -380,6 +446,9 @@ metrics: method: GET parseType: prometheus - name: doris_be_local_bytes_written_total + i18n: + zh-CN: 本地写入字节总数 + en-US: Local Bytes Written Total priority: 0 fields: - field: value @@ -395,6 +464,9 @@ metrics: method: GET 
parseType: prometheus - name: doris_be_memory_allocated_bytes + i18n: + zh-CN: 内存分配字节 + en-US: Memory Allocated Bytes priority: 0 fields: - field: value diff --git a/manager/src/main/resources/define/app-doris_fe.yml b/manager/src/main/resources/define/app-doris_fe.yml index 019ab1f1682..e7b4955d229 100644 --- a/manager/src/main/resources/define/app-doris_fe.yml +++ b/manager/src/main/resources/define/app-doris_fe.yml @@ -34,7 +34,7 @@ params: - field: host # name-param field display i18n name name: - zh-CN: 目标Host + zh-CN: 目标 Host en-US: Target Host # type-param field type(most mapping the html input type) type: host @@ -62,6 +62,9 @@ params: defaultValue: 6000 metrics: - name: doris_fe_connection_total + i18n: + zh-CN: 连接总数 + en-US: Connection Total priority: 0 fields: - field: value @@ -81,6 +84,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_edit_log_clean + i18n: + zh-CN: 编辑日志清理 + en-US: Edit Log Clean priority: 0 fields: - field: type @@ -102,6 +108,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_edit_log + i18n: + zh-CN: 编辑日志 + en-US: Edit Log priority: 0 fields: - field: type @@ -123,6 +132,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_image_clean + i18n: + zh-CN: 图片清理 + en-US: Image Clean priority: 0 fields: - field: type @@ -144,6 +156,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_image_write + i18n: + zh-CN: 图片写入 + en-US: Image Write priority: 0 fields: - field: type @@ -165,6 +180,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_query_err + i18n: + zh-CN: 查询错误 + en-US: Query Error priority: 0 fields: - field: value @@ -184,6 +202,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_max_journal_id + i18n: + zh-CN: 最大日志ID + en-US: Max Journal ID priority: 0 fields: - field: value @@ -203,6 +224,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_max_tablet_compaction_score + i18n: + zh-CN: 最大Tablet压缩分数 + en-US: Max Tablet Compaction Score 
priority: 0 fields: - field: value @@ -222,6 +246,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_qps + i18n: + zh-CN: 每秒查询率 + en-US: QPS priority: 0 fields: - field: value @@ -241,6 +268,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_query_err_rate + i18n: + zh-CN: 查询错误率 + en-US: Query Error Rate priority: 0 fields: - field: value @@ -262,6 +292,9 @@ metrics: parseType: prometheus - name: doris_fe_report_queue_size + i18n: + zh-CN: 报告队列大小 + en-US: Report Queue Size priority: 0 fields: - field: value @@ -281,6 +314,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_rps + i18n: + zh-CN: 每秒转数 + en-US: RPS priority: 0 fields: - field: value @@ -300,6 +336,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_scheduled_tablet_num + i18n: + zh-CN: 调度 Tablet 数量 + en-US: Scheduled Tablet Num priority: 0 fields: - field: value @@ -319,6 +358,9 @@ metrics: method: GET parseType: prometheus - name: doris_fe_txn_status + i18n: + zh-CN: 事务状态 + en-US: Transaction Status priority: 0 fields: - field: type From a55a098983c06b2da7663272467c9942c6b0c9d5 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 12 Aug 2024 08:00:17 +0800 Subject: [PATCH 169/257] [improve] Replacing characters with http constants (#2500) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../service/impl/AlertDefineServiceImpl.java | 10 ++-- .../collector/collect/AbstractCollect.java | 1 - .../collect/database/JdbcCommonCollect.java | 2 +- .../collect/http/HttpCollectImpl.java | 37 +++++++------- .../http/SslCertificateCollectImpl.java | 2 +- .../collector/collect/httpsd/HttpsdImpl.java | 2 +- .../collect/icmp/IcmpCollectImpl.java | 2 +- .../memcached/MemcachedCollectImpl.java | 3 +- .../mongodb/MongodbSingleCollectImpl.java | 2 +- .../nebulagraph/NebulaGraphCollectImpl.java | 3 +- .../collect/nebulagraph/NgqlCollectImpl.java | 2 +- .../collect/nginx/NginxCollectImpl.java | 25 +++++----- .../collector/collect/ntp/NtpCollectImpl.java | 2 +- 
.../collect/pop3/Pop3CollectImpl.java | 2 +- .../prometheus/PrometheusAutoCollectImpl.java | 20 ++++---- .../collect/push/PushCollectImpl.java | 16 +++--- .../collect/redfish/RedfishClient.java | 25 ++++++---- .../redfish/RedfishConnectSession.java | 19 +++---- .../collect/script/ScriptCollectImpl.java | 4 +- .../collect/smtp/SmtpCollectImpl.java | 2 +- .../collect/snmp/SnmpCollectImpl.java | 2 +- .../collector/collect/ssh/SshCollectImpl.java | 2 +- .../collect/telnet/TelnetCollectImpl.java | 2 +- .../collector/collect/udp/UdpCollectImpl.java | 2 +- .../websocket/WebsocketCollectImpl.java | 2 +- .../constants/CollectorConstants.java | 34 ++++++------- .../collect/nginx/NginxCollectImplTest.java | 4 +- .../common/constants/NetworkConstants.java | 50 +++++++++++++++++++ .../common/constants/SignConstants.java | 4 ++ .../hertzbeat/common/util/IpDomainUtil.java | 6 +-- .../hertzbeat/common/util/FileUtilTest.java | 5 +- .../common/util/IpDomainUtilTest.java | 17 +++---- .../impl/WeChatAlertNotifyHandlerImpl.java | 4 +- .../service/impl/MonitorServiceImpl.java | 23 +++++---- 34 files changed, 201 insertions(+), 137 deletions(-) rename {common/src/main/java/org/apache/hertzbeat/common => collector/src/main/java/org/apache/hertzbeat/collector}/constants/CollectorConstants.java (80%) create mode 100644 common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java index 0923695c17a..e37fdb86ce8 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineServiceImpl.java @@ -38,6 +38,7 @@ import org.apache.hertzbeat.alert.service.AlertDefineImExportService; import org.apache.hertzbeat.alert.service.AlertDefineService; import 
org.apache.hertzbeat.common.constants.ExportFileConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.entity.alerter.AlertDefine; import org.apache.hertzbeat.common.entity.alerter.AlertDefineMonitorBind; import org.apache.hertzbeat.common.entity.manager.Monitor; @@ -49,6 +50,7 @@ import org.springframework.data.domain.Sort; import org.springframework.data.jpa.domain.Specification; import org.springframework.http.HttpHeaders; +import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.util.StringUtils; @@ -73,6 +75,8 @@ public class AlertDefineServiceImpl implements AlertDefineService { private final Map alertDefineImExportServiceMap = new HashMap<>(); + private static final String CONTENT_TYPE = MediaType.APPLICATION_OCTET_STREAM_VALUE + SignConstants.SINGLE_MARK + "charset=" + StandardCharsets.UTF_8; + public AlertDefineServiceImpl(List alertDefineImExportServiceList) { alertDefineImExportServiceList.forEach(it -> alertDefineImExportServiceMap.put(it.type(), it)); } @@ -229,10 +233,10 @@ public void export(List ids, String type, HttpServletResponse res) throws throw new IllegalArgumentException("not support export type: " + type); } var fileName = imExportService.getFileName(); - res.setHeader("content-type", "application/octet-stream;charset=UTF-8"); - res.setContentType("application/octet-stream;charset=UTF-8"); + res.setHeader(HttpHeaders.CONTENT_TYPE, CONTENT_TYPE); + res.setContentType(CONTENT_TYPE); res.setHeader(HttpHeaders.CONTENT_DISPOSITION, "attachment;filename=" + URLEncoder.encode(fileName, StandardCharsets.UTF_8)); - res.setHeader("Access-Control-Expose-Headers", "Content-Disposition"); + res.setHeader(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS, HttpHeaders.CONTENT_DISPOSITION); imExportService.exportConfig(res.getOutputStream(), ids); } diff --git 
a/collector/src/main/java/org/apache/hertzbeat/collector/collect/AbstractCollect.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/AbstractCollect.java index 82caa001248..455b584d86b 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/AbstractCollect.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/AbstractCollect.java @@ -17,7 +17,6 @@ package org.apache.hertzbeat.collector.collect; - import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.message.CollectRep; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/database/JdbcCommonCollect.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/database/JdbcCommonCollect.java index 22943694cd8..b3f25518afb 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/database/JdbcCommonCollect.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/database/JdbcCommonCollect.java @@ -31,9 +31,9 @@ import org.apache.hertzbeat.collector.collect.common.cache.CacheIdentifier; import org.apache.hertzbeat.collector.collect.common.cache.ConnectionCommonCache; import org.apache.hertzbeat.collector.collect.common.cache.JdbcConnect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.JdbcProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java index c2c48f48ead..99dfe3f9b94 100644 --- 
a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/HttpCollectImpl.java @@ -48,12 +48,14 @@ import org.apache.hertzbeat.collector.collect.http.promethus.PrometheusParseCreator; import org.apache.hertzbeat.collector.collect.http.promethus.exporter.ExporterParser; import org.apache.hertzbeat.collector.collect.http.promethus.exporter.MetricFamily; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; import org.apache.hertzbeat.collector.util.JsonPathParser; import org.apache.hertzbeat.collector.util.TimeExpressionUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.HttpProtocol; import org.apache.hertzbeat.common.entity.message.CollectRep; @@ -81,6 +83,7 @@ import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; import org.springframework.http.HttpMethod; +import org.springframework.http.MediaType; import org.springframework.util.CollectionUtils; import org.springframework.util.StringUtils; import org.w3c.dom.Document; @@ -115,7 +118,7 @@ public void collect(CollectRep.MetricsData.Builder builder, httpProtocol.setUrl(StringUtils.hasText(url) ? 
RIGHT_DASH + url.trim() : RIGHT_DASH); } if (CollectionUtils.isEmpty(httpProtocol.getSuccessCodes())) { - httpProtocol.setSuccessCodes(List.of("200")); + httpProtocol.setSuccessCodes(List.of(HttpStatus.SC_OK + "")); } HttpContext httpContext = createHttpContext(metrics.getHttp()); @@ -126,7 +129,7 @@ public void collect(CollectRep.MetricsData.Builder builder, log.debug("http response status: {}", statusCode); if (!isSuccessInvoke) { builder.setCode(CollectRep.Code.FAIL); - builder.setMsg("StatusCode " + statusCode); + builder.setMsg(NetworkConstants.STATUS_CODE + SignConstants.BLANK + statusCode); return; } // todo This code converts an InputStream directly to a String. For large data in Prometheus exporters, @@ -229,7 +232,7 @@ private void addColumnFromHeader(CollectRep.ValueRow.Builder valueRowBuilder, St } private void addColumnForSummary(Long responseTime, CollectRep.ValueRow.Builder valueRowBuilder, int keywordNum, String alias) { - if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { + if (NetworkConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(responseTime.toString()); } else if (CollectorConstants.KEYWORD.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(Integer.toString(keywordNum)); @@ -304,14 +307,14 @@ private void parseResponseBySiteMap(String resp, List aliasFields, long responseTime = System.currentTimeMillis() - startTime; CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); for (String alias : aliasFields) { - if (CollectorConstants.URL.equalsIgnoreCase(alias)) { + if (NetworkConstants.URL.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(siteUrl); - } else if (CollectorConstants.STATUS_CODE.equalsIgnoreCase(alias)) { + } else if (NetworkConstants.STATUS_CODE.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(statusCode == null ? 
CommonConstants.NULL_VALUE : String.valueOf(statusCode)); - } else if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { + } else if (NetworkConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(String.valueOf(responseTime)); - } else if (CollectorConstants.ERROR_MSG.equalsIgnoreCase(alias)) { + } else if (NetworkConstants.ERROR_MSG.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(errorMsg); } else { valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); @@ -360,7 +363,7 @@ private void parseResponseByJsonPath(String resp, List aliasFields, Http } else if (objectValue instanceof String stringValue) { CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); for (String alias : aliasFields) { - if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { + if (NetworkConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(responseTime.toString()); } else if (CollectorConstants.KEYWORD.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(Integer.toString(keywordNum)); @@ -511,8 +514,8 @@ public HttpUriRequest createHttpRequest(HttpProtocol httpProtocol) { } // The default request header can be overridden if customized // keep-alive - requestBuilder.addHeader(HttpHeaders.CONNECTION, "keep-alive"); - requestBuilder.addHeader(HttpHeaders.USER_AGENT, "Mozilla/5.0 (Windows NT 6.1; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36"); + requestBuilder.addHeader(HttpHeaders.CONNECTION, NetworkConstants.KEEP_ALIVE); + requestBuilder.addHeader(HttpHeaders.USER_AGENT, NetworkConstants.USER_AGENT); // headers The custom request header is overwritten here Map headers = httpProtocol.getHeaders(); if (headers != null && !headers.isEmpty()) { @@ -526,11 +529,11 @@ public HttpUriRequest createHttpRequest(HttpProtocol httpProtocol) { // add accept if (DispatchConstants.PARSE_DEFAULT.equals(httpProtocol.getParseType()) || 
DispatchConstants.PARSE_JSON_PATH.equals(httpProtocol.getParseType())) { - requestBuilder.addHeader(HttpHeaders.ACCEPT, "application/json"); + requestBuilder.addHeader(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE); } else if (DispatchConstants.PARSE_XML_PATH.equals(httpProtocol.getParseType())) { - requestBuilder.addHeader(HttpHeaders.ACCEPT, "text/xml,application/xml"); + requestBuilder.addHeader(HttpHeaders.ACCEPT, MediaType.TEXT_HTML_VALUE + "," + MediaType.APPLICATION_XML_VALUE); } else { - requestBuilder.addHeader(HttpHeaders.ACCEPT, "*/*"); + requestBuilder.addHeader(HttpHeaders.ACCEPT, MediaType.ALL_VALUE); } if (httpProtocol.getAuthorization() != null) { @@ -560,14 +563,14 @@ public HttpUriRequest createHttpRequest(HttpProtocol httpProtocol) { requestBuilder.setUri(httpProtocol.getHost() + ":" + httpProtocol.getPort() + uri); } else { String ipAddressType = IpDomainUtil.checkIpAddressType(httpProtocol.getHost()); - String baseUri = CollectorConstants.IPV6.equals(ipAddressType) + String baseUri = NetworkConstants.IPV6.equals(ipAddressType) ? 
String.format("[%s]:%s%s", httpProtocol.getHost(), httpProtocol.getPort(), uri) : String.format("%s:%s%s", httpProtocol.getHost(), httpProtocol.getPort(), uri); boolean ssl = Boolean.parseBoolean(httpProtocol.getSsl()); if (ssl) { - requestBuilder.setUri(CollectorConstants.HTTPS_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTPS_HEADER + baseUri); } else { - requestBuilder.setUri(CollectorConstants.HTTP_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTP_HEADER + baseUri); } } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/SslCertificateCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/SslCertificateCollectImpl.java index 36141ddc30e..0addb520641 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/SslCertificateCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/http/SslCertificateCollectImpl.java @@ -36,8 +36,8 @@ import javax.net.ssl.X509TrustManager; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.HttpProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/httpsd/HttpsdImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/httpsd/HttpsdImpl.java index 9b7811f8eef..92d494ca7d8 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/httpsd/HttpsdImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/httpsd/HttpsdImpl.java @@ -30,8 +30,8 @@ import 
org.apache.hertzbeat.collector.collect.httpsd.discovery.DiscoveryClient; import org.apache.hertzbeat.collector.collect.httpsd.discovery.DiscoveryClientManagement; import org.apache.hertzbeat.collector.collect.httpsd.discovery.entity.ServerInfo; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.HttpsdProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java index 0ab7ba5f5a8..7b31383f723 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java @@ -22,8 +22,8 @@ import java.net.UnknownHostException; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.IcmpProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImpl.java index 09bcf059aa4..8af815fc99f 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImpl.java +++ 
b/collector/src/main/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImpl.java @@ -32,8 +32,8 @@ import java.util.Objects; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.MemcachedProtocol; @@ -48,7 +48,6 @@ public class MemcachedCollectImpl extends AbstractCollect { private static final String STATS = "stats"; private static final String STATS_SETTINGS = "stats settings"; - private static final String STATS_ITEMS = "stats items"; private static final String STATS_SIZES = "stats sizes"; private static final String STATS_END_RSP = "END"; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/mongodb/MongodbSingleCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/mongodb/MongodbSingleCollectImpl.java index 9471c1874e8..c5ec9913fcd 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/mongodb/MongodbSingleCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/mongodb/MongodbSingleCollectImpl.java @@ -34,8 +34,8 @@ import org.apache.hertzbeat.collector.collect.common.cache.CacheIdentifier; import org.apache.hertzbeat.collector.collect.common.cache.ConnectionCommonCache; import org.apache.hertzbeat.collector.collect.common.cache.MongodbConnect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import 
org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.MongodbProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java index f61cccc8844..6874ee99843 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java @@ -28,9 +28,9 @@ import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; import org.apache.hertzbeat.collector.collect.common.http.CommonHttpClient; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.NebulaGraphProtocol; @@ -131,7 +131,6 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri } } - @Override public String supportProtocol() { return DispatchConstants.PROTOCOL_NEBULAGRAPH; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImpl.java index fea98f30222..3b18aef89c6 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImpl.java @@ -26,8 +26,8 @@ import java.util.stream.Stream; import org.apache.commons.lang3.StringUtils; import 
org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.NgqlProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImpl.java index dfb4f606d0f..b016c75c037 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImpl.java @@ -37,8 +37,8 @@ import org.apache.hertzbeat.collector.collect.common.http.CommonHttpClient; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.NginxProtocol; import org.apache.hertzbeat.common.entity.message.CollectRep; @@ -46,6 +46,7 @@ import org.apache.hertzbeat.common.util.IpDomainUtil; import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; +import org.apache.http.HttpStatus; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpUriRequest; @@ -53,6 +54,7 @@ import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; +import 
org.springframework.http.MediaType; /** * nginx collect @@ -60,7 +62,6 @@ @Slf4j public class NginxCollectImpl extends AbstractCollect { - private static final int SUCCESS_CODE = 200; private static final String NGINX_STATUS_NAME = "nginx_status"; private static final String REQ_STATUS_NAME = "req_status"; private static final String AVAILABLE = "available"; @@ -97,9 +98,9 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri try (CloseableHttpResponse response = CommonHttpClient.getHttpClient().execute(request, httpContext)){ // send an HTTP request and get the response data int statusCode = response.getStatusLine().getStatusCode(); - if (statusCode != SUCCESS_CODE) { + if (statusCode != HttpStatus.SC_OK) { builder.setCode(CollectRep.Code.FAIL); - builder.setMsg("StatusCode " + statusCode); + builder.setMsg(NetworkConstants.STATUS_CODE + statusCode); return; } String resp = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); @@ -145,21 +146,21 @@ private HttpUriRequest createHttpRequest(NginxProtocol nginxProtocol) { requestBuilder.setUri(host + ":" + portWithUri); } else { String ipAddressType = IpDomainUtil.checkIpAddressType(host); - String baseUri = CollectorConstants.IPV6.equals(ipAddressType) + String baseUri = NetworkConstants.IPV6.equals(ipAddressType) ? 
String.format("[%s]:%s", host, portWithUri) : String.format("%s:%s", host, portWithUri); boolean ssl = Boolean.parseBoolean(nginxProtocol.getSsl()); if (ssl){ - requestBuilder.setUri(CollectorConstants.HTTPS_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTPS_HEADER + baseUri); } else { - requestBuilder.setUri(CollectorConstants.HTTP_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTP_HEADER + baseUri); } } - requestBuilder.addHeader(HttpHeaders.CONNECTION, "keep-alive"); - requestBuilder.addHeader(HttpHeaders.USER_AGENT, "Mozilla/5.0 (Windows NT 6.1; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36"); - requestBuilder.addHeader(HttpHeaders.ACCEPT, "text/plain"); + requestBuilder.addHeader(HttpHeaders.CONNECTION, NetworkConstants.KEEP_ALIVE); + requestBuilder.addHeader(HttpHeaders.USER_AGENT, NetworkConstants.USER_AGENT); + requestBuilder.addHeader(HttpHeaders.ACCEPT, MediaType.TEXT_PLAIN_VALUE); int timeout = Integer.parseInt(nginxProtocol.getTimeout()); if (timeout > 0) { @@ -197,7 +198,7 @@ private void parseNginxStatusResponse(CollectRep.MetricsData.Builder builder, St if (value != null) { valueRowBuilder.addColumns(String.valueOf(value)); } else { - if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { + if (NetworkConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { valueRowBuilder.addColumns(responseTime.toString()); } else { valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); @@ -235,7 +236,7 @@ private void parseReqStatusResponse(CollectRep.MetricsData.Builder builder, Stri for (ReqStatusResponse reqStatusResponse : reqStatusResponses) { CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); for (String alias : aliasFields) { - if (CollectorConstants.RESPONSE_TIME.equals(alias)) { + if (NetworkConstants.RESPONSE_TIME.equals(alias)) { valueRowBuilder.addColumns(String.valueOf(responseTime)); } else { try { diff --git 
a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImpl.java index adc080cb73a..7f5ec63c563 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ntp/NtpCollectImpl.java @@ -33,9 +33,9 @@ import org.apache.commons.net.ntp.TimeInfo; import org.apache.commons.net.ntp.TimeStamp; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.NtpProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/pop3/Pop3CollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/pop3/Pop3CollectImpl.java index 958170df8e4..c595a4f2e10 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/pop3/Pop3CollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/pop3/Pop3CollectImpl.java @@ -27,8 +27,8 @@ import org.apache.commons.net.pop3.POP3MessageInfo; import org.apache.commons.net.pop3.POP3SClient; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import 
org.apache.hertzbeat.common.entity.job.protocol.Pop3Protocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java index d1dc2ad5a32..e1d7ab86bfb 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/prometheus/PrometheusAutoCollectImpl.java @@ -38,8 +38,9 @@ import org.apache.hertzbeat.collector.collect.prometheus.parser.TextParser; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.PrometheusProtocol; import org.apache.hertzbeat.common.entity.message.CollectRep; @@ -64,6 +65,7 @@ import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; +import org.springframework.http.MediaType; import org.springframework.util.StringUtils; /** @@ -95,7 +97,7 @@ public List collect(CollectRep.MetricsData.Builder build log.debug("http response status: {}", statusCode); if (!isSuccessInvoke) { builder.setCode(CollectRep.Code.FAIL); - builder.setMsg("StatusCode " + statusCode); + builder.setMsg(NetworkConstants.STATUS_CODE + SignConstants.BLANK + statusCode); return null; } // todo: The InputStream is directly converted to a String here @@ -252,8 +254,8 @@ public HttpUriRequest createHttpRequest(PrometheusProtocol protocol) { } // The default request header can be 
overridden if customized // keep-alive - requestBuilder.addHeader(HttpHeaders.CONNECTION, "keep-alive"); - requestBuilder.addHeader(HttpHeaders.USER_AGENT, "Mozilla/5.0 (Windows NT 6.1; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36"); + requestBuilder.addHeader(HttpHeaders.CONNECTION, NetworkConstants.KEEP_ALIVE); + requestBuilder.addHeader(HttpHeaders.USER_AGENT, NetworkConstants.USER_AGENT); // headers The custom request header is overwritten here Map headers = protocol.getHeaders(); if (headers != null && !headers.isEmpty()) { @@ -265,7 +267,7 @@ public HttpUriRequest createHttpRequest(PrometheusProtocol protocol) { } } // add accept - requestBuilder.addHeader(HttpHeaders.ACCEPT, "*/*"); + requestBuilder.addHeader(HttpHeaders.ACCEPT, MediaType.TEXT_PLAIN_VALUE); if (protocol.getAuthorization() != null) { PrometheusProtocol.Authorization authorization = protocol.getAuthorization(); @@ -291,17 +293,17 @@ public HttpUriRequest createHttpRequest(PrometheusProtocol protocol) { String uri = CollectUtil.replaceUriSpecialChar(protocol.getPath()); if (IpDomainUtil.isHasSchema(protocol.getHost())) { - requestBuilder.setUri(protocol.getHost() + ":" + protocol.getPort() + uri); + requestBuilder.setUri(protocol.getHost() + SignConstants.DOUBLE_MARK + protocol.getPort() + uri); } else { String ipAddressType = IpDomainUtil.checkIpAddressType(protocol.getHost()); - String baseUri = CollectorConstants.IPV6.equals(ipAddressType) + String baseUri = NetworkConstants.IPV6.equals(ipAddressType) ? 
String.format("[%s]:%s%s", protocol.getHost(), protocol.getPort(), uri) : String.format("%s:%s%s", protocol.getHost(), protocol.getPort(), uri); boolean ssl = Boolean.parseBoolean(protocol.getSsl()); if (ssl) { - requestBuilder.setUri(CollectorConstants.HTTPS_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTPS_HEADER + baseUri); } else { - requestBuilder.setUri(CollectorConstants.HTTP_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTP_HEADER + baseUri); } } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java index 3757cf2d223..e4412a0daf1 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/push/PushCollectImpl.java @@ -28,7 +28,8 @@ import org.apache.hertzbeat.collector.collect.common.http.CommonHttpClient; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.entity.dto.Message; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.PushProtocol; @@ -46,6 +47,7 @@ import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; +import org.springframework.http.MediaType; /** * push style collect @@ -86,7 +88,7 @@ public void collect(CollectRep.MetricsData.Builder builder, int statusCode = response.getStatusLine().getStatusCode(); if (statusCode != SUCCESS_CODE) { builder.setCode(CollectRep.Code.FAIL); - builder.setMsg("StatusCode " + statusCode); + 
builder.setMsg(NetworkConstants.STATUS_CODE + SignConstants.BLANK + statusCode); return; } String resp = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); @@ -124,19 +126,19 @@ private HttpUriRequest createHttpRequest(PushProtocol pushProtocol, Long monitor requestBuilder.setUri(pushProtocol.getHost() + ":" + pushProtocol.getPort() + uri); } else { String ipAddressType = IpDomainUtil.checkIpAddressType(pushProtocol.getHost()); - String baseUri = CollectorConstants.IPV6.equals(ipAddressType) + String baseUri = NetworkConstants.IPV6.equals(ipAddressType) ? String.format("[%s]:%s", pushProtocol.getHost(), pushProtocol.getPort() + uri) : String.format("%s:%s", pushProtocol.getHost(), pushProtocol.getPort() + uri); - requestBuilder.setUri(CollectorConstants.HTTP_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTP_HEADER + baseUri); } - requestBuilder.addHeader(HttpHeaders.CONNECTION, "keep-alive"); - requestBuilder.addHeader(HttpHeaders.USER_AGENT, "Mozilla/5.0 (Windows NT 6.1; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36"); + requestBuilder.addHeader(HttpHeaders.CONNECTION, NetworkConstants.KEEP_ALIVE); + requestBuilder.addHeader(HttpHeaders.USER_AGENT, NetworkConstants.USER_AGENT); requestBuilder.addParameter("id", String.valueOf(monitorId)); requestBuilder.addParameter("time", String.valueOf(startTime)); - requestBuilder.addHeader(HttpHeaders.ACCEPT, "application/json"); + requestBuilder.addHeader(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE); //requestBuilder.setUri(pushProtocol.getUri()); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java index b813b6653c9..afc715ce4ec 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java +++ 
b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java @@ -17,8 +17,10 @@ package org.apache.hertzbeat.collector.collect.redfish; +import java.nio.charset.StandardCharsets; import org.apache.hertzbeat.collector.collect.common.http.CommonHttpClient; -import org.apache.hertzbeat.common.constants.CollectorConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.entity.job.protocol.RedfishProtocol; import org.apache.hertzbeat.common.util.IpDomainUtil; import org.apache.http.HttpHeaders; @@ -30,6 +32,7 @@ import org.apache.http.client.methods.RequestBuilder; import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.entity.StringEntity; +import org.springframework.http.MediaType; /** * redfish client impl @@ -66,20 +69,20 @@ public ConnectSession connect() throws Exception { requestBuilder.setUri(this.host + ":" + this.port + uri); } else { String ipAddressType = IpDomainUtil.checkIpAddressType(this.host); - String baseUri = CollectorConstants.IPV6.equals(ipAddressType) + String baseUri = NetworkConstants.IPV6.equals(ipAddressType) ? 
String.format("[%s]:%s", this.host, this.port + uri) : String.format("%s:%s", this.host, this.port + uri); - requestBuilder.setUri(CollectorConstants.HTTP_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTP_HEADER + baseUri); } - requestBuilder.addHeader(HttpHeaders.CONNECTION, "Keep-Alive"); - requestBuilder.addHeader(HttpHeaders.CONTENT_TYPE, "application/json"); - requestBuilder.addHeader(HttpHeaders.USER_AGENT, "Mozilla/5.0 (Windows NT 6.1; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36"); - requestBuilder.addHeader(HttpHeaders.CONTENT_ENCODING, "UTF-8"); + requestBuilder.addHeader(HttpHeaders.CONNECTION, NetworkConstants.KEEP_ALIVE); + requestBuilder.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE); + requestBuilder.addHeader(HttpHeaders.USER_AGENT, NetworkConstants.USER_AGENT); + requestBuilder.addHeader(HttpHeaders.CONTENT_ENCODING, StandardCharsets.UTF_8 + ""); final String json = "{\"UserName\": \"" + this.username + "\", \"Password\": \"" + this.password + "\"}"; - StringEntity entity = new StringEntity(json, "UTF-8"); + StringEntity entity = new StringEntity(json, StandardCharsets.UTF_8); requestBuilder.setEntity(entity); if (this.timeout > 0) { @@ -97,10 +100,10 @@ public ConnectSession connect() throws Exception { try (CloseableHttpResponse response = CommonHttpClient.getHttpClient().execute(request, httpClientContext)) { int statusCode = response.getStatusLine().getStatusCode(); if (statusCode != HttpStatus.SC_CREATED) { - throw new Exception("Http Status Code: " + statusCode); + throw new Exception(NetworkConstants.STATUS_CODE + SignConstants.BLANK + statusCode); } - String location = response.getFirstHeader("Location").getValue(); - String auth = response.getFirstHeader("X-Auth-Token").getValue(); + String location = response.getFirstHeader(NetworkConstants.LOCATION).getValue(); + String auth = response.getFirstHeader(NetworkConstants.X_AUTH_TOKEN).getValue(); session = new 
Session(auth, location, this.host, this.port); } catch (Exception e) { throw new Exception("Redfish session create error: " + e.getMessage()); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java index 9107c73312e..2abc3288e8c 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishConnectSession.java @@ -19,7 +19,8 @@ import java.nio.charset.StandardCharsets; import org.apache.hertzbeat.collector.collect.common.http.CommonHttpClient; -import org.apache.hertzbeat.common.constants.CollectorConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.util.IpDomainUtil; import org.apache.http.HttpStatus; import org.apache.http.client.methods.CloseableHttpResponse; @@ -51,12 +52,12 @@ public void close() throws Exception { this.active = false; String url = RedfishClient.REDFISH_SESSION_SERVICE + session.location(); HttpDelete httpDelete = new HttpDelete(url); - httpDelete.setHeader("X-Auth-Token", session.token()); - httpDelete.setHeader("Location", session.location()); + httpDelete.setHeader(NetworkConstants.X_AUTH_TOKEN, session.token()); + httpDelete.setHeader(NetworkConstants.LOCATION, session.location()); try (CloseableHttpResponse response = CommonHttpClient.getHttpClient().execute(httpDelete)) { int statusCode = response.getStatusLine().getStatusCode(); if (statusCode != HttpStatus.SC_OK) { - throw new Exception("Http State code: " + statusCode); + throw new Exception(NetworkConstants.STATUS_CODE + SignConstants.BLANK + statusCode); } } catch (Exception e) { throw new Exception("Redfish session close error:" + e.getMessage()); @@ -75,18 +76,18 @@ public String 
getRedfishResource(String uri) throws Exception { url = this.session.host() + ":" + this.session.port() + uri; } else { String ipAddressType = IpDomainUtil.checkIpAddressType(this.session.host()); - String baseUri = CollectorConstants.IPV6.equals(ipAddressType) + String baseUri = NetworkConstants.IPV6.equals(ipAddressType) ? String.format("[%s]:%s", this.session.host(), this.session.port() + uri) : String.format("%s:%s", this.session.host(), this.session.port() + uri); - url = CollectorConstants.HTTPS_HEADER + baseUri; + url = NetworkConstants.HTTPS_HEADER + baseUri; } HttpGet httpGet = new HttpGet(url); - httpGet.setHeader("X-Auth-Token", session.token()); - httpGet.setHeader("Location", session.location()); + httpGet.setHeader(NetworkConstants.X_AUTH_TOKEN, session.token()); + httpGet.setHeader(NetworkConstants.LOCATION, session.location()); try (CloseableHttpResponse response = CommonHttpClient.getHttpClient().execute(httpGet)) { int statusCode = response.getStatusLine().getStatusCode(); if (statusCode != HttpStatus.SC_OK) { - throw new Exception("Http State code: " + statusCode); + throw new Exception(NetworkConstants.STATUS_CODE + SignConstants.BLANK + statusCode); } return EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); } catch (Exception e) { diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java index dd1c1e30a57..6ec696dd7fe 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/script/ScriptCollectImpl.java @@ -30,8 +30,8 @@ import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import 
org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.ScriptProtocol; @@ -45,8 +45,6 @@ */ @Slf4j public class ScriptCollectImpl extends AbstractCollect { - public static final String WINDOWS_SCRIPT = "windows_script"; - public static final String LINUX_SCRIPT = "linux_script"; private static final String CMD = "cmd"; private static final String BASH = "bash"; private static final String POWERSHELL = "powershell"; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/smtp/SmtpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/smtp/SmtpCollectImpl.java index 1631d5f9d9c..da285c0d947 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/smtp/SmtpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/smtp/SmtpCollectImpl.java @@ -27,9 +27,9 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.net.smtp.SMTP; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.SmtpProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java index 97cf4a5c8f7..361b762d648 100644 --- 
a/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/snmp/SnmpCollectImpl.java @@ -26,9 +26,9 @@ import java.util.concurrent.ExecutionException; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.SnmpProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java index dfed8e7a6c3..7b9c0ec55ff 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/ssh/SshCollectImpl.java @@ -40,10 +40,10 @@ import org.apache.hertzbeat.collector.collect.common.cache.SshConnect; import org.apache.hertzbeat.collector.collect.common.ssh.CommonSshBlacklist; import org.apache.hertzbeat.collector.collect.common.ssh.CommonSshClient; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; import org.apache.hertzbeat.collector.util.PrivateKeyUtils; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.SshProtocol; diff --git 
a/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java index dbbe8bcac58..7f2df24f401 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImpl.java @@ -30,9 +30,9 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.net.telnet.TelnetClient; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.TelnetProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImpl.java index 21b47b595d7..4c340e2a10f 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/udp/UdpCollectImpl.java @@ -26,9 +26,9 @@ import java.nio.charset.StandardCharsets; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.collector.util.CollectUtil; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import 
org.apache.hertzbeat.common.entity.job.protocol.UdpProtocol; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java index 112dd966ccd..4ffa0df5ad4 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java @@ -37,8 +37,8 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.apache.hertzbeat.collector.collect.AbstractCollect; +import org.apache.hertzbeat.collector.constants.CollectorConstants; import org.apache.hertzbeat.collector.dispatch.DispatchConstants; -import org.apache.hertzbeat.common.constants.CollectorConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.WebsocketProtocol; diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/CollectorConstants.java b/collector/src/main/java/org/apache/hertzbeat/collector/constants/CollectorConstants.java similarity index 80% rename from common/src/main/java/org/apache/hertzbeat/common/constants/CollectorConstants.java rename to collector/src/main/java/org/apache/hertzbeat/collector/constants/CollectorConstants.java index ace1ea800bb..0dcfb201b0b 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/constants/CollectorConstants.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/constants/CollectorConstants.java @@ -15,36 +15,24 @@ * limitations under the License. */ -package org.apache.hertzbeat.common.constants; +package org.apache.hertzbeat.collector.constants; + +import org.apache.hertzbeat.common.constants.NetworkConstants; /** - * collector constant + * collector module constant. 
+ * Extends {@link NetworkConstants} */ -public interface CollectorConstants { - String RESPONSE_TIME = "responseTime"; +public interface CollectorConstants extends NetworkConstants { String KEYWORD = "keyword"; - String STATUS_CODE = "statusCode"; - - String ERROR_MSG = "errorMsg"; - - String URL = "url"; - - String HTTP_HEADER = "http://"; - - String HTTPS_HEADER = "https://"; - /** * POSTGRESQL un reachable status code */ String POSTGRESQL_UN_REACHABLE_CODE = "08001"; - String IPV6 = "ipv6"; - - String IPV4 = "ipv4"; - /** * MongoDB Atlas model */ @@ -53,4 +41,12 @@ public interface CollectorConstants { String ZOOKEEPER_APP = "zookeeper"; String ZOOKEEPER_ENVI_HEAD = "Environment:"; -} + + String ERROR_MSG = "errorMsg"; + + + String RESPONSE_TIME = "responseTime"; + + String STATUS_CODE = "StatusCode"; + +} \ No newline at end of file diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java index 6ca36483eb8..fb05011231b 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nginx/NginxCollectImplTest.java @@ -307,7 +307,7 @@ public void testNginxStatusMatch() { 4 4 2 Reading: 0 Writing: 1 Waiting: 1"""; - // 使用正则表达式匹配并提取所需的键和对应的值 + // Use regular expressions to match and extract the required keys and corresponding values Pattern keyValuePattern = Pattern.compile("(\\w+): (\\d+)"); Matcher keyValueMatcher = keyValuePattern.matcher(status); @@ -318,7 +318,7 @@ public void testNginxStatusMatch() { System.out.println(key + ": " + value); } - // 使用正则表达式匹配并提取"accepts"、"handled"和"requests"的键和对应的值 + // Use regular expressions to match and extract the keys and corresponding values for "accepts", "handled", and "requests". 
Pattern valuesPattern = Pattern.compile("server\\s+(\\w+)\\s+(\\w+)\\s+(\\w+)"); Matcher valuesMatcher = valuesPattern.matcher(status); diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java b/common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java new file mode 100644 index 00000000000..17252765e91 --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.constants; + +/** + * Http Constants. 
+ */ + +public interface NetworkConstants { + + String KEEP_ALIVE = "Keep-Alive"; + + String USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36\")"; + + String IPV6 = "ipv6"; + + String IPV4 = "ipv4"; + + String ERROR_MSG = "errorMsg"; + + String URL = "url"; + + String HTTP_HEADER = "http://"; + + String HTTPS_HEADER = "https://"; + + String RESPONSE_TIME = "responseTime"; + + String STATUS_CODE = "StatusCode"; + + String X_AUTH_TOKEN = "X-Auth-Token"; + + String LOCATION = "Location"; + +} diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java b/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java index f9dc8305ca5..1918ab45a16 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java @@ -24,6 +24,8 @@ public interface SignConstants { String DOUBLE_MARK = ":"; + String SINGLE_MARK = ";"; + String WELL_NO = "#"; String LINE_FEED = "\n"; @@ -33,4 +35,6 @@ public interface SignConstants { String RIGHT_DASH = "/"; String COMMA = ","; + + String BLANK = " "; } diff --git a/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java b/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java index 4c4c1396bc4..8845fcfaf86 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java +++ b/common/src/main/java/org/apache/hertzbeat/common/util/IpDomainUtil.java @@ -24,7 +24,7 @@ import java.util.Enumeration; import java.util.regex.Pattern; import lombok.extern.slf4j.Slf4j; -import org.apache.hertzbeat.common.constants.CollectorConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; import org.apache.http.conn.util.InetAddressUtils; import org.springframework.util.StringUtils; @@ -114,9 +114,9 @@ public static String getLocalhostIp() { */ public static 
String checkIpAddressType(String ipDomain){ if (StringUtils.hasText(ipDomain) && InetAddressUtils.isIPv6Address(ipDomain)) { - return CollectorConstants.IPV6; + return NetworkConstants.IPV6; } - return CollectorConstants.IPV4; + return NetworkConstants.IPV4; } /** diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java index f6b4f26919f..a6a711bceda 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java @@ -20,6 +20,8 @@ import org.apache.hertzbeat.common.constants.ExportFileConstants; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; + +import org.springframework.http.MediaType; import org.springframework.mock.web.MockMultipartFile; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -30,7 +32,6 @@ class FileUtilTest { - private static final String JSON_TYPE = "application/json"; private static final String EXCEL_TYPE = "application/vnd.ms-excel"; private static final String YAML_TYPE = "application/x-yaml"; @@ -42,7 +43,7 @@ class FileUtilTest { @BeforeEach void setUp() { - jsonFile = new MockMultipartFile("file", "test.json", JSON_TYPE, "test content".getBytes()); + jsonFile = new MockMultipartFile("file", "test.json", MediaType.APPLICATION_JSON_VALUE, "test content".getBytes()); excelFile = new MockMultipartFile("file", "test.xlsx", EXCEL_TYPE, "test content".getBytes()); yamlFile = new MockMultipartFile("file", "test.yaml", YAML_TYPE, "test content".getBytes()); emptyFile = new MockMultipartFile("file", "", null, (byte[]) null); diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java index a711d6515d8..ac7bee3bf86 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java +++ 
b/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java @@ -23,8 +23,7 @@ import java.net.SocketException; import java.util.Collections; import java.util.Enumeration; - -import org.apache.hertzbeat.common.constants.CollectorConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; import org.mockito.Mockito; @@ -119,15 +118,15 @@ void testGetLocalhostIp() throws SocketException { @Test void testCheckIpAddressType() { - assertEquals(CollectorConstants.IPV4, IpDomainUtil.checkIpAddressType("192.168.1.1")); - assertEquals(CollectorConstants.IPV4, IpDomainUtil.checkIpAddressType("127.0.0.1")); + assertEquals(NetworkConstants.IPV4, IpDomainUtil.checkIpAddressType("192.168.1.1")); + assertEquals(NetworkConstants.IPV4, IpDomainUtil.checkIpAddressType("127.0.0.1")); - assertEquals(CollectorConstants.IPV6, IpDomainUtil.checkIpAddressType("2001:0db8:85a3:0000:0000:8a2e:0370:7334")); - assertEquals(CollectorConstants.IPV6, IpDomainUtil.checkIpAddressType("::1")); + assertEquals(NetworkConstants.IPV6, IpDomainUtil.checkIpAddressType("2001:0db8:85a3:0000:0000:8a2e:0370:7334")); + assertEquals(NetworkConstants.IPV6, IpDomainUtil.checkIpAddressType("::1")); - assertEquals(CollectorConstants.IPV4, IpDomainUtil.checkIpAddressType("")); - assertEquals(CollectorConstants.IPV4, IpDomainUtil.checkIpAddressType(null)); - assertEquals(CollectorConstants.IPV4, IpDomainUtil.checkIpAddressType("invalid-ip")); + assertEquals(NetworkConstants.IPV4, IpDomainUtil.checkIpAddressType("")); + assertEquals(NetworkConstants.IPV4, IpDomainUtil.checkIpAddressType(null)); + assertEquals(NetworkConstants.IPV4, IpDomainUtil.checkIpAddressType("invalid-ip")); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java index 
7132776b59f..27effc2fc4c 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WeChatAlertNotifyHandlerImpl.java @@ -26,8 +26,10 @@ import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.manager.NoticeReceiver; import org.apache.hertzbeat.common.entity.manager.NoticeTemplate; +import org.apache.http.HttpHeaders; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.http.MediaType; /** * Send alarm information through WeChat @@ -95,7 +97,7 @@ private void sendMessage(String accessToken, String messageContent) throws Excep HttpClient client = HttpClient.newHttpClient(); HttpRequest request = HttpRequest.newBuilder() .uri(new URI(SEND_MESSAGE_URL + accessToken)) - .header("Content-Type", "application/json") + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE) .POST(HttpRequest.BodyPublishers.ofString(messageContent)) .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java index fb62c740c38..8763db63d26 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MonitorServiceImpl.java @@ -41,6 +41,8 @@ import org.apache.hertzbeat.collector.dispatch.DispatchConstants; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.constants.ExportFileConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.entity.job.Configmap; import 
org.apache.hertzbeat.common.entity.job.Job; import org.apache.hertzbeat.common.entity.job.Metrics; @@ -81,6 +83,7 @@ import org.springframework.data.domain.Sort; import org.springframework.data.jpa.domain.Specification; import org.springframework.http.HttpHeaders; +import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.util.CollectionUtils; @@ -96,10 +99,6 @@ public class MonitorServiceImpl implements MonitorService { private static final Long MONITOR_ID_TMP = 1000000000L; - public static final String HTTP = "http://"; - - public static final String HTTPS = "https://"; - public static final String BLANK = ""; public static final String PATTERN_HTTP = "(?i)http://"; public static final String PATTERN_HTTPS = "(?i)https://"; @@ -107,6 +106,8 @@ public class MonitorServiceImpl implements MonitorService { private static final int TAG_LENGTH = 2; + private static final String CONTENT_VALUE = MediaType.APPLICATION_OCTET_STREAM_VALUE + SignConstants.SINGLE_MARK + "charset=" + StandardCharsets.UTF_8; + @Autowired private AppService appService; @@ -303,10 +304,10 @@ public void export(List ids, String type, HttpServletResponse res) throws throw new IllegalArgumentException("not support export type: " + type); } var fileName = imExportService.getFileName(); - res.setHeader("content-type", "application/octet-stream;charset=UTF-8"); - res.setContentType("application/octet-stream;charset=UTF-8"); + res.setHeader(HttpHeaders.CONTENT_DISPOSITION, CONTENT_VALUE); + res.setContentType(CONTENT_VALUE); res.setHeader(HttpHeaders.CONTENT_DISPOSITION, "attachment;filename=" + URLEncoder.encode(fileName, StandardCharsets.UTF_8)); - res.setHeader("Access-Control-Expose-Headers", "Content-Disposition"); + res.setHeader(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS, HttpHeaders.CONTENT_DISPOSITION); imExportService.exportConfig(res.getOutputStream(), ids); } @@ -412,11 
+413,11 @@ public void validate(MonitorDto monitorDto, Boolean isModify) throws IllegalArgu break; case "host": String hostValue = param.getParamValue(); - if (hostValue.toLowerCase().contains(HTTP)) { - hostValue = hostValue.replaceAll(PATTERN_HTTP, BLANK); + if (hostValue.toLowerCase().contains(NetworkConstants.HTTP_HEADER)) { + hostValue = hostValue.replaceAll(PATTERN_HTTP, SignConstants.BLANK); } - if (hostValue.toLowerCase().contains(HTTPS)) { - hostValue = hostValue.replace(PATTERN_HTTPS, BLANK); + if (hostValue.toLowerCase().contains(NetworkConstants.HTTPS_HEADER)) { + hostValue = hostValue.replace(PATTERN_HTTPS, SignConstants.BLANK); } if (!IpDomainUtil.validateIpDomain(hostValue)) { throw new IllegalArgumentException("Params field " + field + " value " From 1c5b3f9c3f7e7753807a1f54b2f27308f369973e Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 12 Aug 2024 21:14:33 +0800 Subject: [PATCH 170/257] [improve] add kafka influxdb jetty i18n (#2507) --- .../resources/define/app-influxdb_promql.yml | 55 +++++++ .../src/main/resources/define/app-jetty.yml | 12 ++ .../src/main/resources/define/app-kafka.yml | 150 ++++++++++++++++++ .../resources/define/app-kafka_promql.yml | 48 ++++++ 4 files changed, 265 insertions(+) diff --git a/manager/src/main/resources/define/app-influxdb_promql.yml b/manager/src/main/resources/define/app-influxdb_promql.yml index a013fdcc2df..ba0fcadb859 100644 --- a/manager/src/main/resources/define/app-influxdb_promql.yml +++ b/manager/src/main/resources/define/app-influxdb_promql.yml @@ -138,6 +138,9 @@ metrics: # The first monitoring metrics basic_influxdb_memstats_alloc. 
# Note: Built-in monitoring metrics include (responseTime - response time) - name: basic_influxdb_memstats_alloc + i18n: + zh-CN: InfluxDB内存分配 + en-US: InfluxDB Memory Allocation # metrics scheduling priority(0->127), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -146,10 +149,19 @@ metrics: # Metric information includes the following: Field name, Type: 0-number, 1-string, instance: indicates whether the metric is the primary key, unit: the unit of the metric - field: instance type: 1 + i18n: + zh-CN: 实例 + en-US: Instance - field: timestamp type: 1 + i18n: + zh-CN: 时间戳 + en-US: Timestamp - field: value type: 1 + i18n: + zh-CN: 值 + en-US: Value # Monitoring protocol used for data collection, e.g. sql, ssh, http, telnet, wmi, snmp, sdk. protocol: http # When the protocol is HTTP, the specific collection configuration is as follows @@ -185,6 +197,9 @@ metrics: parseType: PromQL - name: influxdb_database_numMeasurements + i18n: + zh-CN: InfluxDB数据库测量值 + en-US: InfluxDB Database Measurements # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -193,14 +208,29 @@ metrics: # Metric information includes the following: Field name, Type: 0-number, 1-string, instance: indicates whether the metric is the primary key, unit: the unit of the metric - field: job type: 1 + i18n: + zh-CN: 任务 + en-US: Job - field: instance type: 1 + i18n: + zh-CN: 实例 + en-US: Instance - field: database type: 1 + i18n: + zh-CN: 数据库 + en-US: Database - field: timestamp type: 1 + i18n: + zh-CN: 时间戳 + en-US: Timestamp - field: value type: 1 + i18n: + zh-CN: 值 + en-US: Value # Monitoring protocol used for data 
collection, e.g. sql, ssh, http, telnet, wmi, snmp, sdk. protocol: http # When the protocol is HTTP, the specific collection configuration is as follows @@ -236,6 +266,9 @@ metrics: parseType: PromQL - name: influxdb_query_rate_seconds # Query rate per second + i18n: + zh-CN: 每秒查询速率 + en-US: Query Rate Per Second # metrics scheduling priority(0->127), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -244,10 +277,19 @@ metrics: # Metric information includes the following: Field name, Type: 0-number, 1-string, instance: indicates whether the metric is the primary key, unit: the unit of the metric - field: instance type: 1 + i18n: + zh-CN: 实例 + en-US: Instance - field: timestamp type: 1 + i18n: + zh-CN: 时间戳 + en-US: Timestamp - field: value type: 1 + i18n: + zh-CN: 值 + en-US: Value # Monitoring protocol used for data collection, e.g. sql, ssh, http, telnet, wmi, snmp, sdk. 
protocol: http # When the protocol is HTTP, the specific collection configuration is as follows @@ -283,6 +325,10 @@ metrics: parseType: PromQL - name: influxdb_queryExecutor_queriesFinished_10s # Query rate per second + + i18n: + zh-CN: 查询执行器每10秒查询完成数 + en-US: Query Executor Queries Finished Every 10 Seconds # metrics scheduling priority(0->127), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -291,10 +337,19 @@ metrics: # Metric information includes the following: Field name, Type: 0-number, 1-string, instance: indicates whether the metric is the primary key, unit: the unit of the metric - field: instance type: 1 + i18n: + zh-CN: 实例 + en-US: Instance - field: timestamp type: 1 + i18n: + zh-CN: 时间戳 + en-US: Timestamp - field: value type: 1 + i18n: + zh-CN: 值 + en-US: Value # Monitoring protocol used for data collection, e.g. sql, ssh, http, telnet, wmi, snmp, sdk. 
protocol: http # When the protocol is HTTP, the specific collection configuration is as follows diff --git a/manager/src/main/resources/define/app-jetty.yml b/manager/src/main/resources/define/app-jetty.yml index 3f37d05b11d..28827543121 100644 --- a/manager/src/main/resources/define/app-jetty.yml +++ b/manager/src/main/resources/define/app-jetty.yml @@ -247,6 +247,9 @@ metrics: url: ^_^url^_^ # metrics - class_loading - name: class_loading + i18n: + zh-CN: 类加载信息 + en-US: Class Loading # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -255,10 +258,19 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: LoadedClassCount type: 0 + i18n: + zh-CN: 已加载类总数 + en-US: Loaded Class Count - field: TotalLoadedClassCount type: 0 + i18n: + zh-CN: 总加载类总数 + en-US: Total Loaded Class Count - field: UnloadedClassCount type: 0 + i18n: + zh-CN: 未加载类总数 + en-US: Unloaded Class Count # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: jmx # the config content when protocol is http diff --git a/manager/src/main/resources/define/app-kafka.yml b/manager/src/main/resources/define/app-kafka.yml index 81b736ced0f..2616b5aa835 100644 --- a/manager/src/main/resources/define/app-kafka.yml +++ b/manager/src/main/resources/define/app-kafka.yml @@ -77,6 +77,9 @@ params: metrics: # metrics - server_info - name: server_info + i18n: + zh-CN: 服务器信息 + en-US: Server Info # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -85,10 
+88,19 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: Version type: 1 + i18n: + zh-CN: 版本 + en-US: Version - field: StartTimeMs type: 1 + i18n: + zh-CN: 启动时间 + en-US: Start Time - field: CommitId type: 1 + i18n: + zh-CN: CommitId + en-US: CommitId # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: jmx # the config content when protocol is jmx @@ -193,10 +205,16 @@ metrics: url: ^_^url^_^ - name: active_controller_count + i18n: + zh-CN: 活跃控制器数量 + en-US: Active Controller Count priority: 1 fields: - field: Value type: 0 + i18n: + zh-CN: 值 + en-US: Value protocol: jmx jmx: host: ^_^host^_^ @@ -208,10 +226,16 @@ metrics: url: ^_^url^_^ - name: broker_partition_count + i18n: + zh-CN: Broker分区数量 + en-US: Broker Partition Count priority: 1 fields: - field: Value type: 0 + i18n: + zh-CN: 值 + en-US: Value protocol: jmx jmx: host: ^_^host^_^ @@ -223,10 +247,16 @@ metrics: url: ^_^url^_^ - name: broker_leader_count + i18n: + zh-CN: Broker领导者数量 + en-US: Broker Leader Count priority: 1 fields: - field: Value type: 0 + i18n: + zh-CN: 值 + en-US: Value protocol: jmx jmx: host: ^_^host^_^ @@ -238,22 +268,46 @@ metrics: url: ^_^url^_^ - name: broker_handler_avg_percent + i18n: + zh-CN: Broker处理器平均百分比 + en-US: Broker Handler Avg Percent priority: 2 fields: - field: EventType type: 1 + i18n: + zh-CN: 事件类型 + en-US: Event Type - field: RateUnit type: 1 + i18n: + zh-CN: 速率单位 + en-US: Rate Unit - field: Count type: 0 + i18n: + zh-CN: 计数 + en-US: Count - field: MeanRate type: 0 + i18n: + zh-CN: 平均速率 + en-US: Mean Rate - field: OneMinuteRate type: 0 + i18n: + zh-CN: 一分钟速率 + en-US: One Minute Rate - field: FiveMinuteRate type: 0 + i18n: + zh-CN: 五分钟速率 + en-US: Five Minute Rate - field: FifteenMinuteRate type: 0 + i18n: + zh-CN: 十五分钟速率 + en-US: Fifteen Minute Rate protocol: jmx jmx: host: ^_^host^_^ @@ -265,22 +319,46 @@ metrics: url: ^_^url^_^ - 
name: topic_bytes_in_persec + i18n: + zh-CN: 每秒主题流入字节 + en-US: Topic Bytes In PerSec priority: 2 fields: - field: EventType type: 1 + i18n: + zh-CN: 事件类型 + en-US: Event Type - field: RateUnit type: 1 + i18n: + zh-CN: 速率单位 + en-US: Rate Unit - field: MeanRate type: 0 + i18n: + zh-CN: 平均速率 + en-US: Mean Rate - field: OneMinuteRate type: 0 + i18n: + zh-CN: 一分钟速率 + en-US: One Minute Rate - field: FiveMinuteRate type: 0 + i18n: + zh-CN: 五分钟速率 + en-US: Five Minute Rate - field: FifteenMinuteRate type: 0 + i18n: + zh-CN: 十五分钟速率 + en-US: Fifteen Minute Rate - field: Count type: 0 + i18n: + zh-CN: 计数 + en-US: Count protocol: jmx jmx: host: ^_^host^_^ @@ -292,22 +370,46 @@ metrics: url: ^_^url^_^ - name: topic_bytes_out_persec + i18n: + zh-CN: 每秒主题流出字节 + en-US: Topic Bytes Out PerSec priority: 2 fields: - field: EventType type: 1 + i18n: + zh-CN: 事件类型 + en-US: Event Type - field: RateUnit type: 1 + i18n: + zh-CN: 速率单位 + en-US: Rate Unit - field: MeanRate type: 0 + i18n: + zh-CN: 平均速率 + en-US: Mean Rate - field: OneMinuteRate type: 0 + i18n: + zh-CN: 一分钟速率 + en-US: One Minute Rate - field: FiveMinuteRate type: 0 + i18n: + zh-CN: 五分钟速率 + en-US: Five Minute Rate - field: FifteenMinuteRate type: 0 + i18n: + zh-CN: 十五分钟速率 + en-US: Fifteen Minute Rate - field: Count type: 0 + i18n: + zh-CN: 计数 + en-US: Count protocol: jmx jmx: host: ^_^host^_^ @@ -319,22 +421,46 @@ metrics: url: ^_^url^_^ - name: produce_message_conversions_persec + i18n: + zh-CN: 每秒生产消息转换 + en-US: Produce Message Conversions PerSec priority: 2 fields: - field: EventType type: 1 + i18n: + zh-CN: 事件类型 + en-US: Event Type - field: RateUnit type: 1 + i18n: + zh-CN: 速率单位 + en-US: Rate Unit - field: MeanRate type: 0 + i18n: + zh-CN: 平均速率 + en-US: Mean Rate - field: OneMinuteRate type: 0 + i18n: + zh-CN: 一分钟速率 + en-US: One Minute Rate - field: FiveMinuteRate type: 0 + i18n: + zh-CN: 五分钟速率 + en-US: Five Minute Rate - field: FifteenMinuteRate type: 0 + i18n: + zh-CN: 十五分钟速率 + en-US: Fifteen Minute Rate - field: Count 
type: 0 + i18n: + zh-CN: 计数 + en-US: Count protocol: jmx jmx: host: ^_^host^_^ @@ -346,22 +472,46 @@ metrics: url: ^_^url^_^ - name: produce_total_requests_persec + i18n: + zh-CN: 每秒生产总请求数 + en-US: Produce Total Requests PerSec priority: 2 fields: - field: EventType type: 1 + i18n: + zh-CN: 事件类型 + en-US: Event Type - field: RateUnit type: 1 + i18n: + zh-CN: 速率单位 + en-US: Rate Unit - field: MeanRate type: 0 + i18n: + zh-CN: 平均速率 + en-US: Mean Rate - field: OneMinuteRate type: 0 + i18n: + zh-CN: 一分钟速率 + en-US: One Minute Rate - field: FiveMinuteRate type: 0 + i18n: + zh-CN: 五分钟速率 + en-US: Five Minute Rate - field: FifteenMinuteRate type: 0 + i18n: + zh-CN: 十五分钟速率 + en-US: Fifteen Minute Rate - field: Count type: 0 + i18n: + zh-CN: 计数 + en-US: Count protocol: jmx jmx: host: ^_^host^_^ diff --git a/manager/src/main/resources/define/app-kafka_promql.yml b/manager/src/main/resources/define/app-kafka_promql.yml index 8815f6ca8c6..4aadbd1437f 100644 --- a/manager/src/main/resources/define/app-kafka_promql.yml +++ b/manager/src/main/resources/define/app-kafka_promql.yml @@ -136,6 +136,9 @@ params: metrics: # metrics - kafka_brokers - name: kafka_brokers + i18n: + zh-CN: Kafka Broker 数量 + en-US: Kafka Broker Count # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -144,12 +147,24 @@ metrics: # field-metric name, i18n-metric name i18n label, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: __name__ type: 1 + i18n: + zh-CN: 名称 + en-US: Name - field: instance type: 1 + i18n: + zh-CN: 实例 + en-US: Instance - field: timestamp type: 1 + i18n: + zh-CN: 时间戳 + en-US: Timestamp - field: value type: 1 + i18n: + zh-CN: 数值 + en-US: Value # The protocol used for monitoring, eg: sql, ssh, http, 
telnet, wmi, snmp, sdk protocol: http # The config content when protocol is http @@ -183,6 +198,9 @@ metrics: parseType: PromQL - name: kafka_topic_partitions + i18n: + zh-CN: Kafka Topic 分区数量 + en-US: Kafka Topic Partitions # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -191,12 +209,24 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: __name__ type: 1 + i18n: + zh-CN: 名称 + en-US: Name - field: topic type: 1 + i18n: + zh-CN: 主题 + en-US: Topic - field: timestamp type: 1 + i18n: + zh-CN: 时间戳 + en-US: Timestamp - field: value type: 1 + i18n: + zh-CN: 数值 + en-US: Value # The protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http # The config content when protocol is http @@ -231,6 +261,9 @@ metrics: parseType: PromQL - name: kafka_server_brokertopicmetrics_bytesinpersec # Query rate per second + i18n: + zh-CN: Kafka Server Broker Topic 每秒字节入 + en-US: Kafka Server Broker Topic Bytes In Per Second # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -239,14 +272,29 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: instance type: 1 + i18n: + zh-CN: 实例 + en-US: Instance - field: job type: 1 + i18n: + zh-CN: 任务 + en-US: Job - field: topic type: 1 + i18n: + zh-CN: 主题 + en-US: Topic - field: timestamp type: 1 + i18n: + zh-CN: 时间戳 + en-US: Timestamp - field: value type: 1 + i18n: + zh-CN: 数值 + 
en-US: Value # The protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http # The specific collection configuration when the protocol is http From 8ae4d0e2d3e27cdb73ca1a5778cc12b36affadf2 Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 12 Aug 2024 22:07:31 +0800 Subject: [PATCH 171/257] [improve] add oracle i18n (#2508) Co-authored-by: tomsun28 --- .../src/main/resources/define/app-oracle.yml | 208 +++++++++++++++++- 1 file changed, 206 insertions(+), 2 deletions(-) diff --git a/manager/src/main/resources/define/app-oracle.yml b/manager/src/main/resources/define/app-oracle.yml index 807713a3b23..8dbc10cd93f 100644 --- a/manager/src/main/resources/define/app-oracle.yml +++ b/manager/src/main/resources/define/app-oracle.yml @@ -95,6 +95,9 @@ params: metrics: # metrics - basic - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic Info # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -104,14 +107,29 @@ metrics: - field: database_version type: 1 label: true + i18n: + zh-CN: 数据库版本 + en-US: Database Version - field: hostname type: 1 + i18n: + zh-CN: 主机名 + en-US: Host Name - field: instance_name type: 1 + i18n: + zh-CN: 实例名 + en-US: Instance Name - field: startup_time type: 1 + i18n: + zh-CN: 启动时间 + en-US: Startup Time - field: status type: 1 + i18n: + zh-CN: 状态 + en-US: Status # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field aliasFields: - VERSION @@ -148,23 +166,44 @@ metrics: url: ^_^url^_^ - name: tablespace + i18n: + zh-CN: 表空间 + en-US: Tablespace priority: 1 fields: - field: file_id type: 1 label: true + i18n: + zh-CN: 文件ID + en-US: File ID - field: file_name type: 1 + i18n: + zh-CN: 文件名 + en-US: File Name - field: tablespace_name type: 1 + 
i18n: + zh-CN: 表空间名 + en-US: Tablespace Name - field: status type: 1 + i18n: + zh-CN: 状态 + en-US: Status - field: bytes type: 0 + i18n: + zh-CN: 字节数 + en-US: Bytes unit: MB - field: blocks type: 0 - unit: 块数 + i18n: + zh-CN: 块数 + en-US: Blocks + unit: 块 protocol: jdbc jdbc: host: ^_^host^_^ @@ -180,10 +219,16 @@ metrics: url: ^_^url^_^ - name: total_sessions + i18n: + zh-CN: 会话总数 + en-US: Total Sessions priority: 1 fields: - field: count type: 0 + i18n: + zh-CN: 总数 + en-US: Count protocol: jdbc jdbc: host: ^_^host^_^ @@ -198,10 +243,16 @@ metrics: url: ^_^url^_^ - name: active_sessions + i18n: + zh-CN: 活动会话 + en-US: Active Sessions priority: 1 fields: - field: count type: 0 + i18n: + zh-CN: 总数 + en-US: Count protocol: jdbc jdbc: host: ^_^host^_^ @@ -216,10 +267,16 @@ metrics: url: ^_^url^_^ - name: background_sessions + i18n: + zh-CN: 后台会话 + en-US: Background Sessions priority: 1 fields: - field: count type: 0 + i18n: + zh-CN: 总数 + en-US: Count protocol: jdbc jdbc: host: ^_^host^_^ @@ -234,12 +291,21 @@ metrics: url: ^_^url^_^ - name: connection + i18n: + zh-CN: 连接 + en-US: Connection priority: 1 fields: - field: username type: 1 + i18n: + zh-CN: 用户名 + en-US: Username label: true - field: count + i18n: + zh-CN: 总数 + en-US: Count type: 0 protocol: jdbc jdbc: @@ -255,16 +321,28 @@ metrics: url: ^_^url^_^ - name: performance + i18n: + zh-CN: 性能 + en-US: Performance priority: 1 fields: - field: qps type: 0 + i18n: + zh-CN: 每秒查询数 + en-US: QPS unit: qps - field: tps type: 0 + i18n: + zh-CN: 每秒事务数 + en-US: TPS unit: tps - field: mbps type: 0 + i18n: + zh-CN: 每秒IO读写 + en-US: MBPS unit: mbps aliasFields: - I/O Requests per Second @@ -289,22 +367,43 @@ metrics: - name: percentage + i18n: + zh-CN: 百分比 + en-US: Percentage priority: 1 fields: - field: tablespace_name type: 1 + i18n: + zh-CN: 表空间名 + en-US: Tablespace Name label: true - field: total + i18n: + zh-CN: 全部 + en-US: Total type: 0 - field: used + i18n: + zh-CN: 已用 + en-US: Used type: 0 - field: free + i18n: + zh-CN: 空闲 
+ en-US: Free type: 0 - field: used_percentage type: 0 + i18n: + zh-CN: 已用百分比 + en-US: Used Percentage unit: '%' - field: free_percentage type: 0 + i18n: + zh-CN: 空闲百分比 + en-US: Free Percentage unit: '%' protocol: jdbc jdbc: @@ -321,10 +420,16 @@ metrics: url: ^_^url^_^ - name: process + i18n: + zh-CN: 进程 + en-US: Process priority: 1 fields: - field: process_count type: 0 + i18n: + zh-CN: 进程数 + en-US: Process Count protocol: jdbc jdbc: host: ^_^host^_^ @@ -339,14 +444,23 @@ metrics: url: ^_^url^_^ - name: transaction + i18n: + zh-CN: 事务 + en-US: Transaction priority: 1 fields: - field: commits type: 0 unit: 't/s' + i18n: + zh-CN: 提交数 + en-US: Commits - field: rollbacks type: 0 unit: 't/s' + i18n: + zh-CN: 回滚数 + en-US: Rollbacks aliasFields: - User Commits Per Sec - User Rollbacks Per Sec @@ -367,32 +481,59 @@ metrics: url: ^_^url^_^ - name: wait + i18n: + zh-CN: 等待 + en-US: Wait priority: 1 fields: - field: concurrent_wait_time type: 0 unit: ms + i18n: + zh-CN: 并发等待时间 + en-US: Concurrent Wait Time - field: commit_wait_time type: 0 unit: ms + i18n: + zh-CN: 提交等待时间 + en-US: Commit Wait Time - field: app_wait_time type: 0 unit: ms + i18n: + zh-CN: 应用等待时间 + en-US: Application Wait Time - field: network_wait_time type: 0 unit: ms + i18n: + zh-CN: 网络等待时间 + en-US: Network Wait Time - field: system_io_wait_time type: 0 unit: ms + i18n: + zh-CN: 系统I/O等待时间 + en-US: System I/O Wait Time - field: user_io_wait_time type: 0 unit: ms + i18n: + zh-CN: 用户I/O等待时间 + en-US: User I/O Wait Time - field: configure_wait_time type: 0 unit: ms + i18n: + zh-CN: 配置等待时间 + en-US: Configure Wait Time - field: scheduler_wait_time type: 0 unit: ms + i18n: + zh-CN: 调度等待时间 + en-US: Scheduler Wait Time aliasFields: - System I/O - Application @@ -425,13 +566,22 @@ metrics: url: ^_^url^_^ - name: cpu_stats + i18n: + zh-CN: CPU状态 + en-US: CPU Stats priority: 1 fields: - field: type type: 1 label: true + i18n: + zh-CN: 类型 + en-US: Type - field: num type: 1 + i18n: + zh-CN: 数量 + en-US: Num protocol: jdbc 
jdbc: host: ^_^host^_^ @@ -446,13 +596,22 @@ metrics: url: ^_^url^_^ - name: mem_stats + i18n: + zh-CN: 内存状态 + en-US: Memory Stats priority: 1 fields: - field: type type: 1 label: true + i18n: + zh-CN: 类型 + en-US: Type - field: num type: 1 + i18n: + zh-CN: 数量 + en-US: Num protocol: jdbc jdbc: host: ^_^host^_^ @@ -467,12 +626,21 @@ metrics: url: ^_^url^_^ - name: cache_hit_ratio + i18n: + zh-CN: 缓存命中率 + en-US: Cache Hit Ratio priority: 1 fields: - field: lib_cache_hit_ratio type: 0 + i18n: + zh-CN: 库缓存命中率 + en-US: LIB CACHE HIT RATIO - field: buffer_cache_hit_ratio type: 0 + i18n: + zh-CN: 缓冲区缓存命中率 + en-US: BUFFER CACHE HIT RATIO aliasFields: - Library Cache Hit Ratio - Buffer Cache Hit Ratio @@ -493,33 +661,69 @@ metrics: url: ^_^url^_^ - name: slow_query + i18n: + zh-CN: 慢查询 + en-US: Slow Query priority: 1 fields: - field: sql_id type: 1 + i18n: + zh-CN: sql 主键 + en-US: SQL ID - field: child_number type: 1 + i18n: + zh-CN: 子编号 + en-US: Child Number - field: executions type: 1 - unit: 次数 + i18n: + zh-CN: 执行数 + en-US: EXECUTIONS + unit: 次 - field: per_secs type: 1 + i18n: + zh-CN: 每秒执行数 + en-US: Per Secs unit: 秒 - field: cpu_secs type: 1 + i18n: + zh-CN: 每秒CPU + en-US: CPU Secs unit: 秒 - field: buffer_gets type: 1 + i18n: + zh-CN: 获得的缓冲区 + en-US: Buffer Gets - field: disk_reads type: 1 + i18n: + zh-CN: 磁盘读取 + en-US: Disk Reads - field: fetches type: 1 + i18n: + zh-CN: 获取数量 + en-US: Fetches - field: parse_calls type: 1 + i18n: + zh-CN: 解析调用 + en-US: Parse Calls - field: optimizer_cost type: 1 + i18n: + zh-CN: 优化器成本 + en-US: Optimizer Cost - field: sql_text type: 1 + i18n: + zh-CN: SQL文本 + en-US: SQL Text protocol: jdbc jdbc: host: ^_^host^_^ From 09209107380c534fcce6275a8f0c144054e509dc Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 12 Aug 2024 22:45:40 +0800 Subject: [PATCH 172/257] [improve] add pulsar push rabbitmq i18n (#2509) Co-authored-by: tomsun28 --- .../src/main/resources/define/app-pulsar.yml | 76 ++++++++++++++++++- 
.../src/main/resources/define/app-push.yml | 3 + .../main/resources/define/app-rabbitmq.yml | 12 +++ 3 files changed, 89 insertions(+), 2 deletions(-) diff --git a/manager/src/main/resources/define/app-pulsar.yml b/manager/src/main/resources/define/app-pulsar.yml index ce274f42dc3..5fdf8927654 100644 --- a/manager/src/main/resources/define/app-pulsar.yml +++ b/manager/src/main/resources/define/app-pulsar.yml @@ -87,6 +87,9 @@ metrics: method: GET parseType: prometheus - name: process_start_time_seconds + i18n: + zh-CN: 进程启动时间 + en-US: Process Start Time priority: 0 fields: - field: value @@ -109,6 +112,9 @@ metrics: method: GET parseType: prometheus - name: process_open_fds + i18n: + zh-CN: 打开的文件描述符 + en-US: Open File Descriptors priority: 0 fields: - field: value @@ -130,6 +136,9 @@ metrics: method: GET parseType: prometheus - name: process_max_fds + i18n: + zh-CN: 最大文件描述符 + en-US: Max File Descriptors priority: 0 fields: - field: value @@ -151,6 +160,9 @@ metrics: method: GET parseType: prometheus - name: jvm_memory_pool_allocated_bytes + i18n: + zh-CN: JVM 内存池已分配字节 + en-US: JVM Memory Pool Allocated Bytes priority: 0 # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http @@ -158,14 +170,20 @@ metrics: - field: .name type: 1 i18n: - zh-CN: 指标名称 - en-US: Metric Name + zh-CN: 名称 + en-US: Name label: true - field: pool type: 1 label: true + i18n: + zh-CN: 池 + en-US: Pool - field: value type: 0 + i18n: + zh-CN: 值 + en-US: Value # the config content when protocol is http http: # http host: ipv4 ipv6 domain @@ -178,15 +196,24 @@ metrics: method: GET parseType: prometheus - name: jvm_memory_pool_bytes_used + i18n: + zh-CN: JVM 内存池已使用字节 + en-US: JVM Memory Pool Used Bytes priority: 0 # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http fields: - field: pool type: 1 + i18n: + zh-CN: 池 + en-US: Pool label: true - field: value type: 0 + i18n: + zh-CN: 值 + en-US: Value # the config content when 
protocol is http http: # http host: ipv4 ipv6 domain @@ -199,15 +226,24 @@ metrics: method: GET parseType: prometheus - name: jvm_memory_pool_bytes_committed + i18n: + zh-CN: JVM 内存池已提交字节 + en-US: JVM Memory Pool Committed Bytes priority: 0 # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http fields: - field: pool type: 1 + i18n: + zh-CN: 池 + en-US: Pool label: true - field: value type: 0 + i18n: + zh-CN: 值 + en-US: Value # the config content when protocol is http http: # http host: ipv4 ipv6 domain @@ -220,15 +256,24 @@ metrics: method: GET parseType: prometheus - name: jvm_memory_pool_bytes_max + i18n: + zh-CN: JVM 内存池最大字节 + en-US: JVM Memory Pool Max Bytes priority: 0 # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http fields: - field: pool type: 1 + i18n: + zh-CN: 池 + en-US: Pool label: true - field: value type: 0 + i18n: + zh-CN: 值 + en-US: Value # the config content when protocol is http http: # http host: ipv4 ipv6 domain @@ -241,6 +286,9 @@ metrics: method: GET parseType: prometheus - name: pulsar_broker_publish_latency + i18n: + zh-CN: broker 发布延迟 + en-US: Broker Publish Latency priority: 0 # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http @@ -253,12 +301,21 @@ metrics: label: true - field: cluster type: 1 + i18n: + zh-CN: 集群 + en-US: Cluster label: true - field: quantile type: 1 + i18n: + zh-CN: 分位数 + en-US: Quantile label: true - field: value type: 0 + i18n: + zh-CN: 值 + en-US: Value # the config content when protocol is http http: # http host: ipv4 ipv6 domain @@ -271,6 +328,9 @@ metrics: method: GET parseType: prometheus - name: pulsar_metadata_store_ops_latency_ms + i18n: + zh-CN: 元数据存储操作延迟 + en-US: Metadata Store Ops Latency priority: 0 # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http @@ -283,15 +343,27 @@ metrics: label: true - field: cluster type: 1 + i18n: + zh-CN: 集群 + en-US: 
Cluster label: true - field: name type: 1 + i18n: + zh-CN: 名称 + en-US: Name label: true - field: type type: 1 + i18n: + zh-CN: 类型 + en-US: Type label: true - field: status type: 1 + i18n: + zh-CN: 状态 + en-US: Status label: true - field: le type: 0 diff --git a/manager/src/main/resources/define/app-push.yml b/manager/src/main/resources/define/app-push.yml index e16a35a5bf3..91bcd15ba7b 100644 --- a/manager/src/main/resources/define/app-push.yml +++ b/manager/src/main/resources/define/app-push.yml @@ -62,6 +62,9 @@ params: metrics: # metrics - all - name: metrics + i18n: + zh-CN: 指标 + en-US: Metrics # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 diff --git a/manager/src/main/resources/define/app-rabbitmq.yml b/manager/src/main/resources/define/app-rabbitmq.yml index 58c3f9d11f3..eb2e2064aeb 100644 --- a/manager/src/main/resources/define/app-rabbitmq.yml +++ b/manager/src/main/resources/define/app-rabbitmq.yml @@ -104,6 +104,9 @@ params: metrics: # Note: The built-in monitoring metrics include (responseTime - response time) - name: overview + i18n: + zh-CN: 概览 + en-US: Overview # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -170,6 +173,9 @@ metrics: parseType: default - name: object_totals + i18n: + zh-CN: 对象总数 + en-US: Object Totals # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -224,6 +230,9 @@ 
metrics: parseScript: '$.object_totals' - name: nodes + i18n: + zh-CN: 节点数 + en-US: Nodes # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -443,6 +452,9 @@ metrics: parseScript: '$.*' - name: queues + i18n: + zh-CN: 队列数 + en-US: Queues # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 From 0634e7bb2ec76da95a60f5477d483ef94cf51200 Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 12 Aug 2024 22:51:08 +0800 Subject: [PATCH 173/257] [improve] add redis cluster i18n (#2510) Co-authored-by: tomsun28 --- .../resources/define/app-redis_cluster.yml | 600 ++++++++++++++++++ .../resources/define/app-redis_sentinel.yml | 258 ++++++++ 2 files changed, 858 insertions(+) diff --git a/manager/src/main/resources/define/app-redis_cluster.yml b/manager/src/main/resources/define/app-redis_cluster.yml index 44dc473a507..b080def499d 100644 --- a/manager/src/main/resources/define/app-redis_cluster.yml +++ b/manager/src/main/resources/define/app-redis_cluster.yml @@ -114,52 +114,124 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: redis_version type: 1 + i18n: + zh-CN: Redis 版本 + en-US: Redis Version - field: redis_git_sha1 type: 0 + i18n: + zh-CN: Redis Git SHA1 + en-US: Redis Git SHA1 - field: redis_git_dirty type: 0 + i18n: + zh-CN: Redis Git Dirty + en-US: Redis Git Dirty - field: redis_build_id type: 1 + i18n: + zh-CN: Redis 构建 ID + en-US: Redis Build ID - field: redis_mode 
type: 1 + i18n: + zh-CN: Redis 模式 + en-US: Redis Mode - field: os type: 1 + i18n: + zh-CN: 操作系统 + en-US: Operating System - field: arch_bits type: 0 + i18n: + zh-CN: 架构位数 + en-US: Architecture Bits - field: multiplexing_api type: 1 + i18n: + zh-CN: 多路复用 API + en-US: Multiplexing API - field: atomicvar_api type: 1 + i18n: + zh-CN: 原子变量 API + en-US: Atomicvar API - field: gcc_version type: 1 + i18n: + zh-CN: GCC 版本 + en-US: GCC Version - field: process_id type: 0 + i18n: + zh-CN: 进程 ID + en-US: Process ID - field: process_supervised type: 1 + i18n: + zh-CN: 进程监控 + en-US: Process Supervised - field: run_id type: 1 + i18n: + zh-CN: 运行 ID + en-US: Run ID - field: tcp_port type: 0 + i18n: + zh-CN: TCP 端口 + en-US: TCP Port - field: server_time_usec type: 0 + i18n: + zh-CN: 基于纪元的系统时间 + en-US: Server Time Usec - field: uptime_in_seconds type: 0 + i18n: + zh-CN: 启动时间(秒) + en-US: Uptime In Seconds - field: uptime_in_days type: 0 + i18n: + zh-CN: 启动时间(天) + en-US: Uptime In Days - field: hz type: 0 + i18n: + zh-CN: 定时器频率 + en-US: HZ - field: configured_hz type: 0 + i18n: + zh-CN: 配置的定时器频率 + en-US: Configured HZ - field: lru_clock type: 0 + i18n: + zh-CN: LRU 时钟 + en-US: LRU Clock - field: executable type: 1 + i18n: + zh-CN: 可执行文件 + en-US: Executable - field: config_file type: 1 + i18n: + zh-CN: 配置文件 + en-US: Config File - field: io_threads_active type: 0 + i18n: + zh-CN: IO 线程活跃数 + en-US: IO Threads Active # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -178,6 +250,9 @@ metrics: - name: clients + i18n: + zh-CN: 客户端 + en-US: Clients # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -185,22 +260,49 @@ metrics: # field-metric name, type-metric type(0-number,1-string), 
unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: connected_clients type: 0 + i18n: + zh-CN: 已连接客户端 + en-US: Connected Clients - field: cluster_connections type: 0 + i18n: + zh-CN: 集群连接数 + en-US: Cluster Connections - field: maxclients type: 0 + i18n: + zh-CN: 最大客户端数 + en-US: Maxclients - field: client_recent_max_input_buffer type: 0 + i18n: + zh-CN: 客户端最近最大输入缓冲区 + en-US: Client Recent Max Input Buffer - field: client_recent_max_output_buffer type: 0 + i18n: + zh-CN: 客户端最近最大输出缓冲区 + en-US: Client Recent Max Output Buffer - field: blocked_clients type: 0 + i18n: + zh-CN: 阻塞客户端 + en-US: Blocked Clients - field: tracking_clients type: 0 + i18n: + zh-CN: 跟踪客户端 + en-US: Tracking Clients - field: clients_in_timeout_table type: 0 + i18n: + zh-CN: 超时表中的客户端 + en-US: Clients In Timeout Table # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -218,6 +320,9 @@ metrics: pattern: ^_^pattern^_^ - name: memory + i18n: + zh-CN: 内存 + en-US: Memory # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 2 @@ -225,97 +330,223 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: used_memory type: 0 + i18n: + zh-CN: 已使用内存 + en-US: Used Memory - field: used_memory_human type: 0 + i18n: + zh-CN: 已使用内存 (人类可读) + en-US: Used Memory Human unit: MB - field: used_memory_rss type: 0 + i18n: + zh-CN: 已使用内存 RSS + en-US: Used Memory RSS - field: used_memory_rss_human type: 0 + i18n: + zh-CN: 已使用内存 RSS (人类可读) + en-US: Used Memory RSS Human 
unit: MB - field: used_memory_peak type: 0 + i18n: + zh-CN: 内存使用峰值 + en-US: Used Memory Peak - field: used_memory_peak_human type: 0 + i18n: + zh-CN: 内存使用峰值 (人类可读) + en-US: Used Memory Peak Human unit: MB - field: used_memory_peak_perc type: 0 + i18n: + zh-CN: 内存使用峰值百分比 + en-US: Used Memory Peak Perc unit: '%' - field: used_memory_overhead type: 0 + i18n: + zh-CN: 内存开销 + en-US: Used Memory Overhead - field: used_memory_startup type: 0 + i18n: + zh-CN: 启动时内存使用 + en-US: Used Memory Startup - field: used_memory_dataset type: 0 + i18n: + zh-CN: 数据集内存使用 + en-US: Used Memory Dataset - field: used_memory_dataset_perc type: 0 + i18n: + zh-CN: 数据集内存使用百分比 + en-US: Used Memory Dataset Perc unit: '%' - field: allocator_allocated type: 0 + i18n: + zh-CN: 分配器已分配内存 + en-US: Allocator Allocated - field: allocator_active type: 0 + i18n: + zh-CN: 分配器活跃内存 + en-US: Allocator Active - field: allocator_resident type: 0 + i18n: + zh-CN: 分配器驻留内存 + en-US: Allocator Resident - field: total_system_memory type: 0 + i18n: + zh-CN: 系统总内存 + en-US: Total System Memory - field: total_system_memory_human type: 0 + i18n: + zh-CN: 系统总内存 (人类可读) + en-US: Total System Memory Human unit: GB - field: used_memory_lua type: 0 + i18n: + zh-CN: Lua 脚本内存使用 + en-US: Used Memory Lua - field: used_memory_lua_human type: 0 + i18n: + zh-CN: Lua 脚本内存使用 (人类可读) + en-US: Used Memory Lua Human unit: KB - field: used_memory_scripts type: 0 + i18n: + zh-CN: 脚本内存使用 + en-US: Used Memory Scripts - field: used_memory_scripts_human type: 0 + i18n: + zh-CN: 脚本内存使用 (人类可读) + en-US: Used Memory Scripts Human unit: KB - field: number_of_cached_scripts type: 0 + i18n: + zh-CN: 缓存脚本数 + en-US: Number Of Cached Scripts - field: maxmemory type: 0 + i18n: + zh-CN: 最大内存 + en-US: Maxmemory - field: maxmemory_human type: 0 + i18n: + zh-CN: 最大内存 (人类可读) + en-US: Maxmemory Human unit: MB - field: maxmemory_policy type: 1 + i18n: + zh-CN: 最大内存策略 + en-US: Maxmemory Policy - field: allocator_frag_ratio type: 0 + i18n: + zh-CN: 分配器碎片率 + en-US: 
Allocator Frag Ratio - field: allocator_frag_bytes type: 0 + i18n: + zh-CN: 分配器碎片字节 + en-US: Allocator Frag Bytes - field: allocator_rss_ratio type: 0 + i18n: + zh-CN: 分配器驻留内存比率 + en-US: Allocator Rss Ratio - field: allocator_rss_bytes type: 0 + i18n: + zh-CN: 分配器驻留内存字节 + en-US: Allocator Rss Bytes - field: rss_overhead_ratio type: 0 + i18n: + zh-CN: RSS 开销比率 + en-US: Rss Overhead Ratio - field: rss_overhead_bytes type: 0 + i18n: + zh-CN: RSS 开销字节 + en-US: Rss Overhead Bytes - field: mem_fragmentation_ratio type: 0 + i18n: + zh-CN: 内存碎片率 + en-US: Mem Fragmentation Ratio - field: mem_fragmentation_bytes type: 0 + i18n: + zh-CN: 内存碎片字节 + en-US: Mem Fragmentation Bytes - field: mem_not_counted_for_evict type: 0 + i18n: + zh-CN: 不计入驱逐的内存 + en-US: Mem Not Counted For Evict - field: mem_replication_backlog type: 0 + i18n: + zh-CN: 复制积压内存 + en-US: Mem Replication Backlog - field: mem_clients_slaves type: 0 + i18n: + zh-CN: 从节点客户端数 + en-US: Mem Clients Slaves - field: mem_clients_normal type: 0 + i18n: + zh-CN: 普通客户端数 + en-US: Mem Clients Normal - field: mem_aof_buffer type: 0 + i18n: + zh-CN: AOF 缓冲区内存 + en-US: Mem Aof Buffer - field: mem_allocator type: 1 + i18n: + zh-CN: 内存分配器 + en-US: Mem Allocator - field: active_defrag_running type: 0 + i18n: + zh-CN: 活跃碎片整理运行中 + en-US: Active Defrag Running - field: lazyfree_pending_objects type: 0 + i18n: + zh-CN: 懒惰释放待处理对象 + en-US: Lazyfree Pending Objects - field: lazyfreed_objects type: 0 + i18n: + zh-CN: 懒惰释放对象 + en-US: Lazyfreed Objects # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -333,6 +564,9 @@ metrics: pattern: ^_^pattern^_^ - name: persistence + i18n: + zh-CN: 持久化 + en-US: Persistence # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the 
scheduling continue priority: 3 @@ -340,52 +574,124 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: loading type: 0 + i18n: + zh-CN: 加载中 + en-US: Loading - field: current_cow_size type: 0 + i18n: + zh-CN: 当前 COW 大小 + en-US: Current Cow Size - field: current_cow_size_age type: 0 + i18n: + zh-CN: 当前 COW 大小年龄 + en-US: Current Cow Size Age - field: current_fork_perc type: 0 + i18n: + zh-CN: 当前 Fork 百分比 + en-US: Current Fork Perc - field: current_save_keys_processed type: 0 + i18n: + zh-CN: 当前保存键处理 + en-US: Current Save Keys Processed - field: current_save_keys_total type: 0 + i18n: + zh-CN: 当前保存键总数 + en-US: Current Save Keys Total - field: rdb_changes_since_last_save type: 0 + i18n: + zh-CN: 上次保存后的更改 + en-US: Rdb Changes Since Last Save - field: rdb_bgsave_in_progress type: 0 + i18n: + zh-CN: RDB 后台保存中 + en-US: Rdb Bgsave In Progress - field: rdb_last_save_time type: 0 + i18n: + zh-CN: RDB 上次保存时间 + en-US: Rdb Last Save Time - field: rdb_last_bgsave_status type: 1 + i18n: + zh-CN: RDB 上次后台保存状态 + en-US: Rdb Last Bgsave Status - field: rdb_last_bgsave_time_sec type: 0 + i18n: + zh-CN: RDB 上次后台保存时间 + en-US: Rdb Last Bgsave Time - field: rdb_current_bgsave_time_sec type: 0 + i18n: + zh-CN: RDB 当前后台保存时间 + en-US: Rdb Current Bgsave Time - field: rdb_last_cow_size type: 0 + i18n: + zh-CN: RDB 上次 COW 大小 + en-US: Rdb Last Cow Size - field: aof_enabled type: 0 + i18n: + zh-CN: AOF 启用 + en-US: Aof Enabled - field: aof_rewrite_in_progress type: 0 + i18n: + zh-CN: AOF 重写中 + en-US: Aof Rewrite In Progress - field: aof_rewrite_scheduled type: 0 + i18n: + zh-CN: AOF 重写周期 + en-US: Aof Rewrite Scheduled - field: aof_last_rewrite_time_sec type: 0 + i18n: + zh-CN: AOF 上次重写时间 + en-US: Aof Last Rewrite Time - field: aof_current_rewrite_time_sec type: 0 + i18n: + zh-CN: AOF 当前重写时间 + en-US: Aof Current Rewrite Time - field: 
aof_last_bgrewrite_status type: 1 + i18n: + zh-CN: AOF 上次后台重写状态 + en-US: Aof Last Bgrewrite Status - field: aof_last_write_status type: 1 + i18n: + zh-CN: AOF 上次写入状态 + en-US: Aof Last Write Status - field: aof_last_cow_size type: 0 + i18n: + zh-CN: AOF 上次 COW 大小 + en-US: Aof Last Cow Size - field: module_fork_in_progress type: 0 + i18n: + zh-CN: 模块 Fork 中 + en-US: Module Fork In Progress - field: module_fork_last_cow_size type: 0 + i18n: + zh-CN: 模块 Fork 上次 COW 大小 + en-US: Module Fork Last Cow Size # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -403,6 +709,9 @@ metrics: pattern: ^_^pattern^_^ - name: stats + i18n: + zh-CN: 统计 + en-US: Stats # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 4 @@ -410,82 +719,199 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: total_connections_received type: 0 + i18n: + zh-CN: 收到总连接数 + en-US: Total Connections Received - field: total_commands_processed type: 0 + i18n: + zh-CN: 处理总命令数 + en-US: Total Commands Processed - field: instantaneous_ops_per_sec type: 0 + i18n: + zh-CN: 每秒操作数 + en-US: Instantaneous Ops Per Sec - field: total_net_input_bytes type: 0 + i18n: + zh-CN: 网络总输入字节数 + en-US: Total Net Input Bytes - field: total_net_output_bytes type: 0 + i18n: + zh-CN: 网络总输出字节数 + en-US: Total Net Output Bytes - field: instantaneous_input_kbps type: 0 + i18n: + zh-CN: 瞬时的每秒输入 KB + en-US: Instantaneous Input KB - field: instantaneous_output_kbps type: 0 + i18n: + zh-CN: 瞬时的每秒输出 KB + en-US: Instantaneous Output KB - field: rejected_connections type: 0 + i18n: + 
zh-CN: 拒绝连接数 + en-US: Rejected Connections - field: sync_full type: 0 + i18n: + zh-CN: 同步全量 + en-US: Sync Full - field: sync_partial_ok type: 0 + i18n: + zh-CN: 同步部分成功 + en-US: Sync Partial Ok - field: sync_partial_err type: 0 + i18n: + zh-CN: 同步部分失败 + en-US: Sync Partial Err - field: expired_keys type: 0 + i18n: + zh-CN: 过期键 + en-US: Expired Keys - field: expired_stale_perc type: 0 + i18n: + zh-CN: 过期陈旧百分比 + en-US: Expired Stale Perc - field: expired_time_cap_reached_count type: 0 + i18n: + zh-CN: 达到过期时间上限计数 + en-US: Expired Time Cap Reached Count - field: expire_cycle_cpu_milliseconds type: 0 + i18n: + zh-CN: 过期周期 CPU 毫秒 + en-US: Expire Cycle CPU Milliseconds - field: evicted_keys type: 0 + i18n: + zh-CN: 驱逐键 + en-US: Evicted Keys - field: keyspace_hits type: 0 + i18n: + zh-CN: 命中键空间 + en-US: Keyspace Hits - field: keyspace_misses type: 0 + i18n: + zh-CN: 未命中键空间 + en-US: Keyspace Misses - field: pubsub_channels type: 0 + i18n: + zh-CN: 发布订阅频道 + en-US: Pubsub Channels - field: pubsub_patterns type: 0 + i18n: + zh-CN: 发布订阅模式 + en-US: Pubsub Patterns - field: latest_fork_usec type: 0 + i18n: + zh-CN: 最新 Fork 微秒 + en-US: Latest Fork Usec - field: total_forks type: 0 + i18n: + zh-CN: 总 Fork 数 + en-US: Total Forks - field: migrate_cached_sockets type: 0 + i18n: + zh-CN: 迁移缓存套接字 + en-US: Migrate Cached Sockets - field: slave_expires_tracked_keys type: 0 + i18n: + zh-CN: 从节点过期跟踪键 + en-US: Slave Expires Tracked Keys - field: active_defrag_hits type: 0 + i18n: + zh-CN: 活跃碎片整理命中 + en-US: Active Defrag Hits - field: active_defrag_misses type: 0 + i18n: + zh-CN: 活跃碎片整理未命中 + en-US: Active Defrag Misses - field: active_defrag_key_hits type: 0 + i18n: + zh-CN: 活跃碎片整理键命中 + en-US: Active Defrag Key Hits - field: active_defrag_key_misses type: 0 + i18n: + zh-CN: 活跃碎片整理键未命中 + en-US: Active Defrag Key Misses - field: tracking_total_keys type: 0 + i18n: + zh-CN: 跟踪键总数 + en-US: Tracking Total Keys - field: tracking_total_items type: 0 + i18n: + zh-CN: 跟踪项总数 + en-US: Tracking Total 
Items - field: tracking_total_prefixes type: 0 + i18n: + zh-CN: 跟踪前缀总数 + en-US: Tracking Total Prefixes - field: unexpected_error_replies type: 0 + i18n: + zh-CN: 意外错误回复 + en-US: Unexpected Error Replies - field: total_error_replies type: 0 + i18n: + zh-CN: 总错误回复 + en-US: Total Error Replies - field: dump_payload_sanitizations type: 0 + i18n: + zh-CN: 转储有效负载深度完整性验证的总数 + en-US: Dump Payload Sanitizations - field: total_reads_processed type: 0 + i18n: + zh-CN: 总读取处理 + en-US: Total Reads Processed - field: total_writes_processed type: 0 + i18n: + zh-CN: 总写入处理 + en-US: Total Writes Processed - field: io_threaded_reads_processed type: 0 + i18n: + zh-CN: IO 线程读取处理 + en-US: Io Threaded Reads Processed - field: io_threaded_writes_processed type: 0 + i18n: + zh-CN: IO 线程写入处理 + en-US: Io Threaded Writes Processed # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -503,6 +929,9 @@ metrics: pattern: ^_^pattern^_^ - name: replication + i18n: + zh-CN: 副本 + en-US: Replication # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 5 @@ -510,28 +939,64 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: role type: 1 + i18n: + zh-CN: 角色 + en-US: Role - field: connected_slaves type: 0 + i18n: + zh-CN: 已连接从节点 + en-US: Connected Slaves - field: master_failover_state type: 1 + i18n: + zh-CN: 主节点故障转移状态 + en-US: Master Failover State - field: master_replid type: 1 + i18n: + zh-CN: 主节点复制 ID + en-US: Master Replid - field: master_replid2 type: 0 + i18n: + zh-CN: 主节点复制 ID2 + en-US: Master Replid2 - field: master_repl_offset 
type: 0 + i18n: + zh-CN: 主节点复制偏移量 + en-US: Master Repl Offset - field: second_repl_offset type: 0 + i18n: + zh-CN: 第二复制偏移量 + en-US: Second Repl Offset - field: repl_backlog_active type: 0 + i18n: + zh-CN: 复制积压活跃 + en-US: Repl Backlog Active - field: repl_backlog_size type: 0 + i18n: + zh-CN: 复制积压大小 + en-US: Repl Backlog Size - field: repl_backlog_first_byte_offset type: 0 + i18n: + zh-CN: 复制积压第一个字节偏移量 + en-US: Repl Backlog First Byte Offset - field: repl_backlog_histlen type: 0 + i18n: + zh-CN: 复制积压历史长度 + en-US: Repl Backlog Histlen # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -549,6 +1014,9 @@ metrics: pattern: ^_^pattern^_^ - name: cpu + i18n: + zh-CN: CPU + en-US: CPU # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 6 @@ -556,18 +1024,39 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: used_cpu_sys type: 0 + i18n: + zh-CN: 系统已使用 CPU + en-US: Sys CPU Used - field: used_cpu_user type: 0 + i18n: + zh-CN: 用户已使用 CPU + en-US: User CPU Used - field: used_cpu_sys_children type: 0 + i18n: + zh-CN: Sys 子进程已使用 CPU + en-US: Sys Children CPU Used - field: used_cpu_user_children type: 0 + i18n: + zh-CN: 用户子进程已使用 CPU + en-US: User Children CPU Used - field: used_cpu_sys_main_thread type: 0 + i18n: + zh-CN: 系统主线程已使用 CPU + en-US: Sys Main Thread CPU Used - field: used_cpu_user_main_thread type: 0 + i18n: + zh-CN: 用户主线程已使用 CPU + en-US: User Main Thread CPU Used # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ 
-585,6 +1074,9 @@ metrics: pattern: ^_^pattern^_^ - name: errorstats + i18n: + zh-CN: 错误统计 + en-US: Error Stats # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 8 @@ -592,10 +1084,19 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: errorstat_ERR type: 1 + i18n: + zh-CN: 错误统计 ERR + en-US: Error Stats ERR - field: errorstat_MISCONF type: 1 + i18n: + zh-CN: 错误统计 MISCONF + en-US: Error Stats MISCONF # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -613,6 +1114,9 @@ metrics: pattern: ^_^pattern^_^ - name: cluster + i18n: + zh-CN: 集群 + en-US: Cluster # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 9 @@ -620,40 +1124,94 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: cluster_enabled type: 0 + i18n: + zh-CN: 集群启用 + en-US: Cluster Enabled - field: cluster_state type: 1 + i18n: + zh-CN: 集群状态 + en-US: Cluster State - field: cluster_slots_assigned type: 0 + i18n: + zh-CN: 集群分配槽 + en-US: Cluster Slots Assigned - field: cluster_slots_ok type: 0 + i18n: + zh-CN: 集群槽正常 + en-US: Cluster Slots Ok - field: cluster_slots_pfail type: 0 + i18n: + zh-CN: 集群槽部分失败 + en-US: Cluster Slots Pfail - field: cluster_slots_fail type: 0 + 
i18n: + zh-CN: 集群槽失败 + en-US: Cluster Slots Fail - field: cluster_known_nodes type: 0 + i18n: + zh-CN: 集群已知节点 + en-US: Cluster Known Nodes - field: cluster_size type: 0 + i18n: + zh-CN: 集群大小 + en-US: Cluster Size - field: cluster_current_epoch type: 0 + i18n: + zh-CN: 集群当前 epoch + en-US: Cluster Current Epoch - field: cluster_my_epoch type: 0 + i18n: + zh-CN: 集群我的 epoch + en-US: Cluster My Epoch - field: cluster_stats_messages_ping_sent type: 0 + i18n: + zh-CN: 集群统计消息 ping 发送 + en-US: Cluster Stats Messages Ping Sent - field: cluster_stats_messages_pong_sent type: 0 + i18n: + zh-CN: 集群统计消息 pong 发送 + en-US: Cluster Stats Messages Pong Sent - field: cluster_stats_messages_sent type: 0 + i18n: + zh-CN: 集群统计消息发送 + en-US: Cluster Stats Messages Sent - field: cluster_stats_messages_ping_received type: 0 + i18n: + zh-CN: 集群统计消息 ping 接收 + en-US: Cluster Stats Messages Ping Received - field: cluster_stats_messages_pong_received type: 0 + i18n: + zh-CN: 集群统计消息 pong 接收 + en-US: Cluster Stats Messages Pong Received - field: cluster_stats_messages_meet_received type: 0 + i18n: + zh-CN: 集群统计消息 meet 接收 + en-US: Cluster Stats Messages Meet Received - field: cluster_stats_messages_received type: 0 + i18n: + zh-CN: 集群统计消息接收 + en-US: Cluster Stats Messages Received # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -671,6 +1229,9 @@ metrics: pattern: ^_^pattern^_^ - name: commandstats + i18n: + zh-CN: 命令统计 + en-US: Command Stats # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 9 @@ -678,26 +1239,59 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + 
zh-CN: 标识 + en-US: Identity - field: cmdstat_set type: 1 + i18n: + zh-CN: SET 命令统计 + en-US: SET COMMAND STAT - field: cmdstat_get type: 1 + i18n: + zh-CN: GET 命令统计 + en-US: GET COMMAND STAT - field: cmdstat_setnx type: 1 + i18n: + zh-CN: SETNX 命令统计 + en-US: SETNX COMMAND STAT - field: cmdstat_hset type: 1 + i18n: + zh-CN: HSET 命令统计 + en-US: HSET COMMAND STAT - field: cmdstat_hget type: 1 + i18n: + zh-CN: HGET 命令统计 + en-US: HGET COMMAND STAT - field: cmdstat_lpush type: 1 + i18n: + zh-CN: LPUSH 命令统计 + en-US: LPUSH COMMAND STAT - field: cmdstat_rpush type: 1 + i18n: + zh-CN: RPUSH 命令统计 + en-US: RPUSH COMMAND STAT - field: cmdstat_lpop type: 1 + i18n: + zh-CN: LPOP 命令统计 + en-US: LPOP COMMAND STAT - field: cmdstat_rpop type: 1 + i18n: + zh-CN: RPOP 命令统计 + en-US: RPOP COMMAND STAT - field: cmdstat_llen type: 1 + i18n: + zh-CN: LLEN 命令统计 + en-US: LLEN COMMAND STAT # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -716,6 +1310,9 @@ metrics: - name: keyspace + i18n: + zh-CN: 键空间 + en-US: Keyspace # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 9 @@ -723,6 +1320,9 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: db0 type: 1 - field: db1 diff --git a/manager/src/main/resources/define/app-redis_sentinel.yml b/manager/src/main/resources/define/app-redis_sentinel.yml index 02035ca2917..740e3ee8f44 100644 --- a/manager/src/main/resources/define/app-redis_sentinel.yml +++ b/manager/src/main/resources/define/app-redis_sentinel.yml @@ -106,6 +106,9 @@ params: hide: true metrics: - name: server + i18n: + 
zh-CN: 服务器 + en-US: Server # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -113,52 +116,124 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: identity type: 1 + i18n: + zh-CN: 标识 + en-US: Identity - field: redis_version type: 1 + i18n: + zh-CN: Redis 版本 + en-US: Redis Version - field: redis_git_sha1 type: 0 + i18n: + zh-CN: Redis Git SHA1 + en-US: Redis Git SHA1 - field: redis_git_dirty type: 0 + i18n: + zh-CN: Redis Git Dirty + en-US: Redis Git Dirty - field: redis_build_id type: 1 + i18n: + zh-CN: Redis Build ID + en-US: Redis Build ID - field: redis_mode type: 1 + i18n: + zh-CN: Redis 模式 + en-US: Redis Mode - field: os type: 1 + i18n: + zh-CN: 操作系统 + en-US: Operating System - field: arch_bits type: 0 + i18n: + zh-CN: 架构位数 + en-US: Architecture Bits - field: multiplexing_api type: 1 + i18n: + zh-CN: 多路复用 API + en-US: Multiplexing API - field: atomicvar_api type: 1 + i18n: + zh-CN: 原子变量 API + en-US: Atomicvar API - field: gcc_version type: 1 + i18n: + zh-CN: GCC 版本 + en-US: GCC Version - field: process_id type: 0 + i18n: + zh-CN: 进程 ID + en-US: Process ID - field: process_supervised type: 1 + i18n: + zh-CN: 进程监控 + en-US: Process Supervised - field: run_id type: 1 + i18n: + zh-CN: 运行 ID + en-US: Run ID - field: tcp_port type: 0 + i18n: + zh-CN: TCP 端口 + en-US: TCP Port - field: server_time_usec type: 0 + i18n: + zh-CN: 基于纪元的系统时间 + en-US: Server Time Usec - field: uptime_in_seconds type: 0 + i18n: + zh-CN: 运行时间(秒) + en-US: Uptime In Seconds - field: uptime_in_days type: 0 + i18n: + zh-CN: 运行时间(天) + en-US: Uptime In Days - field: hz type: 0 + i18n: + zh-CN: 定时器频率 + en-US: Hz - field: configured_hz type: 0 + i18n: + zh-CN: 配置定时器频率 + en-US: 
Configured Hz - field: lru_clock type: 0 + i18n: + zh-CN: LRU 时钟 + en-US: LRU Clock - field: executable type: 1 + i18n: + zh-CN: 可执行文件 + en-US: Executable - field: config_file type: 1 + i18n: + zh-CN: 配置文件 + en-US: Config File - field: io_threads_active type: 0 + i18n: + zh-CN: 活动 IO 线程 + en-US: IO Threads Active # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -176,6 +251,9 @@ metrics: pattern: ^_^pattern^_^ - name: clients + i18n: + zh-CN: 客户端 + en-US: Clients # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 1 @@ -183,20 +261,44 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: connected_clients type: 0 + i18n: + zh-CN: 已连接客户端 + en-US: Connected Clients - field: cluster_connections type: 0 + i18n: + zh-CN: 集群连接 + en-US: Cluster Connections - field: maxclients type: 0 + i18n: + zh-CN: 最大客户端数 + en-US: Maxclients - field: client_recent_max_input_buffer type: 0 + i18n: + zh-CN: 客户端最近最大输入缓冲区 + en-US: Client Recent Max Input Buffer - field: client_recent_max_output_buffer type: 0 + i18n: + zh-CN: 客户端最近最大输出缓冲区 + en-US: Client Recent Max Output Buffer - field: blocked_clients type: 0 + i18n: + zh-CN: 阻塞客户端 + en-US: Blocked Clients - field: tracking_clients type: 0 + i18n: + zh-CN: 跟踪客户端 + en-US: Tracking Clients - field: clients_in_timeout_table type: 0 + i18n: + zh-CN: 超时表中的客户端 + en-US: Clients In Timeout Table # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -214,6 +316,9 @@ metrics: pattern: ^_^pattern^_^ - name: stats + i18n: + zh-CN: 统计 + en-US: Stats # metrics 
scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 2 @@ -221,80 +326,194 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: total_connections_received type: 0 + i18n: + zh-CN: 收到总连接数 + en-US: Total Connections Received - field: total_commands_processed type: 0 + i18n: + zh-CN: 处理总命令数 + en-US: Total Commands Processed - field: instantaneous_ops_per_sec type: 0 + i18n: + zh-CN: 瞬时每秒操作数 + en-US: Instantaneous Ops Per Sec - field: total_net_input_bytes type: 0 + i18n: + zh-CN: 总网络输入字节 + en-US: Total Net Input Bytes - field: total_net_output_bytes type: 0 + i18n: + zh-CN: 总网络输出字节 + en-US: Total Net Output Bytes - field: instantaneous_input_kbps type: 0 + i18n: + zh-CN: 瞬时的输入 kbps + en-US: Instantaneous Input Kbps - field: instantaneous_output_kbps type: 0 + i18n: + zh-CN: 瞬时输出 kbps + en-US: Instantaneous Output Kbps - field: rejected_connections type: 0 + i18n: + zh-CN: 拒绝连接数 + en-US: Rejected Connections - field: sync_full type: 0 + i18n: + zh-CN: 全量同步 + en-US: Sync Full - field: sync_partial_ok type: 0 + i18n: + zh-CN: 部分同步成功 + en-US: Sync Partial Ok - field: sync_partial_err type: 0 + i18n: + zh-CN: 部分同步失败 + en-US: Sync Partial Err - field: expired_keys type: 0 + i18n: + zh-CN: 过期键 + en-US: Expired Keys - field: expired_stale_perc type: 0 + i18n: + zh-CN: 过期key占比 + en-US: Expired Stale Perc - field: expired_time_cap_reached_count type: 0 + i18n: + zh-CN: 达到过期时间上限计数 + en-US: Expired Time Cap Reached Count - field: expire_cycle_cpu_milliseconds type: 0 + i18n: + zh-CN: 过期周期 CPU 毫秒 + en-US: Expire Cycle CPU Milliseconds - field: evicted_keys type: 0 + i18n: + zh-CN: 逐出键 + en-US: Evicted Keys - field: keyspace_hits type: 0 + i18n: + zh-CN: 命中键 + en-US: Keyspace 
Hits - field: keyspace_misses type: 0 + i18n: + zh-CN: 未命中键 + en-US: Keyspace Misses - field: pubsub_channels type: 0 + i18n: + zh-CN: 发布订阅频道 + en-US: Pubsub Channels - field: pubsub_patterns type: 0 + i18n: + zh-CN: 发布订阅模式 + en-US: Pubsub Patterns - field: latest_fork_usec type: 0 + i18n: + zh-CN: 最新 fork 毫秒 + en-US: Latest Fork Usec - field: total_forks type: 0 + i18n: + zh-CN: 总 fork 数 + en-US: Total Forks - field: migrate_cached_sockets type: 0 + i18n: + zh-CN: 迁移缓存套接字 + en-US: Migrate Cached Sockets - field: slave_expires_tracked_keys type: 0 + i18n: + zh-CN: 从节点过期跟踪键 + en-US: Slave Expires Tracked Keys - field: active_defrag_hits type: 0 + i18n: + zh-CN: 活跃碎片整理命中 + en-US: Active Defrag Hits - field: active_defrag_misses type: 0 + i18n: + zh-CN: 活跃碎片整理未命中 + en-US: Active Defrag Misses - field: active_defrag_key_hits type: 0 + i18n: + zh-CN: 活跃碎片整理键命中 + en-US: Active Defrag Key Hits - field: active_defrag_key_misses type: 0 + i18n: + zh-CN: 活跃碎片整理键未命中 + en-US: Active Defrag Key Misses - field: tracking_total_keys type: 0 + i18n: + zh-CN: 跟踪键总数 + en-US: Tracking Total Keys - field: tracking_total_items type: 0 + i18n: + zh-CN: 跟踪项总数 + en-US: Tracking Total Items - field: tracking_total_prefixes type: 0 + i18n: + zh-CN: 跟踪前缀总数 + en-US: Tracking Total Prefixes - field: unexpected_error_replies type: 0 + i18n: + zh-CN: 意外错误回复 + en-US: Unexpected Error Replies - field: total_error_replies type: 0 + i18n: + zh-CN: 总错误回复 + en-US: Total Error Replies - field: dump_payload_sanitizations type: 0 + i18n: + zh-CN: 转储有效负载深度完整性验证的总数 + en-US: Dump Payload Sanitizations - field: total_reads_processed type: 0 + i18n: + zh-CN: 总读取处理 + en-US: Total Reads Processed - field: total_writes_processed type: 0 + i18n: + zh-CN: 总写入处理 + en-US: Total Writes Processed - field: io_threaded_reads_processed type: 0 + i18n: + zh-CN: IO 线程读取处理 + en-US: Io Threaded Reads Processed - field: io_threaded_writes_processed type: 0 + i18n: + zh-CN: IO 线程写入处理 + en-US: Io Threaded Writes Processed # the 
protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -312,6 +531,9 @@ metrics: pattern: ^_^pattern^_^ - name: cpu + i18n: + zh-CN: CPU + en-US: CPU # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 3 @@ -319,16 +541,34 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: used_cpu_sys type: 0 + i18n: + zh-CN: 系统已使用 CPU + en-US: Sys CPU Used - field: used_cpu_user type: 0 + i18n: + zh-CN: 用户已使用 CPU + en-US: User CPU Used - field: used_cpu_sys_children type: 0 + i18n: + zh-CN: Sys 子进程已使用 CPU + en-US: Sys Children CPU Used - field: used_cpu_user_children type: 0 + i18n: + zh-CN: 用户子进程已使用 CPU + en-US: User Children CPU Used - field: used_cpu_sys_main_thread type: 0 + i18n: + zh-CN: 系统主线程已使用 CPU + en-US: Sys Main Thread CPU Used - field: used_cpu_user_main_thread type: 0 + i18n: + zh-CN: 用户主线程已使用 CPU + en-US: User Main Thread CPU Used # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis @@ -346,6 +586,9 @@ metrics: pattern: ^_^pattern^_^ - name: sentinel + i18n: + zh-CN: 哨兵 + en-US: Sentinel # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 4 @@ -353,14 +596,29 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: sentinel_masters type: 1 + i18n: + zh-CN: 主节点 + en-US: Masters 
- field: sentinel_tilt type: 1 + i18n: + zh-CN: 倾斜 + en-US: Tilt - field: sentinel_running_scripts type: 1 + i18n: + zh-CN: 运行脚本 + en-US: Running Scripts - field: sentinel_scripts_queue_length type: 1 + i18n: + zh-CN: 脚本队列长度 + en-US: Scripts Queue Length - field: sentinel_simulate_failure_flags type: 1 + i18n: + zh-CN: 模拟失败标志 + en-US: Simulate Failure Flags # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redis # the config content when protocol is redis From e34b64062ec4dba6a10a21d4a4e9756b154be359 Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 12 Aug 2024 22:56:46 +0800 Subject: [PATCH 174/257] [improve] add shenyu\spring gateway i18n (#2511) --- .../src/main/resources/define/app-shenyu.yml | 60 +++++++++++++ .../resources/define/app-spring_gateway.yml | 84 +++++++++++++++++++ 2 files changed, 144 insertions(+) diff --git a/manager/src/main/resources/define/app-shenyu.yml b/manager/src/main/resources/define/app-shenyu.yml index 5d686f2e549..21789e2a903 100644 --- a/manager/src/main/resources/define/app-shenyu.yml +++ b/manager/src/main/resources/define/app-shenyu.yml @@ -51,6 +51,9 @@ params: defaultValue: 6000 metrics: - name: shenyu_request_total + i18n: + zh-CN: ShenYu请求总量 + en-US: ShenYu Request Total priority: 0 fields: - field: value @@ -70,6 +73,9 @@ metrics: parseType: prometheus - name: shenyu_request_throw_created + i18n: + zh-CN: ShenYu请求异常总量 + en-US: ShenYu Request Exception Total priority: 1 fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field @@ -90,6 +96,9 @@ metrics: parseType: prometheus - name: process_cpu_seconds_total + i18n: + zh-CN: 进程 CPU 时间总量 (秒) + en-US: Process CPU Time Total priority: 1 fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field @@ -111,6 +120,9 @@ metrics: parseType: prometheus - name: process_open_fds + i18n: + 
zh-CN: 进程打开的文件描述符数量 + en-US: Process Open Fds priority: 1 fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field @@ -131,6 +143,9 @@ metrics: parseType: prometheus - name: process_max_fds + i18n: + zh-CN: 进程最大文件描述符数量 + en-US: Process Max Fds priority: 1 fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field @@ -151,15 +166,27 @@ metrics: parseType: prometheus - name: jvm_info + i18n: + zh-CN: JVM 信息 + en-US: JVM Info priority: 1 fields: # Metric information includes field name, type field type (0-number number, 1-string string), label-whether it is a metric label field, unit: metric unit - field: runtime type: 1 + i18n: + zh-CN: 运行时 + en-US: Runtime - field: vendor type: 1 + i18n: + zh-CN: 供应商 + en-US: Vendor - field: version type: 1 + i18n: + zh-CN: 版本 + en-US: Version # Monitoring and collection usage protocol eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http # Specific collection configuration when protocol is http protocol @@ -175,12 +202,18 @@ metrics: parseType: prometheus - name: jvm_memory_bytes_used + i18n: + zh-CN: JVM 内存使用字节数 + en-US: JVM Memory Bytes Used priority: 1 fields: # Metric information includes field name, type field type (0-number number, 1-string string), label-whether it is a metric label field, unit: metric unit - field: area type: 1 label: true + i18n: + zh-CN: 区域 + en-US: Area - field: value type: 0 unit: MB @@ -201,12 +234,18 @@ metrics: parseType: prometheus - name: jvm_memory_pool_bytes_used + i18n: + zh-CN: JVM 内存池使用字节数 + en-US: JVM Memory Pool Bytes Used priority: 1 fields: # Metric information includes field name, type field type (0-number number, 1-string string), label-whether it is a metric label field, unit: metric unit - field: pool type: 1 label: true + i18n: + zh-CN: 池 + en-US: Pool - field: value type: 0 unit: MB @@ -227,12 +266,18 @@ metrics: 
parseType: prometheus - name: jvm_memory_pool_bytes_committed + i18n: + zh-CN: JVM 内存池提交字节数 + en-US: JVM Memory Pool Bytes Committed priority: 1 fields: # Metric information includes field name, type field type (0-number number, 1-string string), label-whether it is a metric label field, unit: metric unit - field: pool type: 1 label: true + i18n: + zh-CN: 池 + en-US: Pool - field: value type: 0 unit: MB @@ -253,12 +298,18 @@ metrics: parseType: prometheus - name: jvm_memory_pool_bytes_max + i18n: + zh-CN: JVM 内存池最大字节数 + en-US: JVM Memory Pool Bytes Max priority: 1 fields: # Metric information includes field name, type field type (0-number number, 1-string string), label-whether it is a metric label field, unit: metric unit - field: pool type: 1 label: true + i18n: + zh-CN: 池 + en-US: Pool - field: value type: 0 unit: MB @@ -279,14 +330,23 @@ metrics: parseType: prometheus - name: jvm_threads_state + i18n: + zh-CN: JVM 线程状态 + en-US: JVM Threads State priority: 1 fields: # Metric information includes field name, type field type (0-number number, 1-string string), label-whether it is a metric label field, unit: metric unit - field: state type: 1 label: true + i18n: + zh-CN: 状态 + en-US: State - field: count type: 0 + i18n: + zh-CN: 数量 + en-US: Count aliasFields: - state - value diff --git a/manager/src/main/resources/define/app-spring_gateway.yml b/manager/src/main/resources/define/app-spring_gateway.yml index 4c8253e63a9..d7333237c73 100644 --- a/manager/src/main/resources/define/app-spring_gateway.yml +++ b/manager/src/main/resources/define/app-spring_gateway.yml @@ -109,6 +109,9 @@ params: metrics: # metrics - available - name: available + i18n: + zh-CN: 可用性 + en-US: Availability # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -118,6 +121,9 @@ 
metrics: - field: responseTime type: 0 unit: ms + i18n: + zh-CN: 响应时间 + en-US: Response Time # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http # Specific collection configuration when protocol is http protocol @@ -142,21 +148,42 @@ metrics: parseType: default - name: environment + i18n: + zh-CN: 环境 + en-US: Environment priority: 1 fields: # The metric information, including field name, type of the field (0-number, 1-string), whether it is an instance primary key, and the unit of the metric. - field: profile type: 1 + i18n: + zh-CN: 配置文件 + en-US: Profile - field: port type: 0 + i18n: + zh-CN: 端口 + en-US: Port - field: os type: 1 + i18n: + zh-CN: 操作系统 + en-US: Operating System - field: os_arch type: 1 + i18n: + zh-CN: 操作系统架构 + en-US: Operating System Architecture - field: jdk_vendor type: 1 + i18n: + zh-CN: JDK 供应商 + en-US: JDK Vendor - field: jvm_version type: 1 + i18n: + zh-CN: JVM 版本 + en-US: JVM Version # metric alias list, used to identify metrics in query results aliasFields: - $.activeProfiles[0] @@ -199,11 +226,17 @@ metrics: parseScript: '$' - name: thread_state + i18n: + zh-CN: 线程状态 + en-US: Thread State visible: false priority: 2 fields: - field: state type: 1 + i18n: + zh-CN: 状态 + en-US: State protocol: http http: host: ^_^host^_^ @@ -219,13 +252,22 @@ metrics: parseScript: '$.availableTags[?(@.tag == "state")].values[*]' - name: threads + i18n: + zh-CN: 线程 + en-US: Threads priority: 3 fields: - field: state type: 1 + i18n: + zh-CN: 状态 + en-US: State instance: true - field: size type: 0 + i18n: + zh-CN: 数量 + en-US: Size aliasFields: - $.measurements[?(@.statistic == "VALUE")].value calculates: @@ -246,11 +288,17 @@ metrics: parseScript: '$' - name: space_name + i18n: + zh-CN: 空间名称 + en-US: Space Name visible: false priority: 4 fields: - field: id type: 1 + i18n: + zh-CN: 主键 + en-US: ID protocol: http http: host: ^_^host^_^ @@ -266,13 +314,22 @@ metrics: parseScript: '$.availableTags[?(@.tag == "id")].values[*]' 
- name: memory_used + i18n: + zh-CN: 内存使用 + en-US: Memory Used priority: 5 fields: - field: space type: 1 + i18n: + zh-CN: 空间 + en-US: Space instance: true - field: mem_used type: 0 + i18n: + zh-CN: 内存使用 + en-US: Memory Used unit: MB aliasFields: - $.measurements[?(@.statistic == "VALUE")].value @@ -296,10 +353,16 @@ metrics: parseScript: '$' - name: health + i18n: + zh-CN: 健康 + en-US: Health priority: 6 fields: - field: status type: 1 + i18n: + zh-CN: 状态 + en-US: Status protocol: http http: host: ^_^host^_^ @@ -314,11 +377,17 @@ metrics: parseType: default - name: route_id + i18n: + zh-CN: 路由 ID + en-US: Route ID visible: false priority: 7 fields: - field: name type: 1 + i18n: + zh-CN: 名称 + en-US: Name protocol: http http: host: ^_^host^_^ @@ -334,17 +403,32 @@ metrics: parseScript: '$[*].route_id' - name: route_info + i18n: + zh-CN: 路由信息 + en-US: Route Info priority: 8 fields: - field: route_id type: 1 + i18n: + zh-CN: 路由 ID + en-US: Route ID instance: true - field: predicate type: 1 + i18n: + zh-CN: 断言 + en-US: Predicate - field: uri type: 1 + i18n: + zh-CN: URI + en-US: URI - field: order type: 0 + i18n: + zh-CN: 顺序 + en-US: Order aliasFields: - $.predicate - $.uri From ddfa60e59714f79309a70f5823a42c9cf8520215 Mon Sep 17 00:00:00 2001 From: aias00 Date: Mon, 12 Aug 2024 23:04:48 +0800 Subject: [PATCH 175/257] [improve] add spring tidb i18n (#2512) Co-authored-by: tomsun28 --- .../main/resources/define/app-springboot2.yml | 63 +++++++++++++++++++ .../src/main/resources/define/app-tidb.yml | 63 +++++++++++++++++++ 2 files changed, 126 insertions(+) diff --git a/manager/src/main/resources/define/app-springboot2.yml b/manager/src/main/resources/define/app-springboot2.yml index 03bff8e07c0..a6db6b6cca8 100644 --- a/manager/src/main/resources/define/app-springboot2.yml +++ b/manager/src/main/resources/define/app-springboot2.yml @@ -109,6 +109,9 @@ params: metrics: # metrics - available - name: available + i18n: + zh-CN: 可用性 + en-US: Availability # metrics scheduling 
priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -118,6 +121,9 @@ metrics: - field: responseTime type: 0 unit: ms + i18n: + zh-CN: 响应时间 + en-US: Response Time # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http # Specific collection configuration when protocol is http protocol @@ -142,22 +148,43 @@ metrics: parseType: default # metrics - environment - name: environment + i18n: + zh-CN: 环境信息 + en-US: Environment priority: 1 # The specific monitoring metrics in the metric group. fields: # The metric information, including field name, type of the field (0-number, 1-string), whether it is an instance primary key, and the unit of the metric. - field: profile type: 1 + i18n: + zh-CN: 配置文件 + en-US: Profile - field: port type: 0 + i18n: + zh-CN: 端口 + en-US: Port - field: os type: 1 + i18n: + zh-CN: 操作系统 + en-US: Operating System - field: os_arch type: 1 + i18n: + zh-CN: 操作系统架构 + en-US: Operating System Architecture - field: jdk_vendor type: 1 + i18n: + zh-CN: JDK 厂商 + en-US: JDK Vendor - field: jvm_version type: 1 + i18n: + zh-CN: JVM 版本 + en-US: JVM Version # metric alias list, used to identify metrics in query results aliasFields: - $.activeProfiles @@ -200,6 +227,9 @@ metrics: parseScript: '$' # metrics - thread_state - name: thread_state + i18n: + zh-CN: 线程状态 + en-US: Thread State # Whether to display the metric group in the query result, the default is true visible: false # The specific monitoring metrics in the metric group. 
@@ -207,6 +237,9 @@ metrics: fields: - field: state type: 1 + i18n: + zh-CN: 状态 + en-US: State protocol: http http: # Host: ipv4 ipv6 domain @@ -230,13 +263,22 @@ metrics: parseScript: '$.availableTags[?(@.tag == "state")].values[*]' - name: threads + i18n: + zh-CN: 线程 + en-US: Threads priority: 3 fields: - field: state type: 1 + i18n: + zh-CN: 状态 + en-US: State label: true - field: size type: 0 + i18n: + zh-CN: 数量 + en-US: Size aliasFields: - $.measurements[?(@.statistic == "VALUE")].value calculates: @@ -257,11 +299,17 @@ metrics: parseScript: '$' - name: space_name + i18n: + zh-CN: 空间名称 + en-US: Space Name visible: false priority: 4 fields: - field: id type: 1 + i18n: + zh-CN: ID + en-US: ID protocol: http http: host: ^_^host^_^ @@ -277,13 +325,22 @@ metrics: parseScript: '$.availableTags[?(@.tag == "id")].values[*]' - name: memory_used + i18n: + zh-CN: 内存使用 + en-US: Memory Used priority: 5 fields: - field: space type: 1 + i18n: + zh-CN: 空间 + en-US: Space label: true - field: mem_used type: 0 + i18n: + zh-CN: 内存使用 + en-US: Memory Used unit: MB aliasFields: - $.measurements[?(@.statistic == "VALUE")].value @@ -307,10 +364,16 @@ metrics: parseScript: '$' - name: health + i18n: + zh-CN: 健康 + en-US: Health priority: 6 fields: - field: status type: 1 + i18n: + zh-CN: 状态 + en-US: Status protocol: http http: host: ^_^host^_^ diff --git a/manager/src/main/resources/define/app-tidb.yml b/manager/src/main/resources/define/app-tidb.yml index b0a58c557a2..2d843bbe65d 100644 --- a/manager/src/main/resources/define/app-tidb.yml +++ b/manager/src/main/resources/define/app-tidb.yml @@ -118,6 +118,9 @@ params: metrics: # metrics - status - name: status + i18n: + zh-CN: 状态 + en-US: Status # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 @@ -126,10 +129,19 @@ 
metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: connections type: 0 + i18n: + zh-CN: 连接数 + en-US: Connections - field: version type: 1 + i18n: + zh-CN: 版本 + en-US: Version - field: git_hash type: 1 + i18n: + zh-CN: Git Hash + en-US: Git Hash # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: http http: @@ -147,30 +159,66 @@ metrics: parseScript: '$' - name: stores + i18n: + zh-CN: 存储 + en-US: Stores priority: 1 fields: - field: address type: 1 + i18n: + zh-CN: 地址 + en-US: Address - field: version type: 1 + i18n: + zh-CN: 版本 + en-US: Version - field: status_address type: 1 + i18n: + zh-CN: 状态地址 + en-US: Status Address - field: deploy_path type: 1 + i18n: + zh-CN: 部署路径 + en-US: Deploy Path - field: state_name type: 1 + i18n: + zh-CN: 状态名称 + en-US: State Name - field: capacity type: 1 + i18n: + zh-CN: 容量 + en-US: Capacity - field: available type: 1 + i18n: + zh-CN: 可用 + en-US: Available - field: used_size type: 1 + i18n: + zh-CN: 已用 + en-US: Used - field: start_ts type: 1 + i18n: + zh-CN: 启动时间 + en-US: Start Time - field: last_heartbeat_ts type: 1 + i18n: + zh-CN: 上次心跳时间 + en-US: Last Heartbeat Time - field: uptime type: 1 + i18n: + zh-CN: 启动时长 + en-US: Uptime aliasFields: - $.store.address - $.store.version @@ -206,6 +254,9 @@ metrics: parseScript: '$.stores.*' - name: basic + i18n: + zh-CN: 基本信息 + en-US: Basic Info # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 2 @@ -214,13 +265,25 @@ metrics: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field - field: version type: 1 + i18n: + zh-CN: 版本 + en-US: Version label: true - field: 
port type: 1 + i18n: + zh-CN: 端口 + en-US: Port - field: datadir type: 1 + i18n: + zh-CN: 数据目录 + en-US: Data Directory - field: max_connections type: 0 + i18n: + zh-CN: 最大连接数 + en-US: Max Connections # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field aliasFields: - version From 86c7c2dadf152bab386440ef4dca68715f751759 Mon Sep 17 00:00:00 2001 From: Jast Date: Mon, 12 Aug 2024 23:26:32 +0800 Subject: [PATCH 176/257] [Improve]add presto monitor metric (#2516) Co-authored-by: tomsun28 --- home/docs/help/prestodb.md | 14 +++++ .../current/help/prestodb.md | 13 +++++ .../main/resources/define/app-prestodb.yml | 58 +++++++++++++++++++ 3 files changed, 85 insertions(+) diff --git a/home/docs/help/prestodb.md b/home/docs/help/prestodb.md index 23deb48cb81..8c19e178ed5 100644 --- a/home/docs/help/prestodb.md +++ b/home/docs/help/prestodb.md @@ -33,6 +33,20 @@ keywords: [ open source monitoring system, open source database monitoring, pres | runningDrivers | None | Running Drivers| | runningTasks | None | Running Tasks | +### Metrics Collection: Node Information + +| Metric Name | Unit | Metric Description | +|-------------------|------|----------------------------------------------------| +| `uri` | None | Node link | +| `recentRequests` | None | Number of requests in the recent period | +| `recentFailures` | None | Number of failed requests in the recent period | +| `recentSuccesses` | None | Number of successful requests in the recent period | +| `lastRequestTime` | None | Time of the most recent request | +| `lastResponseTime`| None | Time of the most recent response | +| `age` | None | Duration of operation | +| `recentFailureRatio` | None | Failure rate in the recent period | + + #### Metric Set: Node Status diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md index 1f2a7ac38e9..31ed6d64692 
100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md @@ -33,6 +33,19 @@ keywords: [ 开源监控系统, 开源数据库监控, Presto数据库监控 ] | runningDrivers | 无 | 运行中的驱动数 | | runningTasks | 无 | 运行中的任务数 | +#### 指标集合:节点信息 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|------|------| ---------------------------------- | +| uri | 无 | 节点链接 | +| recentRequests | 无 | 最近一段时间内的请求数量 | +| recentFailures | 无 | 最近一段时间内的失败请求数量 | +| recentSuccesses | 无 | 最近一段时间内的成功请求数量 | +| lastRequestTime | 无 | 最近一次请求的时间 | +| lastResponseTime | 无 | 最近一次响应的时间 | +| age | 无 | 持续时间 | +| recentFailureRatio | 无 | 最近一段时间内的失败 | + #### 指标集合:节点状态 diff --git a/manager/src/main/resources/define/app-prestodb.yml b/manager/src/main/resources/define/app-prestodb.yml index 94c1fdd2785..7f509ddb83a 100644 --- a/manager/src/main/resources/define/app-prestodb.yml +++ b/manager/src/main/resources/define/app-prestodb.yml @@ -105,6 +105,64 @@ metrics: parseType: jsonPath parseScript: '$' + - name: node + i18n: + zh-CN: 节点信息 + en-US: Node Info + priority: 0 + fields: + - field: uri + type: 1 + i18n: + zh-CN: 节点链接 + en-US: Node Uri + - field: recentRequests + type: 0 + i18n: + zh-CN: 最近一段时间内的请求数量 + en-US: Recent Requests + - field: recentFailures + type: 0 + i18n: + zh-CN: 最近一段时间内的失败请求数量 + en-US: Recent Failures + - field: recentSuccesses + type: 0 + i18n: + zh-CN: 最近一段时间内的成功请求数量 + en-US: Recent Successes + - field: lastRequestTime + type: 1 + i18n: + zh-CN: 最近一次请求的时间 + en-US: Last Request Time + - field: lastResponseTime + type: 1 + i18n: + zh-CN: 最近一次响应的时间 + en-US: Last Response Time + - field: age + type: 0 + i18n: + zh-CN: 持续时间 + en-US: Age + - field: recentFailureRatio + type: 0 + i18n: + zh-CN: 最近一段时间内的失败 + en-US: Recent Failure Ratio + + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /v1/node + timeout: ^_^timeout^_^ + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$[*]' + - name: status i18n: zh-CN: 节点状态 
From b39602f15d5cc4d0eb295f86fc521db44f57274b Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 12 Aug 2024 23:28:22 +0800 Subject: [PATCH 177/257] [improve] add MetricsDataRedisCodec unit test (#2514) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../store/MetricsDataRedisCodecTest.java | 64 +++++++++++++++++-- 1 file changed, 60 insertions(+), 4 deletions(-) diff --git a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MetricsDataRedisCodecTest.java b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MetricsDataRedisCodecTest.java index b42fe9dff68..a71e98f6fdd 100644 --- a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MetricsDataRedisCodecTest.java +++ b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MetricsDataRedisCodecTest.java @@ -17,32 +17,88 @@ package org.apache.hertzbeat.warehouse.store; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import lombok.extern.slf4j.Slf4j; +import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.warehouse.store.realtime.redis.MetricsDataRedisCodec; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; /** * Test case for {@link MetricsDataRedisCodec} */ + +@Slf4j class MetricsDataRedisCodecTest { + private MetricsDataRedisCodec codec; + @BeforeEach void setUp() { + + codec = new MetricsDataRedisCodec(); } @Test - void decodeKey() { + void testEncodeKey() { + + String key = "testKey"; + ByteBuffer encodedKey = codec.encodeKey(key); + String decodedKey = StandardCharsets.UTF_8.decode(encodedKey).toString(); + + assertEquals(key, decodedKey); } @Test - void decodeValue() { + void testDecodeKey() { + + String key = "testKey"; + ByteBuffer buffer = 
ByteBuffer.wrap(key.getBytes(StandardCharsets.UTF_8)); + String decodedKey = codec.decodeKey(buffer); + + assertEquals(key, decodedKey); } @Test - void encodeKey() { + void testEncodeValue() { + + CollectRep.MetricsData metricsData = Mockito.mock(CollectRep.MetricsData.class); + byte[] bytes = new byte[] {1, 2, 3}; + Mockito.when(metricsData.toByteArray()).thenReturn(bytes); + + ByteBuffer encodedValue = codec.encodeValue(metricsData); + assertArrayEquals(bytes, encodedValue.array()); } @Test - void encodeValue() { + void testDecodeValue() { + + CollectRep.MetricsData metricsData = Mockito.mock(CollectRep.MetricsData.class); + byte[] bytes = new byte[] {1, 2, 3}; + + ByteBuffer buffer = ByteBuffer.wrap(bytes); + + try { + Mockito.mockStatic(CollectRep.MetricsData.class); + Mockito.when(CollectRep.MetricsData.parseFrom(buffer)).thenReturn(metricsData); + + CollectRep.MetricsData decodedValue = codec.decodeValue(buffer); + + assertEquals(metricsData, decodedValue); + } + catch (Exception e) { + log.error(e.getMessage()); + fail("Exception thrown during decodeValue test"); + } + finally { + Mockito.clearAllCaches(); + } } + } From 3a2350b86d775782413f69ef29a4b8e7d8b6cb6b Mon Sep 17 00:00:00 2001 From: YuLuo Date: Mon, 12 Aug 2024 23:34:08 +0800 Subject: [PATCH 178/257] [improve] add MemoryDataStorage unit test (#2515) Signed-off-by: yuluo-yx Co-authored-by: Calvin --- .../alert/dto/TenCloudAlertReport.java | 4 +- .../service/WarehouseServiceTest.java | 2 +- .../store/MemoryDataStorageTest.java | 87 ++++++++++++++++++- 3 files changed, 87 insertions(+), 6 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/dto/TenCloudAlertReport.java b/alerter/src/main/java/org/apache/hertzbeat/alert/dto/TenCloudAlertReport.java index 6ea51c1b047..99846e6a14b 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/dto/TenCloudAlertReport.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/dto/TenCloudAlertReport.java @@ -174,7 +174,7 @@ public Map 
getAnnotations() { @Override public String getContent() { StringBuilder contentBuilder = new StringBuilder(); - // 判断类型 + // Check Type. if (EVENT.equals(getAlarmType())) { contentBuilder .append("[") @@ -225,5 +225,3 @@ public String getContent() { } } - - diff --git a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java index ec771f121d4..43c97e4043a 100644 --- a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java +++ b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/service/WarehouseServiceTest.java @@ -43,7 +43,7 @@ */ @ExtendWith(SpringExtension.class) -class WarehouseServiceImplTest { +class WarehouseServiceTest { @Mock private AbstractRealTimeDataStorage realTimeDataStorage; diff --git a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MemoryDataStorageTest.java b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MemoryDataStorageTest.java index da0e1f4dfc1..d066d1b303b 100644 --- a/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MemoryDataStorageTest.java +++ b/warehouse/src/test/java/org/apache/hertzbeat/warehouse/store/MemoryDataStorageTest.java @@ -17,24 +17,107 @@ package org.apache.hertzbeat.warehouse.store; +import java.util.List; +import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.warehouse.store.realtime.memory.MemoryDataStorage; +import org.apache.hertzbeat.warehouse.store.realtime.memory.MemoryProperties; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import 
static org.mockito.Mockito.when; /** * Test case for {@link MemoryDataStorage} */ + class MemoryDataStorageTest { + @Mock + private MemoryProperties memoryProperties; + + @InjectMocks + private MemoryDataStorage memoryDataStorage; + @BeforeEach void setUp() { + + MockitoAnnotations.openMocks(this); + + when(memoryProperties.initSize()).thenReturn(null); + memoryDataStorage = new MemoryDataStorage(memoryProperties); } @Test - void getCurrentMetricsData() { + void testGetCurrentMetricsDataByMetric() { + + Long monitorId = 1L; + String metric = "cpuUsage"; + CollectRep.MetricsData metricsData = mock(CollectRep.MetricsData.class); + + memoryDataStorage.saveData(metricsData); + + CollectRep.MetricsData result = memoryDataStorage.getCurrentMetricsData(monitorId, metric); + + assertNull(result); } @Test - void destroy() { + void testGetCurrentMetricsData() { + + Long monitorId = 1L; + CollectRep.MetricsData metricsData1 = mock(CollectRep.MetricsData.class); + CollectRep.MetricsData metricsData2 = mock(CollectRep.MetricsData.class); + + when(metricsData1.getId()).thenReturn(monitorId); + when(metricsData1.getMetrics()).thenReturn("cpuUsage"); + when(metricsData1.getCode()).thenReturn(CollectRep.Code.SUCCESS); + + when(metricsData2.getId()).thenReturn(monitorId); + when(metricsData2.getMetrics()).thenReturn("memoryUsage"); + when(metricsData2.getCode()).thenReturn(CollectRep.Code.SUCCESS); + + memoryDataStorage.saveData(metricsData1); + memoryDataStorage.saveData(metricsData2); + + List result = memoryDataStorage.getCurrentMetricsData(monitorId); + + assertEquals(2, result.size()); + assertTrue(result.contains(metricsData1)); + assertTrue(result.contains(metricsData2)); + } + + @Test + void testSaveDataFailure() { + + CollectRep.MetricsData metricsData = mock(CollectRep.MetricsData.class); + when(metricsData.getCode()).thenReturn(CollectRep.Code.FAIL); + + memoryDataStorage.saveData(metricsData); + + List result = 
memoryDataStorage.getCurrentMetricsData(metricsData.getId()); + assertTrue(result.isEmpty()); } + + @Test + void testDestroy() { + + CollectRep.MetricsData metricsData = mock(CollectRep.MetricsData.class); + when(metricsData.getId()).thenReturn(1L); + when(metricsData.getMetrics()).thenReturn("cpuUsage"); + when(metricsData.getCode()).thenReturn(CollectRep.Code.SUCCESS); + + memoryDataStorage.saveData(metricsData); + memoryDataStorage.destroy(); + + List result = memoryDataStorage.getCurrentMetricsData(1L); + assertTrue(result.isEmpty()); + } + } From fbb74199c963b1ae73d4d50b0d971d15e832f8cd Mon Sep 17 00:00:00 2001 From: Jast Date: Tue, 13 Aug 2024 14:16:41 +0800 Subject: [PATCH 179/257] [feature] add markdown formatter (#2519) --- home/README.md | 4 +- home/blog/2022-06-01-hertzbeat-v1.0.md | 7 +- home/blog/2022-06-19-hertzbeat-v1.1.0.md | 17 +- home/blog/2022-06-22-one-step-up.md | 15 +- home/blog/2022-07-10-hertzbeat-v1.1.1.md | 8 +- home/blog/2022-09-04-hertzbeat-v1.1.3.md | 13 +- home/blog/2022-09-10-ssl-practice.md | 18 +- home/blog/2022-10-08-hertzbeat-v1.2.0.md | 7 +- home/blog/2022-11-28-hertzbeat-v1.2.2.md | 9 +- home/blog/2022-12-19-new-committer.md | 36 +- home/blog/2022-12-28-hertzbeat-v1.2.3.md | 6 +- home/blog/2023-01-05-monitor-iotdb.md | 4 +- home/blog/2023-01-08-monitor-shenyu.md | 13 +- home/blog/2023-02-02-monitor-dynamic-tp.md | 3 - home/blog/2023-02-10-new-committer.md | 45 +- home/blog/2023-02-11-monitor-mysql.md | 9 +- home/blog/2023-02-15-monitor-linux.md | 7 +- home/blog/2023-03-15-hertzbeat-v1.3.0.md | 22 +- home/blog/2023-03-22-monitor-springboot2.md | 21 +- home/blog/2023-05-09-hertzbeat-v1.3.1.md | 15 +- home/blog/2023-05-11-greptimedb-store.md | 5 +- home/blog/2023-07-05-hertzbeat-v1.3.2.md | 23 +- home/blog/2023-08-14-hertzbeat-v1.4.0.md | 12 +- home/blog/2023-08-28-new-committer.md | 4 - home/blog/2023-09-26-hertzbeat-v1.4.1.md | 13 +- home/blog/2023-11-12-hertzbeat-v1.4.2.md | 16 +- home/blog/2023-12-11-hertzbeat-v1.4.3.md | 14 +- 
home/blog/2024-01-11-new-committer.md | 20 +- home/blog/2024-01-18-hertzbeat-v1.4.4.md | 22 +- home/blog/2024-04-17-to-apache.md | 8 +- ...-09-hertzbeat-ospp-subject-introduction.md | 25 +- .../2024-06-11-hertzbeat-v1.6.0-update.md | 39 +- home/blog/2024-06-15-hertzbeat-v1.6.0.md | 11 +- home/blog/2024-07-07-new-committer.md | 4 - home/blog/2024-07-08-new-committer.md | 2 - home/blog/2024-07-15-new-committer.md | 1 - home/blog/2024-07-27-new-committer.md | 6 +- home/blog/2024-07-28-new-committer.md | 7 +- home/blog/2024-07-29-new-committer.md | 5 + home/docs/advanced/extend-http-default.md | 25 +- .../advanced/extend-http-example-hertzbeat.md | 18 +- .../advanced/extend-http-example-token.md | 18 +- home/docs/advanced/extend-http-jsonpath.md | 28 +- home/docs/advanced/extend-http.md | 19 +- home/docs/advanced/extend-jdbc.md | 54 +- home/docs/advanced/extend-jmx.md | 12 +- home/docs/advanced/extend-ngql.md | 8 +- home/docs/advanced/extend-point.md | 23 +- home/docs/advanced/extend-snmp.md | 11 +- home/docs/advanced/extend-ssh.md | 46 +- home/docs/advanced/extend-telnet.md | 16 +- home/docs/advanced/extend-tutorial.md | 16 +- home/docs/community/become_committer.md | 30 +- home/docs/community/become_pmc_member.md | 30 +- .../community/code-style-and-quality-guide.md | 700 +++++++++--------- home/docs/community/contact.md | 2 +- home/docs/community/contribution.md | 47 +- home/docs/community/development.md | 8 +- home/docs/community/document.md | 29 +- home/docs/community/how-to-release.md | 55 +- home/docs/community/how-to-verify.md | 17 +- home/docs/community/mailing_lists.md | 41 +- home/docs/community/new_committer_process.md | 37 +- home/docs/community/new_pmc_member_process.md | 30 +- home/docs/community/submit-code.md | 68 +- home/docs/download.md | 5 +- home/docs/help/activemq.md | 129 ++-- home/docs/help/ai_config.md | 70 +- home/docs/help/airflow.md | 40 +- home/docs/help/alert_console.md | 4 +- home/docs/help/alert_dingtalk.md | 20 +- 
home/docs/help/alert_discord.md | 8 +- home/docs/help/alert_email.md | 25 +- home/docs/help/alert_enterprise_wechat_app.md | 18 +- home/docs/help/alert_feishu.md | 22 +- home/docs/help/alert_slack.md | 3 +- home/docs/help/alert_telegram.md | 3 +- home/docs/help/alert_threshold.md | 1 + home/docs/help/alert_threshold_expr.md | 64 +- home/docs/help/alert_webhook.md | 46 +- home/docs/help/alert_wework.md | 26 +- home/docs/help/almalinux.md | 43 +- home/docs/help/api.md | 46 +- home/docs/help/centos.md | 102 +-- home/docs/help/clickhouse.md | 142 ++-- home/docs/help/debian.md | 27 +- home/docs/help/dm.md | 54 +- home/docs/help/dns.md | 13 +- home/docs/help/docker.md | 96 ++- home/docs/help/doris_be.md | 171 ++--- home/docs/help/doris_fe.md | 75 +- home/docs/help/dynamic_tp.md | 45 +- home/docs/help/elasticsearch.md | 7 +- home/docs/help/euleros.md | 13 +- home/docs/help/flink.md | 19 +- home/docs/help/flink_on_yarn.md | 235 +++--- home/docs/help/freebsd.md | 7 +- home/docs/help/ftp.md | 5 +- home/docs/help/fullsite.md | 41 +- home/docs/help/guide.md | 271 ++++--- home/docs/help/hadoop.md | 13 +- home/docs/help/hbase_master.md | 24 +- home/docs/help/hbase_regionserver.md | 26 +- home/docs/help/hdfs_datanode.md | 55 +- home/docs/help/hdfs_namenode.md | 121 +-- home/docs/help/hive.md | 69 +- home/docs/help/http_sd.md | 26 +- home/docs/help/huawei_switch.md | 107 +-- home/docs/help/hugegraph.md | 235 +++--- home/docs/help/imap.md | 5 +- home/docs/help/influxdb.md | 77 +- home/docs/help/influxdb_promql.md | 11 +- home/docs/help/iotdb.md | 45 +- home/docs/help/issue.md | 45 +- home/docs/help/jetty.md | 9 +- home/docs/help/jvm.md | 8 +- home/docs/help/kafka.md | 92 ++- home/docs/help/kafka_promql.md | 9 +- home/docs/help/kubernetes.md | 86 +-- home/docs/help/linux.md | 102 +-- home/docs/help/mariadb.md | 65 +- home/docs/help/memcached.md | 9 +- home/docs/help/mongodb.md | 16 +- home/docs/help/mongodb_atlas.md | 52 +- home/docs/help/mysql.md | 43 +- home/docs/help/nacos.md | 
188 ++--- home/docs/help/nebulagraph.md | 9 +- home/docs/help/nebulagraph_cluster.md | 11 +- home/docs/help/nginx.md | 46 +- home/docs/help/ntp.md | 4 +- home/docs/help/openai.md | 10 +- home/docs/help/opengauss.md | 70 +- home/docs/help/opensuse.md | 130 ++-- home/docs/help/oracle.md | 53 +- home/docs/help/ping.md | 29 +- home/docs/help/plugin.md | 7 +- home/docs/help/pop3.md | 12 +- home/docs/help/port.md | 30 +- home/docs/help/postgresql.md | 68 +- home/docs/help/prestodb.md | 87 +-- home/docs/help/process.md | 45 +- home/docs/help/prometheus.md | 4 +- home/docs/help/pulsar.md | 46 +- home/docs/help/rabbitmq.md | 50 +- home/docs/help/redhat.md | 13 +- home/docs/help/redis.md | 401 +++++----- home/docs/help/redis_cluster.md | 108 +-- home/docs/help/rocketmq.md | 7 +- home/docs/help/rockylinux.md | 13 +- home/docs/help/shenyu.md | 103 ++- home/docs/help/smtp.md | 5 +- home/docs/help/spark.md | 19 +- home/docs/help/spring_gateway.md | 70 +- home/docs/help/springboot2.md | 61 +- home/docs/help/springboot3.md | 13 +- home/docs/help/sqlserver.md | 70 +- home/docs/help/ssl_cert.md | 19 +- home/docs/help/status.md | 43 +- home/docs/help/tidb.md | 110 +-- home/docs/help/time_expression.md | 41 +- home/docs/help/tomcat.md | 75 +- home/docs/help/ubuntu.md | 102 +-- home/docs/help/udp_port.md | 6 +- home/docs/help/website.md | 17 +- home/docs/help/websocket.md | 5 +- home/docs/help/windows.md | 43 +- home/docs/help/yarn.md | 103 +-- home/docs/help/zookeeper.md | 132 ++-- home/docs/introduce.md | 24 +- home/docs/others/design.md | 6 +- home/docs/others/resource.md | 16 +- home/docs/start/account-modify.md | 11 +- home/docs/start/custom-config.md | 10 +- home/docs/start/docker-compose-deploy.md | 7 +- home/docs/start/docker-deploy.md | 34 +- home/docs/start/greptime-init.md | 29 +- home/docs/start/influxdb-init.md | 54 +- home/docs/start/iotdb-init.md | 7 +- home/docs/start/mysql-change.md | 32 +- home/docs/start/package-deploy.md | 42 +- home/docs/start/postgresql-change.md 
| 22 +- home/docs/start/quickstart.md | 10 +- home/docs/start/rainbond-deploy.md | 2 +- home/docs/start/sslcert-practice.md | 15 +- home/docs/start/tdengine-init.md | 88 ++- home/docs/start/update-1.6.0.md | 39 +- home/docs/start/upgrade.md | 17 +- home/docs/start/victoria-metrics-init.md | 32 +- home/docs/template.md | 24 +- .../2022-06-01-hertzbeat-v1.0.md | 14 +- .../2022-06-19-hertzbeat-v1.1.0.md | 27 +- .../2022-06-22-one-step-up.md | 26 +- .../2022-07-10-hertzbeat-v1.1.1.md | 12 +- .../2022-09-04-hertzbeat-v1.1.3.md | 22 +- .../2022-09-10-ssl-practice.md | 18 +- .../2022-10-08-hertzbeat-v1.2.0.md | 26 +- .../2022-11-28-hertzbeat-v1.2.2.md | 9 +- .../2022-12-19-new-committer.md | 36 +- .../2022-12-28-hertzbeat-v1.2.3.md | 6 +- .../2023-01-05-monitor-iotdb.md | 90 ++- .../2023-01-08-monitor-shenyu.md | 93 ++- .../2023-02-02-monitor-dynamic-tp.md | 95 ++- .../2023-02-10-new-committer.md | 25 +- .../2023-02-11-monitor-mysql.md | 83 +-- .../2023-02-15-monitor-linux.md | 178 +++-- .../2023-03-15-hertzbeat-v1.3.0.md | 34 +- .../2023-03-22-monitor-springboot2.md | 102 ++- .../2023-05-09-hertzbeat-v1.3.1.md | 16 +- .../2023-05-11-greptimedb-store.md | 3 +- .../2023-07-05-hertzbeat-v1.3.2.md | 22 +- .../2023-08-14-hertzbeat-v1.4.0.md | 41 +- .../2023-08-28-new-committer.md | 38 +- .../2023-09-26-hertzbeat-v1.4.1.md | 16 +- .../2023-11-12-hertzbeat-v1.4.2.md | 16 +- .../2023-12-11-hertzbeat-v1.4.3.md | 15 +- .../2024-01-11-new-committer.md | 20 +- .../2024-01-18-hertzbeat-v1.4.4.md | 23 +- .../2024-04-17-to-apache.md | 13 +- ...-09-hertzbeat-ospp-subject-introduction.md | 22 +- .../2024-06-11-hertzbeat-v1.6.0-update.md | 27 +- .../2024-06-15-hertzbeat-v1.6.0.md | 11 +- .../2024-07-07-new-committer.md | 4 - .../2024-07-08-new-committer.md | 5 +- .../2024-07-15-new-committer.md | 2 - .../2024-07-27-new-committer.md | 2 +- .../2024-07-28-new-committer.md | 6 +- .../2024-07-29-new-committer.md | 7 +- .../current/advanced/extend-http-default.md | 24 +- 
.../advanced/extend-http-example-hertzbeat.md | 18 +- .../advanced/extend-http-example-token.md | 26 +- .../current/advanced/extend-http-jsonpath.md | 30 +- .../current/advanced/extend-http.md | 23 +- .../current/advanced/extend-jdbc.md | 53 +- .../current/advanced/extend-jmx.md | 23 +- .../current/advanced/extend-ngql.md | 8 +- .../current/advanced/extend-point.md | 13 +- .../current/advanced/extend-snmp.md | 23 +- .../current/advanced/extend-ssh.md | 47 +- .../current/advanced/extend-telnet.md | 20 +- .../current/advanced/extend-tutorial.md | 18 +- .../current/community/become_committer.md | 30 +- .../current/community/become_pmc_member.md | 30 +- .../community/code-style-and-quality-guide.md | 638 ++++++++-------- .../current/community/contact.md | 3 +- .../current/community/contribution.md | 45 +- .../current/community/development.md | 6 +- .../current/community/document.md | 29 +- .../current/community/how-to-release.md | 53 +- .../current/community/how-to-verify.md | 20 +- .../current/community/mailing_lists.md | 42 +- .../community/new_committer_process.md | 50 +- .../community/new_pmc_member_process.md | 30 +- .../current/community/submit-code.md | 80 +- .../current/download.md | 9 +- .../current/help/activemq.md | 128 ++-- .../current/help/ai_config.md | 64 +- .../current/help/airflow.md | 40 +- .../current/help/alert_console.md | 4 +- .../current/help/alert_dingtalk.md | 20 +- .../current/help/alert_discord.md | 42 +- .../current/help/alert_email.md | 31 +- .../help/alert_enterprise_wechat_app.md | 16 +- .../current/help/alert_feishu.md | 22 +- .../current/help/alert_slack.md | 27 +- .../current/help/alert_telegram.md | 33 +- .../current/help/alert_threshold.md | 3 +- .../current/help/alert_threshold_expr.md | 33 +- .../current/help/alert_webhook.md | 46 +- .../current/help/alert_wework.md | 28 +- .../current/help/almalinux.md | 141 ++-- .../current/help/api.md | 43 +- .../current/help/centos.md | 102 +-- .../current/help/clickhouse.md | 143 ++-- 
.../current/help/debian.md | 89 ++- .../current/help/dm.md | 54 +- .../current/help/dns.md | 36 +- .../current/help/docker.md | 92 ++- .../current/help/doris_fe.md | 124 ++-- .../current/help/dynamic_tp.md | 74 +- .../current/help/elasticsearch.md | 9 +- .../current/help/euleros.md | 19 +- .../current/help/flink.md | 6 +- .../current/help/flink_on_yarn.md | 238 +++--- .../current/help/freebsd.md | 15 +- .../current/help/ftp.md | 5 +- .../current/help/fullsite.md | 34 +- .../current/help/guide.md | 270 ++++--- .../current/help/hadoop.md | 75 +- .../current/help/hbase_master.md | 64 +- .../current/help/hbase_regionserver.md | 131 ++-- .../current/help/hdfs_datanode.md | 57 +- .../current/help/hdfs_namenode.md | 122 +-- .../current/help/hive.md | 64 +- .../current/help/huawei_switch.md | 107 +-- .../current/help/hugegraph.md | 213 +++--- .../current/help/imap.md | 7 +- .../current/help/influxdb.md | 68 +- .../current/help/influxdb_promql.md | 11 +- .../current/help/iotdb.md | 69 +- .../current/help/issue.md | 45 +- .../current/help/jetty.md | 73 +- .../current/help/jvm.md | 75 +- .../current/help/kafka.md | 92 ++- .../current/help/kafka_promql.md | 7 +- .../current/help/kubernetes.md | 77 +- .../current/help/linux.md | 102 +-- .../current/help/mariadb.md | 65 +- .../current/help/memcached.md | 5 +- .../current/help/mongodb.md | 16 +- .../current/help/mongodb_atlas.md | 104 ++- .../current/help/mysql.md | 65 +- .../current/help/nacos.md | 188 ++--- .../current/help/nebulagraph.md | 9 +- .../current/help/nebulagraph_cluster.md | 17 +- .../current/help/nginx.md | 67 +- .../current/help/ntp.md | 5 +- .../current/help/openai.md | 26 +- .../current/help/opengauss.md | 68 +- .../current/help/opensuse.md | 133 ++-- .../current/help/oracle.md | 77 +- .../current/help/ping.md | 33 +- .../current/help/plugin.md | 15 +- .../current/help/pop3.md | 28 +- .../current/help/port.md | 25 +- .../current/help/postgresql.md | 68 +- .../current/help/prestodb.md | 102 ++- 
.../current/help/process.md | 79 +- .../current/help/prometheus.md | 4 +- .../current/help/pulsar.md | 48 +- .../current/help/rabbitmq.md | 136 ++-- .../current/help/redhat.md | 19 +- .../current/help/redis.md | 401 +++++----- .../current/help/redis_cluster.md | 108 +-- .../current/help/rocketmq.md | 7 +- .../current/help/rockylinux.md | 19 +- .../current/help/shenyu.md | 111 ++- .../current/help/smtp.md | 6 +- .../current/help/spring_gateway.md | 68 +- .../current/help/springboot2.md | 60 +- .../current/help/springboot3.md | 23 +- .../current/help/sqlserver.md | 80 +- .../current/help/ssl_cert.md | 43 +- .../current/help/status.md | 40 +- .../current/help/tidb.md | 96 ++- .../current/help/time_expression.md | 15 +- .../current/help/tomcat.md | 80 +- .../current/help/ubuntu.md | 102 +-- .../current/help/udp_port.md | 5 +- .../current/help/website.md | 34 +- .../current/help/websocket.md | 5 +- .../current/help/windows.md | 43 +- .../current/help/yarn.md | 105 +-- .../current/help/zookeeper.md | 129 ++-- .../current/introduce.md | 27 +- .../current/others/design.md | 6 +- .../current/others/resource.md | 10 +- .../current/start/account-modify.md | 6 +- .../current/start/custom-config.md | 33 +- .../current/start/docker-compose-deploy.md | 21 +- .../current/start/docker-deploy.md | 29 +- .../current/start/greptime-init.md | 38 +- .../current/start/influxdb-init.md | 47 +- .../current/start/iotdb-init.md | 9 +- .../current/start/mysql-change.md | 35 +- .../current/start/package-deploy.md | 68 +- .../current/start/postgresql-change.md | 31 +- .../current/start/quickstart.md | 18 +- .../current/start/rainbond-deploy.md | 2 +- .../current/start/sslcert-practice.md | 4 - .../current/start/tdengine-init.md | 63 +- .../current/start/update-1.6.0.md | 27 +- .../current/start/upgrade.md | 16 +- .../current/start/victoria-metrics-init.md | 37 +- .../current/template.md | 24 +- .../advanced/extend-http-default.md | 24 +- .../advanced/extend-http-example-hertzbeat.md | 18 +- 
.../advanced/extend-http-example-token.md | 26 +- .../advanced/extend-http-jsonpath.md | 30 +- .../version-v1.4.x/advanced/extend-http.md | 23 +- .../version-v1.4.x/advanced/extend-jdbc.md | 53 +- .../version-v1.4.x/advanced/extend-jmx.md | 23 +- .../version-v1.4.x/advanced/extend-point.md | 13 +- .../version-v1.4.x/advanced/extend-snmp.md | 23 +- .../version-v1.4.x/advanced/extend-ssh.md | 47 +- .../advanced/extend-tutorial.md | 18 +- .../version-v1.4.x/help/activemq.md | 128 ++-- .../version-v1.4.x/help/airflow.md | 40 +- .../version-v1.4.x/help/alert_console.md | 4 +- .../version-v1.4.x/help/alert_dingtalk.md | 20 +- .../version-v1.4.x/help/alert_discord.md | 42 +- .../version-v1.4.x/help/alert_email.md | 31 +- .../help/alert_enterprise_wechat_app.md | 16 +- .../version-v1.4.x/help/alert_feishu.md | 22 +- .../version-v1.4.x/help/alert_slack.md | 27 +- .../version-v1.4.x/help/alert_telegram.md | 33 +- .../version-v1.4.x/help/alert_threshold.md | 26 +- .../help/alert_threshold_expr.md | 27 +- .../version-v1.4.x/help/alert_webhook.md | 46 +- .../version-v1.4.x/help/alert_wework.md | 28 +- .../version-v1.4.x/help/api.md | 39 +- .../version-v1.4.x/help/centos.md | 102 +-- .../version-v1.4.x/help/dm.md | 54 +- .../version-v1.4.x/help/docker.md | 92 ++- .../version-v1.4.x/help/dynamic_tp.md | 74 +- .../version-v1.4.x/help/fullsite.md | 34 +- .../version-v1.4.x/help/guide.md | 87 ++- .../version-v1.4.x/help/hadoop.md | 75 +- .../version-v1.4.x/help/hive.md | 64 +- .../version-v1.4.x/help/iotdb.md | 69 +- .../version-v1.4.x/help/issue.md | 50 +- .../version-v1.4.x/help/jetty.md | 73 +- .../version-v1.4.x/help/jvm.md | 75 +- .../version-v1.4.x/help/kafka.md | 92 ++- .../version-v1.4.x/help/kubernetes.md | 77 +- .../version-v1.4.x/help/linux.md | 102 +-- .../version-v1.4.x/help/mariadb.md | 65 +- .../version-v1.4.x/help/memcached.md | 9 +- .../version-v1.4.x/help/mysql.md | 65 +- .../version-v1.4.x/help/nebulagraph.md | 9 +- .../version-v1.4.x/help/nginx.md | 67 +- 
.../version-v1.4.x/help/ntp.md | 5 +- .../version-v1.4.x/help/opengauss.md | 68 +- .../version-v1.4.x/help/oracle.md | 77 +- .../version-v1.4.x/help/ping.md | 33 +- .../version-v1.4.x/help/pop3.md | 28 +- .../version-v1.4.x/help/port.md | 25 +- .../version-v1.4.x/help/postgresql.md | 68 +- .../version-v1.4.x/help/rabbitmq.md | 136 ++-- .../version-v1.4.x/help/redis.md | 401 +++++----- .../version-v1.4.x/help/shenyu.md | 111 ++- .../version-v1.4.x/help/smtp.md | 6 +- .../version-v1.4.x/help/spring_gateway.md | 68 +- .../version-v1.4.x/help/springboot2.md | 60 +- .../version-v1.4.x/help/sqlserver.md | 80 +- .../version-v1.4.x/help/ssl_cert.md | 43 +- .../version-v1.4.x/help/tomcat.md | 80 +- .../version-v1.4.x/help/ubuntu.md | 102 +-- .../version-v1.4.x/help/website.md | 34 +- .../version-v1.4.x/help/windows.md | 43 +- .../version-v1.4.x/help/zookeeper.md | 96 +-- .../version-v1.4.x/introduce.md | 27 +- .../version-v1.4.x/others/contact.md | 3 +- .../version-v1.4.x/others/contributing.md | 18 +- .../version-v1.4.x/others/design.md | 10 +- .../version-v1.4.x/others/developer.md | 10 +- .../version-v1.4.x/others/huaweicloud.md | 13 +- .../version-v1.4.x/others/images-deploy.md | 23 +- .../version-v1.4.x/others/resource.md | 12 +- .../version-v1.4.x/others/sponsor.md | 10 +- .../version-v1.4.x/start/account-modify.md | 6 +- .../version-v1.4.x/start/custom-config.md | 33 +- .../version-v1.4.x/start/docker-deploy.md | 117 ++- .../version-v1.4.x/start/greptime-init.md | 38 +- .../version-v1.4.x/start/influxdb-init.md | 47 +- .../version-v1.4.x/start/iotdb-init.md | 42 +- .../version-v1.4.x/start/mysql-change.md | 41 +- .../version-v1.4.x/start/package-deploy.md | 46 +- .../version-v1.4.x/start/postgresql-change.md | 23 +- .../version-v1.4.x/start/quickstart.md | 20 +- .../version-v1.4.x/start/rainbond-deploy.md | 2 +- .../version-v1.4.x/start/sslcert-practice.md | 3 - .../version-v1.4.x/start/tdengine-init.md | 65 +- .../version-v1.4.x/start/upgrade.md | 16 +- 
.../start/victoria-metrics-init.md | 37 +- .../version-v1.4.x/template.md | 24 +- .../advanced/extend-http-default.md | 24 +- .../advanced/extend-http-example-hertzbeat.md | 18 +- .../advanced/extend-http-example-token.md | 26 +- .../advanced/extend-http-jsonpath.md | 30 +- .../version-v1.5.x/advanced/extend-http.md | 23 +- .../version-v1.5.x/advanced/extend-jdbc.md | 53 +- .../version-v1.5.x/advanced/extend-jmx.md | 23 +- .../version-v1.5.x/advanced/extend-ngql.md | 8 +- .../version-v1.5.x/advanced/extend-point.md | 13 +- .../version-v1.5.x/advanced/extend-snmp.md | 23 +- .../version-v1.5.x/advanced/extend-ssh.md | 47 +- .../advanced/extend-tutorial.md | 18 +- .../community/become_committer.md | 30 +- .../community/become_pmc_member.md | 30 +- .../community/code-style-and-quality-guide.md | 638 ++++++++-------- .../version-v1.5.x/community/contact.md | 3 +- .../version-v1.5.x/community/contribution.md | 45 +- .../version-v1.5.x/community/development.md | 6 +- .../version-v1.5.x/community/document.md | 29 +- .../community/how-to-release.md | 53 +- .../version-v1.5.x/community/how-to-verify.md | 20 +- .../version-v1.5.x/community/mailing_lists.md | 42 +- .../community/new_committer_process.md | 30 +- .../community/new_pmc_member_process.md | 30 +- .../version-v1.5.x/community/submit-code.md | 80 +- .../version-v1.5.x/download.md | 9 +- .../version-v1.5.x/help/activemq.md | 128 ++-- .../version-v1.5.x/help/airflow.md | 40 +- .../version-v1.5.x/help/alert_console.md | 4 +- .../version-v1.5.x/help/alert_dingtalk.md | 20 +- .../version-v1.5.x/help/alert_discord.md | 42 +- .../version-v1.5.x/help/alert_email.md | 31 +- .../help/alert_enterprise_wechat_app.md | 16 +- .../version-v1.5.x/help/alert_feishu.md | 22 +- .../version-v1.5.x/help/alert_slack.md | 27 +- .../version-v1.5.x/help/alert_telegram.md | 33 +- .../version-v1.5.x/help/alert_threshold.md | 3 +- .../help/alert_threshold_expr.md | 33 +- .../version-v1.5.x/help/alert_webhook.md | 46 +- 
.../version-v1.5.x/help/alert_wework.md | 28 +- .../version-v1.5.x/help/almalinux.md | 141 ++-- .../version-v1.5.x/help/api.md | 43 +- .../version-v1.5.x/help/centos.md | 102 +-- .../version-v1.5.x/help/clickhouse.md | 143 ++-- .../version-v1.5.x/help/debian.md | 89 ++- .../version-v1.5.x/help/dm.md | 54 +- .../version-v1.5.x/help/dns.md | 36 +- .../version-v1.5.x/help/docker.md | 92 ++- .../version-v1.5.x/help/doris_fe.md | 124 ++-- .../version-v1.5.x/help/dynamic_tp.md | 74 +- .../version-v1.5.x/help/elasticsearch.md | 9 +- .../version-v1.5.x/help/euleros.md | 19 +- .../version-v1.5.x/help/flink.md | 6 +- .../version-v1.5.x/help/freebsd.md | 15 +- .../version-v1.5.x/help/ftp.md | 5 +- .../version-v1.5.x/help/fullsite.md | 34 +- .../version-v1.5.x/help/guide.md | 122 ++- .../version-v1.5.x/help/hadoop.md | 75 +- .../version-v1.5.x/help/hbase_master.md | 64 +- .../version-v1.5.x/help/hbase_regionserver.md | 131 ++-- .../version-v1.5.x/help/hdfs_datanode.md | 57 +- .../version-v1.5.x/help/hdfs_namenode.md | 122 +-- .../version-v1.5.x/help/hive.md | 64 +- .../version-v1.5.x/help/huawei_switch.md | 11 +- .../version-v1.5.x/help/hugegraph.md | 213 +++--- .../version-v1.5.x/help/influxdb.md | 68 +- .../version-v1.5.x/help/influxdb_promql.md | 11 +- .../version-v1.5.x/help/iotdb.md | 69 +- .../version-v1.5.x/help/issue.md | 45 +- .../version-v1.5.x/help/jetty.md | 73 +- .../version-v1.5.x/help/jvm.md | 75 +- .../version-v1.5.x/help/kafka.md | 92 ++- .../version-v1.5.x/help/kafka_promql.md | 7 +- .../version-v1.5.x/help/kubernetes.md | 77 +- .../version-v1.5.x/help/linux.md | 102 +-- .../version-v1.5.x/help/mariadb.md | 65 +- .../version-v1.5.x/help/memcached.md | 5 +- .../version-v1.5.x/help/mongodb.md | 16 +- .../version-v1.5.x/help/mysql.md | 65 +- .../version-v1.5.x/help/nacos.md | 114 +-- .../version-v1.5.x/help/nebulagraph.md | 9 +- .../help/nebulagraph_cluster.md | 17 +- .../version-v1.5.x/help/nginx.md | 67 +- .../version-v1.5.x/help/ntp.md | 5 +- 
.../version-v1.5.x/help/openai.md | 26 +- .../version-v1.5.x/help/opengauss.md | 68 +- .../version-v1.5.x/help/opensuse.md | 133 ++-- .../version-v1.5.x/help/oracle.md | 77 +- .../version-v1.5.x/help/ping.md | 33 +- .../version-v1.5.x/help/plugin.md | 13 +- .../version-v1.5.x/help/pop3.md | 28 +- .../version-v1.5.x/help/port.md | 25 +- .../version-v1.5.x/help/postgresql.md | 68 +- .../version-v1.5.x/help/process.md | 79 +- .../version-v1.5.x/help/prometheus.md | 4 +- .../version-v1.5.x/help/pulsar.md | 48 +- .../version-v1.5.x/help/rabbitmq.md | 136 ++-- .../version-v1.5.x/help/redhat.md | 19 +- .../version-v1.5.x/help/redis.md | 401 +++++----- .../version-v1.5.x/help/rocketmq.md | 7 +- .../version-v1.5.x/help/rockylinux.md | 19 +- .../version-v1.5.x/help/shenyu.md | 111 ++- .../version-v1.5.x/help/smtp.md | 6 +- .../version-v1.5.x/help/spring_gateway.md | 68 +- .../version-v1.5.x/help/springboot2.md | 60 +- .../version-v1.5.x/help/springboot3.md | 23 +- .../version-v1.5.x/help/sqlserver.md | 80 +- .../version-v1.5.x/help/ssl_cert.md | 43 +- .../version-v1.5.x/help/tidb.md | 50 +- .../version-v1.5.x/help/time_expression.md | 15 +- .../version-v1.5.x/help/tomcat.md | 80 +- .../version-v1.5.x/help/ubuntu.md | 102 +-- .../version-v1.5.x/help/udp_port.md | 5 +- .../version-v1.5.x/help/website.md | 34 +- .../version-v1.5.x/help/websocket.md | 5 +- .../version-v1.5.x/help/windows.md | 43 +- .../version-v1.5.x/help/yarn.md | 105 +-- .../version-v1.5.x/help/zookeeper.md | 96 +-- .../version-v1.5.x/introduce.md | 27 +- .../version-v1.5.x/others/design.md | 6 +- .../version-v1.5.x/others/resource.md | 10 +- .../version-v1.5.x/start/account-modify.md | 6 +- .../version-v1.5.x/start/custom-config.md | 33 +- .../version-v1.5.x/start/docker-deploy.md | 117 ++- .../version-v1.5.x/start/greptime-init.md | 38 +- .../version-v1.5.x/start/influxdb-init.md | 47 +- .../version-v1.5.x/start/iotdb-init.md | 9 +- .../version-v1.5.x/start/mysql-change.md | 41 +- 
.../version-v1.5.x/start/package-deploy.md | 48 +- .../version-v1.5.x/start/postgresql-change.md | 23 +- .../version-v1.5.x/start/quickstart.md | 20 +- .../version-v1.5.x/start/rainbond-deploy.md | 2 +- .../version-v1.5.x/start/sslcert-practice.md | 4 - .../version-v1.5.x/start/tdengine-init.md | 65 +- .../version-v1.5.x/start/upgrade.md | 16 +- .../start/victoria-metrics-init.md | 37 +- .../version-v1.5.x/template.md | 24 +- .../advanced/extend-http-default.md | 25 +- .../advanced/extend-http-example-hertzbeat.md | 18 +- .../advanced/extend-http-example-token.md | 15 +- .../advanced/extend-http-jsonpath.md | 28 +- .../version-v1.4.x/advanced/extend-http.md | 19 +- .../version-v1.4.x/advanced/extend-jdbc.md | 54 +- .../version-v1.4.x/advanced/extend-jmx.md | 12 +- .../version-v1.4.x/advanced/extend-point.md | 23 +- .../version-v1.4.x/advanced/extend-snmp.md | 11 +- .../version-v1.4.x/advanced/extend-ssh.md | 46 +- .../advanced/extend-tutorial.md | 16 +- .../version-v1.4.x/help/activemq.md | 129 ++-- .../version-v1.4.x/help/airflow.md | 40 +- .../version-v1.4.x/help/alert_console.md | 4 +- .../version-v1.4.x/help/alert_dingtalk.md | 20 +- .../version-v1.4.x/help/alert_discord.md | 8 +- .../version-v1.4.x/help/alert_email.md | 29 +- .../help/alert_enterprise_wechat_app.md | 18 +- .../version-v1.4.x/help/alert_feishu.md | 22 +- .../version-v1.4.x/help/alert_slack.md | 3 +- .../version-v1.4.x/help/alert_telegram.md | 3 +- .../version-v1.4.x/help/alert_threshold.md | 26 +- .../help/alert_threshold_expr.md | 27 +- .../version-v1.4.x/help/alert_webhook.md | 46 +- .../version-v1.4.x/help/alert_wework.md | 26 +- .../versioned_docs/version-v1.4.x/help/api.md | 42 +- .../version-v1.4.x/help/centos.md | 102 +-- home/versioned_docs/version-v1.4.x/help/dm.md | 54 +- .../version-v1.4.x/help/docker.md | 96 ++- .../version-v1.4.x/help/doris_be.md | 171 ++--- .../version-v1.4.x/help/doris_fe.md | 128 ++-- .../version-v1.4.x/help/dynamic_tp.md | 45 +- 
.../version-v1.4.x/help/fullsite.md | 41 +- .../version-v1.4.x/help/guide.md | 37 +- .../version-v1.4.x/help/hadoop.md | 13 +- .../version-v1.4.x/help/hive.md | 69 +- .../version-v1.4.x/help/iotdb.md | 45 +- .../version-v1.4.x/help/issue.md | 47 +- .../version-v1.4.x/help/jetty.md | 9 +- .../versioned_docs/version-v1.4.x/help/jvm.md | 8 +- .../version-v1.4.x/help/kafka.md | 92 ++- .../version-v1.4.x/help/kubernetes.md | 86 +-- .../version-v1.4.x/help/linux.md | 102 +-- .../version-v1.4.x/help/mariadb.md | 65 +- .../version-v1.4.x/help/memcached.md | 9 +- .../version-v1.4.x/help/mysql.md | 43 +- .../version-v1.4.x/help/nebulagraph.md | 9 +- .../version-v1.4.x/help/nginx.md | 46 +- .../versioned_docs/version-v1.4.x/help/ntp.md | 4 +- .../version-v1.4.x/help/opengauss.md | 70 +- .../version-v1.4.x/help/oracle.md | 53 +- .../version-v1.4.x/help/ping.md | 29 +- .../version-v1.4.x/help/pop3.md | 12 +- .../version-v1.4.x/help/port.md | 30 +- .../version-v1.4.x/help/postgresql.md | 68 +- .../version-v1.4.x/help/rabbitmq.md | 50 +- .../version-v1.4.x/help/redis.md | 401 +++++----- .../version-v1.4.x/help/shenyu.md | 103 ++- .../version-v1.4.x/help/smtp.md | 5 +- .../version-v1.4.x/help/spark.md | 19 +- .../version-v1.4.x/help/spring_gateway.md | 70 +- .../version-v1.4.x/help/springboot2.md | 61 +- .../version-v1.4.x/help/sqlserver.md | 70 +- .../version-v1.4.x/help/ssl_cert.md | 35 +- .../version-v1.4.x/help/tomcat.md | 75 +- .../version-v1.4.x/help/ubuntu.md | 102 +-- .../version-v1.4.x/help/website.md | 35 +- .../version-v1.4.x/help/windows.md | 43 +- .../version-v1.4.x/help/zookeeper.md | 97 +-- .../version-v1.4.x/introduce.md | 24 +- .../version-v1.4.x/others/contact.md | 3 +- .../version-v1.4.x/others/contributing.md | 22 +- .../version-v1.4.x/others/design.md | 10 +- .../version-v1.4.x/others/developer.md | 17 +- .../version-v1.4.x/others/hertzbeat.md | 59 +- .../version-v1.4.x/others/huaweicloud.md | 13 +- .../version-v1.4.x/others/images-deploy.md | 23 +- 
.../version-v1.4.x/others/resource.md | 12 +- .../version-v1.4.x/others/sponsor.md | 14 +- .../version-v1.4.x/start/account-modify.md | 5 +- .../version-v1.4.x/start/custom-config.md | 10 +- .../version-v1.4.x/start/docker-deploy.md | 111 +-- .../version-v1.4.x/start/greptime-init.md | 32 +- .../version-v1.4.x/start/influxdb-init.md | 53 +- .../version-v1.4.x/start/iotdb-init.md | 46 +- .../version-v1.4.x/start/mysql-change.md | 31 +- .../version-v1.4.x/start/package-deploy.md | 34 +- .../version-v1.4.x/start/postgresql-change.md | 15 +- .../version-v1.4.x/start/quickstart.md | 10 +- .../version-v1.4.x/start/rainbond-deploy.md | 2 +- .../version-v1.4.x/start/sslcert-practice.md | 12 - .../version-v1.4.x/start/tdengine-init.md | 88 ++- .../version-v1.4.x/start/upgrade.md | 17 +- .../start/victoria-metrics-init.md | 33 +- .../versioned_docs/version-v1.4.x/template.md | 24 +- .../advanced/extend-http-default.md | 25 +- .../advanced/extend-http-example-hertzbeat.md | 18 +- .../advanced/extend-http-example-token.md | 15 +- .../advanced/extend-http-jsonpath.md | 28 +- .../version-v1.5.x/advanced/extend-http.md | 19 +- .../version-v1.5.x/advanced/extend-jdbc.md | 54 +- .../version-v1.5.x/advanced/extend-jmx.md | 12 +- .../version-v1.5.x/advanced/extend-ngql.md | 8 +- .../version-v1.5.x/advanced/extend-point.md | 23 +- .../version-v1.5.x/advanced/extend-snmp.md | 11 +- .../version-v1.5.x/advanced/extend-ssh.md | 46 +- .../advanced/extend-tutorial.md | 16 +- .../community/become_committer.md | 30 +- .../community/become_pmc_member.md | 30 +- .../community/code-style-and-quality-guide.md | 700 +++++++++--------- .../version-v1.5.x/community/contact.md | 2 +- .../version-v1.5.x/community/contribution.md | 47 +- .../version-v1.5.x/community/development.md | 8 +- .../version-v1.5.x/community/document.md | 29 +- .../community/how-to-release.md | 52 +- .../version-v1.5.x/community/how-to-verify.md | 17 +- .../version-v1.5.x/community/mailing_lists.md | 41 +- 
.../community/new_committer_process.md | 36 +- .../community/new_pmc_member_process.md | 30 +- .../version-v1.5.x/community/submit-code.md | 68 +- .../versioned_docs/version-v1.5.x/download.md | 5 +- .../version-v1.5.x/help/activemq.md | 129 ++-- .../version-v1.5.x/help/airflow.md | 40 +- .../version-v1.5.x/help/alert_console.md | 4 +- .../version-v1.5.x/help/alert_dingtalk.md | 20 +- .../version-v1.5.x/help/alert_discord.md | 8 +- .../version-v1.5.x/help/alert_email.md | 29 +- .../help/alert_enterprise_wechat_app.md | 18 +- .../version-v1.5.x/help/alert_feishu.md | 22 +- .../version-v1.5.x/help/alert_slack.md | 3 +- .../version-v1.5.x/help/alert_telegram.md | 3 +- .../version-v1.5.x/help/alert_threshold.md | 1 + .../help/alert_threshold_expr.md | 64 +- .../version-v1.5.x/help/alert_webhook.md | 46 +- .../version-v1.5.x/help/alert_wework.md | 26 +- .../version-v1.5.x/help/almalinux.md | 43 +- .../versioned_docs/version-v1.5.x/help/api.md | 46 +- .../version-v1.5.x/help/centos.md | 102 +-- .../version-v1.5.x/help/clickhouse.md | 142 ++-- .../version-v1.5.x/help/debian.md | 27 +- home/versioned_docs/version-v1.5.x/help/dm.md | 54 +- .../versioned_docs/version-v1.5.x/help/dns.md | 13 +- .../version-v1.5.x/help/docker.md | 96 ++- .../version-v1.5.x/help/doris_be.md | 171 ++--- .../version-v1.5.x/help/doris_fe.md | 75 +- .../version-v1.5.x/help/dynamic_tp.md | 45 +- .../version-v1.5.x/help/elasticsearch.md | 7 +- .../version-v1.5.x/help/euleros.md | 13 +- .../version-v1.5.x/help/flink.md | 19 +- .../version-v1.5.x/help/freebsd.md | 7 +- .../versioned_docs/version-v1.5.x/help/ftp.md | 5 +- .../version-v1.5.x/help/fullsite.md | 41 +- .../version-v1.5.x/help/guide.md | 37 +- .../version-v1.5.x/help/hadoop.md | 13 +- .../version-v1.5.x/help/hbase_master.md | 24 +- .../version-v1.5.x/help/hbase_regionserver.md | 26 +- .../version-v1.5.x/help/hdfs_datanode.md | 55 +- .../version-v1.5.x/help/hdfs_namenode.md | 121 +-- .../version-v1.5.x/help/hive.md | 69 +- 
.../version-v1.5.x/help/http_sd.md | 26 +- .../version-v1.5.x/help/huawei_switch.md | 11 +- .../version-v1.5.x/help/hugegraph.md | 235 +++--- .../version-v1.5.x/help/influxdb.md | 77 +- .../version-v1.5.x/help/influxdb_promql.md | 11 +- .../version-v1.5.x/help/iotdb.md | 45 +- .../version-v1.5.x/help/issue.md | 45 +- .../version-v1.5.x/help/jetty.md | 9 +- .../versioned_docs/version-v1.5.x/help/jvm.md | 8 +- .../version-v1.5.x/help/kafka.md | 92 ++- .../version-v1.5.x/help/kafka_promql.md | 9 +- .../version-v1.5.x/help/kubernetes.md | 86 +-- .../version-v1.5.x/help/linux.md | 102 +-- .../version-v1.5.x/help/mariadb.md | 65 +- .../version-v1.5.x/help/memcached.md | 9 +- .../version-v1.5.x/help/mongodb.md | 16 +- .../version-v1.5.x/help/mysql.md | 43 +- .../version-v1.5.x/help/nacos.md | 114 +-- .../version-v1.5.x/help/nebulagraph.md | 9 +- .../help/nebulagraph_cluster.md | 11 +- .../version-v1.5.x/help/nginx.md | 46 +- .../versioned_docs/version-v1.5.x/help/ntp.md | 4 +- .../version-v1.5.x/help/openai.md | 10 +- .../version-v1.5.x/help/opengauss.md | 70 +- .../version-v1.5.x/help/opensuse.md | 130 ++-- .../version-v1.5.x/help/oracle.md | 53 +- .../version-v1.5.x/help/ping.md | 29 +- .../version-v1.5.x/help/plugin.md | 5 +- .../version-v1.5.x/help/pop3.md | 12 +- .../version-v1.5.x/help/port.md | 30 +- .../version-v1.5.x/help/postgresql.md | 68 +- .../version-v1.5.x/help/process.md | 45 +- .../version-v1.5.x/help/prometheus.md | 4 +- .../version-v1.5.x/help/pulsar.md | 46 +- .../version-v1.5.x/help/rabbitmq.md | 50 +- .../version-v1.5.x/help/redhat.md | 13 +- .../version-v1.5.x/help/redis.md | 401 +++++----- .../version-v1.5.x/help/rocketmq.md | 7 +- .../version-v1.5.x/help/rockylinux.md | 13 +- .../version-v1.5.x/help/shenyu.md | 103 ++- .../version-v1.5.x/help/smtp.md | 5 +- .../version-v1.5.x/help/spark.md | 19 +- .../version-v1.5.x/help/spring_gateway.md | 70 +- .../version-v1.5.x/help/springboot2.md | 61 +- .../version-v1.5.x/help/springboot3.md | 13 +- 
.../version-v1.5.x/help/sqlserver.md | 70 +- .../version-v1.5.x/help/ssl_cert.md | 35 +- .../version-v1.5.x/help/tidb.md | 50 +- .../version-v1.5.x/help/time_expression.md | 41 +- .../version-v1.5.x/help/tomcat.md | 75 +- .../version-v1.5.x/help/ubuntu.md | 102 +-- .../version-v1.5.x/help/udp_port.md | 6 +- .../version-v1.5.x/help/website.md | 35 +- .../version-v1.5.x/help/websocket.md | 5 +- .../version-v1.5.x/help/windows.md | 43 +- .../version-v1.5.x/help/yarn.md | 103 +-- .../version-v1.5.x/help/zookeeper.md | 97 +-- .../version-v1.5.x/introduce.md | 24 +- .../version-v1.5.x/others/design.md | 6 +- .../version-v1.5.x/others/resource.md | 16 +- .../version-v1.5.x/start/account-modify.md | 11 +- .../version-v1.5.x/start/custom-config.md | 10 +- .../version-v1.5.x/start/docker-deploy.md | 111 +-- .../version-v1.5.x/start/greptime-init.md | 32 +- .../version-v1.5.x/start/influxdb-init.md | 54 +- .../version-v1.5.x/start/iotdb-init.md | 7 +- .../version-v1.5.x/start/mysql-change.md | 32 +- .../version-v1.5.x/start/package-deploy.md | 31 +- .../version-v1.5.x/start/postgresql-change.md | 15 +- .../version-v1.5.x/start/quickstart.md | 10 +- .../version-v1.5.x/start/rainbond-deploy.md | 2 +- .../version-v1.5.x/start/sslcert-practice.md | 13 - .../version-v1.5.x/start/tdengine-init.md | 88 ++- .../version-v1.5.x/start/upgrade.md | 17 +- .../start/victoria-metrics-init.md | 32 +- .../versioned_docs/version-v1.5.x/template.md | 24 +- pom.xml | 34 +- 838 files changed, 20028 insertions(+), 20260 deletions(-) diff --git a/home/README.md b/home/README.md index ae1b5ccb71e..0904a4af224 100644 --- a/home/README.md +++ b/home/README.md @@ -1,6 +1,6 @@ # HertzBeat Website -This website is built with [Docusaurus](https://docusaurus.io/). +This website is built with [Docusaurus](https://docusaurus.io/). 
## Installation @@ -55,7 +55,6 @@ GITHUB_HOST=gitee.com USE_SSH=true yarn deploy yarn docusaurus docs:version v1.5.x ``` - ## Directory Structure ```html @@ -85,3 +84,4 @@ yarn docusaurus docs:version v1.5.x |-- docusaurus.config.js |-- sidebars.js // document sidebar menu configuration ``` + diff --git a/home/blog/2022-06-01-hertzbeat-v1.0.md b/home/blog/2022-06-01-hertzbeat-v1.0.md index 8350ddb5825..8338eb93a57 100644 --- a/home/blog/2022-06-01-hertzbeat-v1.0.md +++ b/home/blog/2022-06-01-hertzbeat-v1.0.md @@ -4,7 +4,7 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- HertzBeat, incubated by Dromara and open-sourced by TanCloud, is an open-source monitoring and alerting project that supports a variety of monitoring types including websites, APIs, PING, ports, databases, full-site, operating systems, middleware, etc. It supports threshold alarms and notification alerts (email, webhook, DingTalk, WeCom, Feishu robots) and has an easy-to-use, friendly visual operation interface. @@ -66,16 +66,15 @@ Redis monitor is coming: > [HertzBeat](https://github.com/apache/hertzbeat), incubated by [Dromara](https://dromara.org) and open-sourced by [TanCloud](https://tancloud.cn), is an open-source monitoring and alerting project with a user-friendly visual interface that supports monitoring types such as websites, APIs, PING, ports, databases, operating systems, and more. > Of course, we also provide a corresponding [SaaS cloud monitoring version](https://console.tancloud.cn), so small and medium-sized teams and individuals no longer need to deploy a cumbersome monitoring system to monitor their website resources; you can [log in](https://console.tancloud.cn) to start monitoring for free. 
- +> > HertzBeat supports custom monitoring; by configuring the YML file, we can customize the required monitoring types and metrics to meet common personalized needs. > HertzBeat is modular, with manager, collector, scheduler, warehouse, alerter modules decoupled, making it easy to understand and customize for development. > HertzBeat supports more flexible alarm configurations (calculation expressions), supports alarm notifications, alarm templates, and timely delivery of notifications via email, DingTalk, WeChat, Feishu, etc. > Feel free to log in to HertzBeat's cloud environment, [TanCloud](https://console.tancloud.cn), to try it out and discover more. > We are rapidly iterating and welcome participation in joining and contributing to the open-source ecosystem. - +> > The multi-type support, easy expansion, and low coupling of `HertzBeat` aim to help developers and small to medium-sized teams quickly build their own monitoring systems. - **Repository url** [Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat diff --git a/home/blog/2022-06-19-hertzbeat-v1.1.0.md b/home/blog/2022-06-19-hertzbeat-v1.1.0.md index 669cd8fc1a0..cbc3102db5c 100644 --- a/home/blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/blog/2022-06-19-hertzbeat-v1.1.0.md @@ -35,7 +35,7 @@ Bugfixes: Online at https://console.tancloud.cn. ------------------------ +--- Windows Monitor coming: ![2022-06-19 11:30:57](https://user-images.githubusercontent.com/24788200/174481159-b8a73c87-aff5-4c4c-befb-bd0d26685d71.png) @@ -54,13 +54,14 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` +``` Have Fun! ---- ## V1.1.0 + Home: hertzbeat.com | tancloud.cn Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. @@ -75,7 +76,7 @@ Feature: 2. 
[[monitor]change default database mysql to h2 #191](https://github.com/apache/hertzbeat/pull/191) 3. [[manager]support monitor params name i18n #184](https://github.com/apache/hertzbeat/pull/184). 4. [[script]build multi cpu arch hertzbeat docker version #189](https://github.com/apache/hertzbeat/pull/189). -5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time +5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time 6. [[monitor]database tables append prefix hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy Bugfix. @@ -87,12 +88,12 @@ Bugfix. Online https://console.tancloud.cn. ------------------------ +--- + Windows Monitor coming: 2022-06-19 11 30 57 - ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. ``` @@ -107,12 +108,10 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` +``` Have Fun! - - Have Fun! ---- @@ -124,7 +123,7 @@ Have Fun! > HertzBeat supports more flexible alarm configurations (calculation expressions), notification alerts, templates, and real-time delivery via email, DingTalk, WeChat, Lark, etc. > Welcome to try and discover more in HertzBeat's [cloud environment TanCloud](https://console.tancloud.cn). > We are rapidly iterating and welcome participation to join in co-building the open-source ecosystem. - +> > HertzBeat's support for multiple types, easy expansion, and low coupling hopes to help developers and small and medium teams quickly build their own monitoring systems. 
**Repository Addresses** diff --git a/home/blog/2022-06-22-one-step-up.md b/home/blog/2022-06-22-one-step-up.md index 59518f2e679..9c60d422482 100644 --- a/home/blog/2022-06-22-one-step-up.md +++ b/home/blog/2022-06-22-one-step-up.md @@ -35,7 +35,7 @@ Bugfixes: Online at [https://console.tancloud.cn](https://console.tancloud.cn). ------------------------ +--- Windows Monitor is coming: ![2022-06-19 11:30:57](https://user-images.githubusercontent.com/24788200/174481159-b8a73c87-aff5-4c4c-befb-bd0d26685d71.png) @@ -54,13 +54,14 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` +``` Have Fun! ---- ## V1.1.0 + Home: hertzbeat.com | tancloud.cn Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. @@ -75,7 +76,7 @@ Feature: 2. [[monitor]change default database mysql to h2 #191](https://github.com/apache/hertzbeat/pull/191) 3. [[manager]support monitor params name i18n #184](https://github.com/apache/hertzbeat/pull/184). 4. [[script]build multi cpu arch hertzbeat docker version #189](https://github.com/apache/hertzbeat/pull/189). -5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time +5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time 6. [[monitor]database tables append prefix hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy Bugfix. @@ -87,12 +88,12 @@ Bugfix. Online https://console.tancloud.cn. ------------------------ +--- + Windows Monitor coming: 2022-06-19 11 30 57 - ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. 
``` @@ -107,7 +108,7 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` +``` Have Fun! @@ -120,7 +121,7 @@ Have Fun! > HertzBeat supports more flexible alarm configurations (calculation expressions) and alarm notifications, including alarm templates, emails, DingTalk, WeChat, Feishu, etc., for timely delivery of notifications. > Welcome to log in to HertzBeat's [cloud environment TanCloud](https://console.tancloud.cn) to try and discover more. > We are in rapid iteration and welcome participation in co-building the open-source ecosystem. - +> > HertzBeat's support for multiple types, easy expansion, and low coupling aims to help developers and small and medium teams quickly build their own monitoring systems. **Repository Addresses** diff --git a/home/blog/2022-07-10-hertzbeat-v1.1.1.md b/home/blog/2022-07-10-hertzbeat-v1.1.1.md index 7139cbfd0a4..c4a87a40c3d 100644 --- a/home/blog/2022-07-10-hertzbeat-v1.1.1.md +++ b/home/blog/2022-07-10-hertzbeat-v1.1.1.md @@ -4,12 +4,12 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- > Friendly Cloud Monitoring Tool. | 易用友好的实时监控工具, 无需Agent, 强大自定义监控能力. -**Home: [hertzbeat.com](https://hertzbeat.com)** +**Home: [hertzbeat.com](https://hertzbeat.com)** Hi guys! HertzBeat v1.1.1 is coming. This version brings custom monitoring enhancements, and the collected metric data can be assigned as a variable to the next collection. Fixed several bugs and improved the overall stable usability. @@ -24,7 +24,7 @@ Feature: 2. [[monitor] support use pre collect metrics data to replace next metrics config params #206](https://github.com/apache/hertzbeat/pull/206). 3. [[collector] use basic auth with raw HTTP headers instead of preemptive #212](https://github.com/apache/hertzbeat/pull/212) 4. 
[[manager,alerter] support wework, dingDing, flyBook webhook url config #213](https://github.com/apache/hertzbeat/pull/213) -5. [[monitor] feature update numeric metrics all contain decimal points #217](https://github.com/apache/hertzbeat/pull/217) +5. [[monitor] feature update numeric metrics all contain decimal points #217](https://github.com/apache/hertzbeat/pull/217) 6. [[web-app]feature:toggle [enable and cancel] button #218](https://github.com/apache/hertzbeat/pull/218) 7. [[manager] update define yml file name add prefix "app" or "param" #221](https://github.com/apache/hertzbeat/pull/221) @@ -53,7 +53,7 @@ Have Fun! > HertzBeat is modular, `manager, collector, scheduler, warehouse, alerter` modules are decoupled for easy understanding and custom development. > Welcome to HertzBeat's [Cloud Environment TanCloud](https://console.tancloud.cn) to try and discover more. > Welcome to join us to build hertzbeat together. - +> > `HertzBeat`'s multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring tool. **If you like HertzBeat, star us on GitHub** diff --git a/home/blog/2022-09-04-hertzbeat-v1.1.3.md b/home/blog/2022-09-04-hertzbeat-v1.1.3.md index b4076083e8b..3cec823079b 100644 --- a/home/blog/2022-09-04-hertzbeat-v1.1.3.md +++ b/home/blog/2022-09-04-hertzbeat-v1.1.3.md @@ -4,9 +4,11 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- + ## V1.1.3 + Home: hertzbeat.com | tancloud.cn Hi guys! HertzBeat v1.1.3 is coming. This version supports kafka monitor, ssl certificate expired monitor and more. Fixed several bugs and improved the overall stable usability. @@ -22,9 +24,9 @@ Feature: 2. [[monitor] feature: support apache kafka monitor #263](https://github.com/apache/hertzbeat/pull/263) contribute by @wang1027-wqh 3. 
[[webapp] support history chart query 3 mouth time range #265](https://github.com/apache/hertzbeat/pull/265) issue by @ericfrol 4. [[monitor] support ssl certificate expired monitor #266](https://github.com/apache/hertzbeat/pull/266) suggest by @noear -5. [[web-app] update default interval 600s to 120s #268](https://github.com/apache/hertzbeat/pull/268) -6. [[web-app] update layout ui - help button, nav menu #272](https://github.com/apache/hertzbeat/pull/272) -7. [[alert,webapp] support delete all alerts at once. #273](https://github.com/apache/hertzbeat/pull/273) issue by @ericfrol +5. [[web-app] update default interval 600s to 120s #268](https://github.com/apache/hertzbeat/pull/268) +6. [[web-app] update layout ui - help button, nav menu #272](https://github.com/apache/hertzbeat/pull/272) +7. [[alert,webapp] support delete all alerts at once. #273](https://github.com/apache/hertzbeat/pull/273) issue by @ericfrol 8. [[web-app] update home background image #276](https://github.com/apache/hertzbeat/pull/276) Bugfix. @@ -37,4 +39,5 @@ Bugfix. Online https://console.tancloud.cn. Have Fun! ----- +--------- + diff --git a/home/blog/2022-09-10-ssl-practice.md b/home/blog/2022-09-10-ssl-practice.md index e0bd7337753..5c0525f8c16 100644 --- a/home/blog/2022-09-10-ssl-practice.md +++ b/home/blog/2022-09-10-ssl-practice.md @@ -4,7 +4,7 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource, practice] +tags: [opensource, practice] --- First of all, I would like to wish all the students who see it a happy mid-autumn festival, good health, and try to get rich on the basis of good health. @@ -38,7 +38,6 @@ gitee: https://gitee.com/hertzbeat/hertzbeat > System Page -> Monitor Menu -> SSL Certificates -> New SSL Certificate - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/bd53f343a5b54feab62e71458d076441~tplv-k3u1fbpfcp-zoom-1.image) 2. 
Configure monitoring website @@ -52,52 +51,38 @@ gitee: https://gitee.com/hertzbeat/hertzbeat > You can view the task status in the monitor list, and go into the monitor details to view the metrics data graphs etc. - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/f874b45e909c4bb0acdd28b3fb034a61~tplv-k3u1fbpfcp-zoom-1.image) - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ef5d7443f8c04818ae5aa28d421203be~tplv-k3u1fbpfcp-zoom-1.image) - - 4. Set the threshold (triggered when the certificate expires) > System Page -> Alarms -> Alarm Thresholds -> Add Thresholds - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8d6205172d43463aa34e534477f132f1~tplv-k3u1fbpfcp-zoom-1.image) > Configure thresholds, select SSL certificate indicator object, configure alert expression - triggered when indicator `expired` is `true`, i.e. `equals(expired, "true")` , set alert level notification template message etc. - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/83d17b381d994f26a6240e01915b2001~tplv-k3u1fbpfcp-zoom-1.image) > Associate thresholds with monitors, set which monitors this threshold should be applied to in the threshold list. - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9b9063d7bcf9454387be0491fc382bd1~tplv-k3u1fbpfcp-zoom-1.image) - - - 5. set the threshold (triggered one week before certificate expiration) > Same as above, add a new configuration threshold, configure the alert expression - when the indicator validity timestamp `end_timestamp`, `now()` function for the current timestamp, if you configure to trigger the alert one week in advance i.e.: `end_timestamp <= (now() + 604800000)` , where `604800000` is the total time difference of 7 days. milliseconds. - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/0d6f837f57c247e09f668f60eff4a0ff~tplv-k3u1fbpfcp-zoom-1.image) > Eventually you can see the triggered alarms in the alarm center. 
- ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/5a61b23127524976b2c209ce0ca6a339~tplv-k3u1fbpfcp-zoom-1.image) - 6. Alarm notification (timely notification via NailWeChatFlysheet, etc.) > Monitoring System -> Alert Notification -> Add Recipients - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7f36956060ef410a82bbecafcbb2957f~tplv-k3u1fbpfcp-zoom-1.image) You can refer to the help file for the token configuration of Nail WeChat Flying Book, etc. @@ -107,7 +92,6 @@ https://tancloud.cn/docs/help/alert_dingtalk > Alert Notification -> Add new alert notification policy -> Enable notification for the recipients you just configured - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/d976343e81f843138344a039f3aff8a3~tplv-k3u1fbpfcp-zoom-1.image) 7. OK When the threshold is triggered, we can receive the corresponding alarm message, if there is no notification, you can also view the alarm information in the alarm center. diff --git a/home/blog/2022-10-08-hertzbeat-v1.2.0.md b/home/blog/2022-10-08-hertzbeat-v1.2.0.md index 90e88793232..7137abeb7ca 100644 --- a/home/blog/2022-10-08-hertzbeat-v1.2.0.md +++ b/home/blog/2022-10-08-hertzbeat-v1.2.0.md @@ -58,13 +58,15 @@ Have Fun! ### Upgrade ⚠️ Need Convert `application.yml`. + ``` spring. resources: static-locations. static-locations. - classpath:/dist/ - classpath:... /dist/ -``` +``` + To `` spring. @@ -73,6 +75,9 @@ resources: static-locations. static-locations. - classpath:/dist/ - classpath:... /dist/ + ``` ---- +``` + diff --git a/home/blog/2022-11-28-hertzbeat-v1.2.2.md b/home/blog/2022-11-28-hertzbeat-v1.2.2.md index b546c676773..be3b0b5bc75 100644 --- a/home/blog/2022-11-28-hertzbeat-v1.2.2.md +++ b/home/blog/2022-11-28-hertzbeat-v1.2.2.md @@ -8,6 +8,7 @@ tags: [opensource] --- ## v1.2.2 + Home: hertzbeat.com | tancloud.cn Hi guys! HertzBeat v1.2.2 is coming. This release brings significant features. 
This version we support monitor kubernetes, docker, springboot, nacos and database dm, opengauss and more. Also we bring an experimental feature, users can custom define metrics collect from prometheus with promql. Fixed several bugs and improved the overall stable usability. And more, linux monitor we support top10 cpu usage metrics, top10 memory usage metrics. @@ -40,7 +41,6 @@ Feature: 16. [[hertzbeat] update use PromQL to collect metrics from prometheus server #456](https://github.com/apache/hertzbeat/pull/456) 17. [[manager] support custom monitor api response data code #460](https://github.com/apache/hertzbeat/pull/460) - Bugfix. 1. [【bugfix#408】if logs dir not exist, create logs dir #409](https://github.com/apache/hertzbeat/pull/409) @Ceilzcx @@ -58,6 +58,7 @@ Have Fun! ---- ## V1.2.2 + 官网: hertzbeat.com | tancloud.cn 大家好,HertzBeat v1.2.2发布啦!这个版本带来个超多重大更新,我们支持了对云原生kubernets, docker的监控,支持了对springboot应用, nacos注册发现中心,达梦数据库,opengauss数据库等的指标监控。我们也引入了一个实验性特性,用户可以使用promethues promql 从promethues server拿取指标数据作为hertzbeat自定义监控指标数据。当然我们也新增了多个测试用户覆盖,修复了多个BUG。还有个很多用户想要的更新,我们新增了对linux监控的top10 cpu 内存利用率的进程监控指标。有个这个指标,我们就可以干很多事情。比如监控某个进程CPU异常,内存爆满啥的。快来试试吧! @@ -75,8 +76,8 @@ Feature: 2. [[home] add DM db document supplement #411](https://github.com/apache/hertzbeat/pull/411) @TJxiaobao 3. [[home] support algolia search #416](https://github.com/apache/hertzbeat/pull/416) 4. [[collector] support trigger and grading multiple subtasks through -_- placeholder expression #418](https://github.com/apache/hertzbeat/pull/418) -5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu -6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) +5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu +6. 
[[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) 7. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan 8. [[#358][manager] Add unit test manager/service/NoticeConfigServiceTest.java #427](https://github.com/apache/hertzbeat/pull/427) @haibo-duan 9. [[#356][manager] unit test case of manager/service/MailServiceTest.java #432](https://github.com/apache/hertzbeat/pull/432) @csyshu @@ -89,7 +90,6 @@ Feature: 16. [[hertzbeat] update use PromQL to collect metrics from prometheus server #456](https://github.com/apache/hertzbeat/pull/456) 17. [[manager] support custom monitor api response data code #460](https://github.com/apache/hertzbeat/pull/460) - Bugfix. 1. [【bugfix#408】if logs dir not exist, create logs dir #409](https://github.com/apache/hertzbeat/pull/409) @Ceilzcx @@ -101,3 +101,4 @@ Bugfix. 7. [[home] fix typo in springboot2.md #464](https://github.com/apache/hertzbeat/pull/464) @eltociear ---- + diff --git a/home/blog/2022-12-19-new-committer.md b/home/blog/2022-12-19-new-committer.md index 04a7e28d849..34df92ffbd4 100644 --- a/home/blog/2022-12-19-new-committer.md +++ b/home/blog/2022-12-19-new-committer.md @@ -9,7 +9,6 @@ tags: [opensource] > 非常高兴 HertzBeat 迎来了两位新晋社区Committer, 两位都是来自互联网公司的开发工程师,让我们来了解下他们的开源经历吧! - ## 第一位 花城 姓名:王庆华 @@ -25,18 +24,18 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) 说起来挺偶然的,结识hertzbeat是因为我大学的毕业设计,当时在一家互联网公司实习,那个时候第一次看到了企业是怎么监控项目的,不管是系统监控、业务监控还是物联网iot监控,那个时候见世面不广,只知道Prometheus + Grafana,但是学起来、用起来成本比较高,那个时候就觉得应该有其他类型的监控,恰好,到了大学毕业设计选题,我就开始寻找这方面的开源项目,那个时候我们小组正在使用Shen Yu网关,我就看了下社区,发现了hertzbeat,自此我便于它结缘了。 ## 开始提交PR + 到了2022-02-18 我开始提交了我第一个pr,当时只是为了优化一些controller入参的格式,没有什么技术含量,但是这是我接触开源的第一步,让我在从理论学习跨出了实践的一步 ## 持续的开源贡献与收获 到目前为止,参与hertzbeat开源项目已有半年多时间,贡献了许多,也成长收获了许多。具体如下: -1. 见证了hertzbeat的贡献值从0到1 -2. 兼容了zookeeper、JVM、Kafka等监控功能 -3. 实现了hertzbeat项目的国际化 -4. 参与了开源之夏并顺利结项 -5. 增加了监控系统的基础告警功能: 钉钉、飞书、企业微信、邮箱等 - +1. 
见证了hertzbeat的贡献值从0到1 +2. 兼容了zookeeper、JVM、Kafka等监控功能 +3. 实现了hertzbeat项目的国际化 +4. 参与了开源之夏并顺利结项 +5. 增加了监控系统的基础告警功能: 钉钉、飞书、企业微信、邮箱等 ## 感谢社区小伙伴 @@ -44,14 +43,13 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) ## 对新人的一点建议 -1. 不要觉得自己一上手就能接触到核心,心急吃不了热豆腐 -2. 不要只注重看代码,要上手搭建、使用 -3. 有想法就大胆尝试,不管自己的方案是否完善 -4. 多多关注开源,了解社区动态,多和开源开发者交流 - +1. 不要觉得自己一上手就能接触到核心,心急吃不了热豆腐 +2. 不要只注重看代码,要上手搭建、使用 +3. 有想法就大胆尝试,不管自己的方案是否完善 +4. 多多关注开源,了解社区动态,多和开源开发者交流 ------ ------ +--- +--- ## 第二位 星辰 @@ -65,13 +63,10 @@ Hertzbeat Committer github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) - ## 初识Hertzbeat 2022年8月开始接触Hertzbeat,由于公司监控elasticsearch使用的cerebro,虽然有非常强大的数据监控,但缺少告警通知的功能;就去github上浏览监控类的项目,刚好看到Hertzbeat,对此非常感兴趣,在了解完整个项目结构和实现后,刚好elasticsearch的监控部分做的不够完善,我就根据cerebro完善了这部分监控数据并提交了pull request。后面在tom老哥的帮助下也开始其他部分的实现。 - - ## 开始提交PR 从2022年9月至今提交了好几个pr,主要包括: @@ -83,8 +78,6 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 一些bug的修复。。。 + promethues exporter 协议解析 - - ## 持续的开源贡献与收获 到目前为止,参与Hertzbeat社区开源已有半年多时间,贡献了许多,也成长收获了许多。 @@ -93,19 +86,16 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) 同时在社区群里,看到别人提的问题和帮助别人可以学到很多新的知识,很多问题你目前不一定会遇到,其他人遇到的时候你可以思考并收获很多知识。 - ## 感谢社区小伙伴 感谢无偿帮助过我或给过我启发的小伙伴:[tomsun28](https://github.com/tomsun28) - ## 对新人的一点建议 + 使用者可以先看官网,官网基本能够解决你的问题。部分简单或者常见的问题其他可以自己解决,对自己也是一种锻炼 + 可以尝试阅读源码,大部分源码都是包含注释的,并不难;不懂的地方也可以通过运行test,debug看一下整个流程 + 有想法或者bug,可以前往gitee或者github提交issues,也可以在群里询问,不要怕,都是从菜逼过来的 - ## 如何参与Hertzbeat + 官网有非常完善的贡献者指南:[贡献者指南 | HertzBeat](https://hertzbeat.com/docs/community/contribution) @@ -114,5 +104,5 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 如果是大的改动,建议提交前编写issues,在提交pr,同时请注意编码的规范,尽量减少bug和警告的产生 - > 以上就是我们新晋Committer们的开源经历了,可以看出参与开源并不难,更重要的是迈出第一步,无论是代码还是文档修复或者提交issue,这些都是贡献者参与开源的姿势。快来加入我们吧! 
+ diff --git a/home/blog/2022-12-28-hertzbeat-v1.2.3.md b/home/blog/2022-12-28-hertzbeat-v1.2.3.md index f818dc5e878..40af1be10bf 100644 --- a/home/blog/2022-12-28-hertzbeat-v1.2.3.md +++ b/home/blog/2022-12-28-hertzbeat-v1.2.3.md @@ -7,7 +7,7 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: [opensource] --- -## v1.2.3 +## v1.2.3 Home: hertzbeat.com | tancloud.cn @@ -38,7 +38,6 @@ Feature: 12. [add Prometheus exporter metrics parser and IoTDB monitor #505](https://github.com/apache/hertzbeat/pull/505) @Ceilzcx 13. [support apache shenyu metrics monitoring #507](https://github.com/apache/hertzbeat/pull/507) - Bugfix. 1. [[manager] fix cross domain problem in SecurityCorsConfiguration #469](https://github.com/apache/hertzbeat/pull/469) @zenan08 @@ -57,6 +56,7 @@ Have Fun! ---- ## V1.2.3 + 官网: hertzbeat.com | tancloud.cn 大家好,HertzBeat v1.2.3发布啦!这个版本带来了重大更新,我们支持了对prometheus exporter协议监控,用户可以很方便的使用hertzbeat来适配监控prometheus exporter. 基于这个能力,这个版本我们也支持了对apache shenyu, apache iotdb的指标监控。我们更新了UI布局,修复了多个BUG,也支持了短信通知。快来体验下吧! @@ -84,7 +84,6 @@ Feature: 12. [add Prometheus exporter metrics parser and IoTDB monitor #505](https://github.com/apache/hertzbeat/pull/505) @Ceilzcx 13. [support apache shenyu metrics monitoring #507](https://github.com/apache/hertzbeat/pull/507) - Bugfix. 1. [[manager] fix cross domain problem in SecurityCorsConfiguration #469](https://github.com/apache/hertzbeat/pull/469) @zenan08 @@ -97,3 +96,4 @@ Bugfix. 8. [[manager] springboot2 monitor support base path config #515](https://github.com/apache/hertzbeat/pull/515) ---- + diff --git a/home/blog/2023-01-05-monitor-iotdb.md b/home/blog/2023-01-05-monitor-iotdb.md index b89a7441719..26dfc887aea 100644 --- a/home/blog/2023-01-05-monitor-iotdb.md +++ b/home/blog/2023-01-05-monitor-iotdb.md @@ -30,6 +30,7 @@ tags: [opensource, practice] #### 1. Enable the `metrics` function on the IoTDB side, which will provide interface data in the form of prometheus metrics. 1. 
The metric collection is disabled by default, you need to modify the parameters in `conf/iotdb-metric.yml` first, then restart the server + ``` # Whether to start the monitoring module, the default is false enableMetric: true @@ -73,7 +74,6 @@ For other parameters such as **collection interval**, **timeout period**, etc., - **Enable `metrics` function on IoTDB in one step** - **Another step is to configure the IP port on the HertzBeat monitoring page to add monitoring** - **In this way, we have completed the monitoring of IoTDB. We can view the monitoring details and metrics at any time to observe its service status, but people cannot always watch it. When there is always a time to rest, monitoring is often accompanied by alarms. When monitoring When the metrics are abnormal, the monitoring tool needs to be able to notify the person in charge in time** **Next, we will teach you step by step to configure the threshold alarm notification in the HertzBeat system** @@ -89,7 +89,6 @@ Path: Menu -> Alarm Threshold -> Add Threshold ![hertzbeat](/img/blog/monitor-iotdb-6.png) - 2. Add message notification recipients Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient @@ -107,7 +106,6 @@ Message notification methods support **email, DingTalk, WeChat Work, Feishu, Web ![hertzbeat](/img/blog/alert-notice-2.png) - ### Finished, now wait for the warning message to come. ding ding ding ding ``` diff --git a/home/blog/2023-01-08-monitor-shenyu.md b/home/blog/2023-01-08-monitor-shenyu.md index 6ae3e99f83c..582176e34f6 100644 --- a/home/blog/2023-01-08-monitor-shenyu.md +++ b/home/blog/2023-01-08-monitor-shenyu.md @@ -22,7 +22,6 @@ tags: [opensource, practice] - Clustering: NGINX, Docker, Kubernetes - Languages: .NET, Python, Go, Java clients available for API registration - ### HertzBeat Introduction > HertzBeat is an open source, easy to use and friendly real-time monitoring tool, no Agent, with powerful custom monitoring capabilities. 
@@ -44,11 +43,11 @@ tags: [opensource, practice] 1. Add the `metrics plugin` dependency to the `pom.xml` file of the gateway. ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` 2. `metric` plugin Capture is turned off by default, edit the following in the gateway's configuration `yaml` file: @@ -118,7 +117,6 @@ Path: Menu -> Alert Thresholds -> Add Thresholds ![hertzbeat](/img/blog/monitor-shenyu-7.png) - 2. Add message notification recipients > Configure recipients to let alert message know who to send to and in what way. @@ -140,7 +138,6 @@ Message notification methods support **Email, Nail, WeChat, Flybook, WebHook, SM ![hertzbeat](/img/blog/alert-notice-2.png) - ### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding. ``` diff --git a/home/blog/2023-02-02-monitor-dynamic-tp.md b/home/blog/2023-02-02-monitor-dynamic-tp.md index 126c14044e4..cc42a8c0cab 100644 --- a/home/blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/blog/2023-02-02-monitor-dynamic-tp.md @@ -17,7 +17,6 @@ tags: [opensource, practice] - Real-time monitoring of the running status of the thread pool, alarms are triggered when the alarm policy is set, and alarm information is pushed to the office platform. - Collect thread pool metrics data regularly, and use grafana as a visual monitoring platform to monitor the overall situation. - ### HertzBeat Introduction > HertzBeat is an open source, easy-to-use and friendly real-time monitoring tool with powerful customizable monitoring capabilities. @@ -130,7 +129,6 @@ Path: Menu -> Alert Thresholds -> Add Thresholds ![hertzbeat](/img/blog/monitor-dynamic-tp-6.png) - 2. Add message notification recipients > Configure recipients to let alert message know who to send to and in what way. 
@@ -152,7 +150,6 @@ Message notification methods support **Email, Dingtalk, WeChat, Flybook, WebHook ![hertzbeat](/img/blog/alert-notice-2.png) - ### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding. ``` diff --git a/home/blog/2023-02-10-new-committer.md b/home/blog/2023-02-10-new-committer.md index 6c4d39e259b..37bf294428f 100644 --- a/home/blog/2023-02-10-new-committer.md +++ b/home/blog/2023-02-10-new-committer.md @@ -39,11 +39,11 @@ The first PR was submitted on April 17, 2022, mainly for the problem of TDEngine So far, I have participated in the hertzbeat open source project for more than half a year, contributed a lot, and grown and gained a lot. details as follows: -* Refactored the alarm module based on the strategy mode -* Implemented metric monitoring for `Redis` database -* Optimize `spring.factories` configuration items -* Implemented message notification channels supporting `Telegram`, `Discord`, `Slack` -* Use `Thymeleaf` to restructure the alarm text, and the alarm notification template is more standardized +* Refactored the alarm module based on the strategy mode +* Implemented metric monitoring for `Redis` database +* Optimize `spring.factories` configuration items +* Implemented message notification channels supporting `Telegram`, `Discord`, `Slack` +* Use `Thymeleaf` to restructure the alarm text, and the alarm notification template is more standardized ### Thank you community friends @@ -51,8 +51,8 @@ Thanks to the friends who have helped me or inspired me for free: tomsun28, for ### Advice for newcomers -* HertzBeat's source code is very friendly to novices, with standardized code and rich comments, which is very suitable as a learning project. -* Open source contribution is not achieved overnight, every idea, every question/answer is a contribution, the first step is the most important! 
+* HertzBeat's source code is very friendly to novices, with standardized code and rich comments, which is very suitable as a learning project. +* Open source contribution is not achieved overnight, every idea, every question/answer is a contribution, the first step is the most important! ## Next 🌻 Armored Little Treasure @@ -68,25 +68,22 @@ github: TJxiaobao First of all, I would like to thank Brother Huacheng here, because I wanted to learn some excellent `Java` projects at that time. Then, during the meal, I asked my brother if there was any good project recommendation. At this time, my brother recommended me to Brother Tom. When I personally used `hertzbeat`, I really discovered a new continent. Compared with the simple `Java` project I was exposed to before, I was deeply impressed by the architecture design of `hertzbeat` and its practical functions. me. At this time, a seed of "wanting to contribute my own strength" has been planted in my heart. - - ### 🌻 Start submitting PR On Oct 20, 2022, I submitted `PR` for the first time. Although this `PR` is a simple translation comment, it seems that the technical content is not very high. -But he can also make me familiar with the business logic and architecture design of the project faster, and can lay a solid foundation for future contributions. +But he can also make me familiar with the business logic and architecture design of the project faster, and can lay a solid foundation for future contributions. And this `PR` is also my first step towards open source, and it is also the starting point for me to fall in love with open source! - ### 🌻 Continuous open source contribution and harvest From the first `PR` to the present, I have participated in the `hertzbeat` open source project for a while, and I have also contributed a small part, and I have grown and gained a lot. details as follows. **contribute:** -- 1. Realize the monitoring of docker containers. -- 2. Complete the domestic database DM monitoring -- 3. 
Write a single test for the corresponding business. -- 4. English translation of some annotations. +- 1. Realize the monitoring of docker containers. +- 2. Complete the domestic database DM monitoring +- 3. Write a single test for the corresponding business. +- 4. English translation of some annotations. **reward:** @@ -94,33 +91,30 @@ From the first `PR` to the present, I have participated in the `hertzbeat` open - 2. Broaden your horizons. - 3. Learned a lot from the bosses. - ### 🌻 Thanks to the community partners Thanks to the friends who have helped me or inspired me for free (in no particular order): tomsun28 (brother tom), Huacheng (brother) - ### 🌻 A little advice for newcomers First of all, I am also a newcomer to Novice Village, but I can share some of my experience with you, hoping to help you. -- 1. Don't be too impatient, and calm down to understand the general implementation logic of each module. -- 2. Use different functions and debug to see the underlying implementation principle of each function. -- 3. Slowly try to read the source code and understand it. -- 4. If you encounter a bug, you can directly report it to issues, or you can try to solve it yourself. - +- 1. Don't be too impatient, and calm down to understand the general implementation logic of each module. +- 2. Use different functions and debug to see the underlying implementation principle of each function. +- 3. Slowly try to read the source code and understand it. +- 4. If you encounter a bug, you can directly report it to issues, or you can try to solve it yourself. ## What is Hertz Beat? > [HertzBeat Hertz Beat](https://github.com/apache/hertzbeat) is a real-time monitoring and alarm system with powerful custom monitoring capabilities and no Agent required. Monitoring of application services, databases, operating systems, middleware, cloud native, etc., threshold alarms, and alarm notifications (email, WeChat, Dingding, Feishu, SMS, Discord, Slack, Telegram). 
- +> > We make protocol specifications such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure YML to use these protocols to customize and collect any metrics you want to collect. > Do you believe that you can immediately adapt to a new monitoring type such as K8s or Docker just by configuring YML? - +> > The powerful customization of `HertzBeat`, multi-type support, easy expansion, and low coupling, hope to help developers and small and medium-sized teams quickly build their own monitoring tools. **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** ## ⛄ Supported @@ -131,3 +125,4 @@ First of all, I am also a newcomer to Novice Village, but I can share some of my -Kubernetes, Docker - and more for your custom monitoring. - Notification support `Discord` `Slack` `Telegram` `Mail` `DingTalk` `WeChat` `Feishu` `SMS` `Webhook`. + diff --git a/home/blog/2023-02-11-monitor-mysql.md b/home/blog/2023-02-11-monitor-mysql.md index cdcaeb427e1..614ff205f7b 100644 --- a/home/blog/2023-02-11-monitor-mysql.md +++ b/home/blog/2023-02-11-monitor-mysql.md @@ -14,7 +14,6 @@ Keywords: [Open source monitoring tool, open source database monitoring, Mysql d > MySQL is an open source relational database management system developed by the Swedish company MySQL AB and a product of Oracle. MySQL is one of the most popular open source relational database management systems. In terms of WEB applications, MySQL is one of the best RDBMS (Relational Database Management System, relational database management system) application software. - ### Introduction to HertzBeat > HertzBeat is an open source, easy-to-use and friendly real-time monitoring tool that does not require Agent and has powerful custom monitoring capabilities. @@ -80,7 +79,6 @@ Path: Menu -> Threshold Rules -> Add Threshold ![hertzbeat](/img/blog/monitor-mysql-6.png) - 2. 
Add message notification recipients > Configure the receiver to let the alarm message know who to send and how to send it. @@ -89,7 +87,7 @@ Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. -- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk to configure the robot on DingTalk and set the security custom keyword `HertzBeat `, get the corresponding `access_token` value. +- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk to configure the robot on DingTalk and set the security custom keyword `HertzBeat `, get the corresponding `access_token` value. - Configure the receiver parameters in HertzBeat as follows. 【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 @@ -102,7 +100,6 @@ Message notification methods support **email, DingTalk, WeChat Work, Feishu, Web ![hertzbeat](/img/blog/alert-notice-2.png) - ### Finished, now wait for the warning message to come. ding ding ding ding ``` @@ -120,11 +117,11 @@ Content details: mysql db query_cache_hit_rate is too low, now is 20. :::tip This practical article took us to experience how to use the open source real-time monitoring tool HertzBeat to monitor Mysql database metric data. We can find that HertzBeat, which integrates `monitoring-alarm-notification`, is more convenient in operation and use, just click on the page The Mysql database can be included in the monitoring and alarm notification, and the tedious operations of deploying multiple components and writing configuration files are no longer needed. 
::: - + Mysql Github: https://github.com/mysql/mysql-server HertzBeat Github: https://github.com/apache/hertzbeat -**Welcome to learn, use and star! ** +**Welcome to learn, use and star! ** > Only one docker command is needed to install and experience heartbeat: diff --git a/home/blog/2023-02-15-monitor-linux.md b/home/blog/2023-02-15-monitor-linux.md index 243a6608e1e..8681f564e11 100644 --- a/home/blog/2023-02-15-monitor-linux.md +++ b/home/blog/2023-02-15-monitor-linux.md @@ -81,7 +81,6 @@ Path: Menu -> Threshold Rules -> Add Threshold ![hertzbeat](/img/blog/monitor-linux-9.png) - 2. Add message notification recipients > Configure the receiver to let the alarm message know who to send and how to send it. @@ -103,7 +102,6 @@ Message notification methods support **email, DingTalk, WeChat Work, Feishu, Web ![hertzbeat](/img/blog/alert-notice-2.png) - ### Finished, now wait for the warning message to come. ding ding ding ding ``` @@ -129,10 +127,10 @@ This practical article took us to experience how to use the open source real-tim ## What is Hertz Beat? > [HertzBeat Hertz Beat](https://github.com/apache/hertzbeat) is a real-time monitoring and alarm system with powerful custom monitoring capabilities and no Agent required. Monitoring of application services, databases, operating systems, middleware, cloud native, etc., threshold alarms, and alarm notifications (email, WeChat, Dingding, Feishu, SMS, Discord, Slack, Telegram). - +> > We make protocol specifications such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure YML to use these protocols to customize and collect any metrics you want to collect. > Do you believe that you can immediately adapt to a new monitoring type such as K8s or Docker just by configuring YML? - +> > The powerful customization of `HertzBeat`, multi-type support, easy expansion, and low coupling, hope to help developers and small and medium-sized teams quickly build their own monitoring tools. 
**Github: https://github.com/apache/hertzbeat** @@ -147,3 +145,4 @@ This practical article took us to experience how to use the open source real-tim -Kubernetes, Docker - and more for your custom monitoring. - Notification support `Discord` `Slack` `Telegram` `Mail` `DingTalk` `WeChat` `Feishu` `SMS` `Webhook`. + diff --git a/home/blog/2023-03-15-hertzbeat-v1.3.0.md b/home/blog/2023-03-15-hertzbeat-v1.3.0.md index bc7a5631bfc..14a550ec61e 100644 --- a/home/blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/blog/2023-03-15-hertzbeat-v1.3.0.md @@ -10,25 +10,20 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] Website: hertzbeat.com | tancloud.cn - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/a9629ef5bb6e486cacddb899f1495c6e~tplv-k3u1fbpfcp-zoom-1.image) - - ### What is HertzBeat? > HertzBeat is an open source real-time monitoring and alerting tool with powerful custom monitoring capabilities and no Agent required. > It supports monitoring of application services, database, operating system, middleware, cloud native, network and other metrics, and threshold alert notification in one step. > Support more liberal threshold rules (calculation expressions), `email` `Discord` `Slack` `Telegram` `Pegging` `WeChat` `FlyBook` `SMS` `Webhook` and other ways to timely delivery. - +> > We have made the protocol specifications such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable so that you can simply configure `YML` to use these protocols to customize the collection of any metrics you want. - +> > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by defining YML? 
- ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** @@ -42,18 +37,17 @@ After a month of iterations, HertzBeat v1.3.0 was officially released last weeke ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b5c9dd3e28c54c72b49a7470012a0c36~tplv-k3u1fbpfcp-zoom-1.image) - **support for monitoring network switches**. -hertzbeat supported snmp protocol long time ago, windows monitoring is monitored by snmp protocol, this version we not only support more windows performance metrics, but also support snmp walk, adapt several common network switches monitoring, welcome to contribute more types and metrics to the community. + hertzbeat supported snmp protocol long time ago, windows monitoring is monitored by snmp protocol, this version we not only support more windows performance metrics, but also support snmp walk, adapt several common network switches monitoring, welcome to contribute more types and metrics to the community. - **Support for monitoring redis clusters and more database metrics**. -Community contributors have contributed extended metrics for redis clusters and multiple databases, enriching the performance metrics data. + Community contributors have contributed extended metrics for redis clusters and multiple databases, enriching the performance metrics data. - **Support iotdb1.0 storage, dependency-free mode** -and more new features welcome to explore + and more new features welcome to explore - Fix several bugs, better documentation, refactored code. ----- - +--- Only one docker command is needed to install and experience heartbeat `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` @@ -66,7 +60,7 @@ Upgrade note ⚠️. 
For users who previously used iotdb or tdengine to store metrics data, you need to modify application.yml to disable JPA storage `warehouse.store.jpa.enabled` as follows: -Modify `application.yml` and set `warehouse.store.jpa.enabled` parameter to false +Modify `application.yml` and set `warehouse.store.jpa.enabled` parameter to false ``` warehouse: @@ -82,7 +76,7 @@ ALTER table hzb_monitor modify job_id bigint default null; COMMIT; ``` ----- +--- ## ⛄ Supported diff --git a/home/blog/2023-03-22-monitor-springboot2.md b/home/blog/2023-03-22-monitor-springboot2.md index 2d0cbc17cfc..8dac9aade12 100644 --- a/home/blog/2023-03-22-monitor-springboot2.md +++ b/home/blog/2023-03-22-monitor-springboot2.md @@ -13,18 +13,18 @@ keywords: [opensource monitoring, SpringBoot monitoring, alert] ### HertzBeat Intro > HertzBeat is an open source, real-time monitoring tool with custom-monitor and agentLess. | 易用友好的开源实时监控告警工具,无需Agent,强大自定义监控能力. - +> > **Monitor+Alerter+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. > More flexible threshold rule(calculation expression), timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. - +> > We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? - -> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. +> +> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. 
Github: https://github.com/apache/hertzbeat -### Monitoring SpringBoot2 Metrics with HertzBeat in 5 minutes +### Monitoring SpringBoot2 Metrics with HertzBeat in 5 minutes #### Prerequisite, you already have SpringBoot2 application environment and HertzBeat environment. @@ -114,13 +114,12 @@ For other parameters such as **collection interval**, **timeout period**, etc., - **Just one step to configure the IP port on the HertzBeat monitoring page and add SpringBoot2 application monitoring** - :::tip Through the above, we have completed the monitoring of the SpringBoot2 application. We can check the status and availability of various metrics of the SpringBoot2 application at any time in HertzBeat. Of course, it is impossible to manually check the metrics in real time. Monitoring is often accompanied by alarm thresholds. When the performance metrics of the SpringBoot2 application exceed our threshold or the SpringBoot2 application itself is abnormal, we can promptly notify our corresponding person in charge. The person in charge receives the notification and handles it. , this is a complete monitoring and alarm process. ::: -**Next, we will demonstrate step by step how to configure the threshold alarm notification in the HertzBeat system. When the metrics of the SpringBoot2 application are abnormal, we will be notified in time** +**Next, we will demonstrate step by step how to configure the threshold alarm notification in the HertzBeat system. When the metrics of the SpringBoot2 application are abnormal, we will be notified in time** #### 3. Add SpringBoot2 application metric threshold alarm in HertzBeat system @@ -135,7 +134,6 @@ Path: Menu -> Threshold Rules -> Add Threshold ![hertzbeat](/img/blog/monitor-springboot2-7.png) - 2. Add message notification recipients > Configure the receiver to let the alarm message know who to send and how to send it. 
@@ -157,7 +155,6 @@ Message notification methods support **email, DingTalk, WeChat Work, Feishu, Web ![hertzbeat](/img/blog/alert-notice-2.png) - ### Finished, now wait for the warning message to come. ding ding ding ding ``` @@ -191,11 +188,11 @@ This practical article took us to experience how to use the open source real-tim > [HertzBeat](https://github.com/apache/hertzbeat) is an open source, real-time monitoring tool with custom-monitor and agentless. > **Monitor+Alerter+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. > More flexible threshold rule(calculation expression), timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. - +> > We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? - -> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. +> +> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. ---- diff --git a/home/blog/2023-05-09-hertzbeat-v1.3.1.md b/home/blog/2023-05-09-hertzbeat-v1.3.1.md index 9ed7afbe17d..1e5c663aebd 100644 --- a/home/blog/2023-05-09-hertzbeat-v1.3.1.md +++ b/home/blog/2023-05-09-hertzbeat-v1.3.1.md @@ -10,28 +10,23 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] Website: hertzbeat.com | tancloud.cn - ![hertzBeat](/img/home/0.png) - - ### What is HertzBeat? > [HertzBeat](https://github.com/apache/hertzbeat) is an open source, real-time monitoring system with custom-monitoring and agentLess. 
> **Monitoring+Alarm+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. > Easy to use, full web-based operation, monitoring and alerting at the click of a mouse, zero learning cost. > More flexible threshold rule, timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. - +> > We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? - +> > `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. > We also provide **[Monitoring SaaS Cloud](https://console.tancloud.cn)**, users no longer need to deploy a cumbersome monitoring system in order to monitor resources. **[Get started for free](https://console.tancloud.cn)**. - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** @@ -53,13 +48,13 @@ Only one docker command is needed to install and experience hertzbeat: If use tdengine before, please upgrade tdengine to 3.0+ Please Run SQL Script When Upgrade. + ``` ALTER table hzb_alert_define modify field varchar(255) default null; COMMIT; ``` ----- - +--- ## ⛄ Supported - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -71,7 +66,7 @@ COMMIT; - and more for your custom monitoring. - Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook`. 
----- +--- **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/blog/2023-05-11-greptimedb-store.md b/home/blog/2023-05-11-greptimedb-store.md index ec06dea561a..0343565c282 100644 --- a/home/blog/2023-05-11-greptimedb-store.md +++ b/home/blog/2023-05-11-greptimedb-store.md @@ -56,14 +56,13 @@ $ docker run -p 4000-4004:4000-4004 \ 2. Use ``$ docker ps | grep greptime`` to see if GreptimeDB started successfully. - #### Installing and Deploying HertzBeat See the [official documentation](https://hertzbeat.com/zh-cn/docs/start/docker-deploy) for details. 1. Docker installs HertzBeat. -```shell +```shell $ docker run -d -p 1157:1157 \ -e LANG=zh_CN.UTF-8 \ -e TZ=Asia/Shanghai \ @@ -73,8 +72,6 @@ $ docker run -d -p 1157:1157 \ --name hertzbeat apache/hertzbeat ``` - - - `-v /opt/data:/opt/hertzbeat/data` : (Optional, data persistence) Important ⚠️ Mount the H2 database files to the local host to ensure that the data will not be lost due to the creation and deletion of the container - `-v /opt/application.yml:/opt/hertzbeat/config/application.yml` : Mount customized local configuration files to the container, i.e. use local configuration files to overwrite the container configuration files. diff --git a/home/blog/2023-07-05-hertzbeat-v1.3.2.md b/home/blog/2023-07-05-hertzbeat-v1.3.2.md index ab232296099..f8eda452b26 100644 --- a/home/blog/2023-07-05-hertzbeat-v1.3.2.md +++ b/home/blog/2023-07-05-hertzbeat-v1.3.2.md @@ -10,33 +10,30 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] Website: hertzbeat.com | tancloud.cn - ![hertzBeat](/img/home/0.png) - ### What is HertzBeat? > [HertzBeat](https://github.com/apache/hertzbeat) is an open source, real-time monitoring system with custom-monitoring and agentLess. > **Monitoring+Alarm+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. 
> Easy to use, full web-based operation, monitoring and alerting at the click of a mouse, zero learning cost. > More flexible threshold rule, timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. - +> > We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? - +> > `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. > We also provide **[Monitoring SaaS Cloud](https://console.tancloud.cn)**, users no longer need to deploy a cumbersome monitoring system in order to monitor resources. **[Get started for free](https://console.tancloud.cn)**. ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** -### v1.3.2 +### v1.3.2 -Hi guys! Major release. HertzBeat v1.3.2 has published. +Hi guys! Major release. HertzBeat v1.3.2 has published. **This delightful version came out of the hard work of 27 friends. Thank them! Love 💗** @@ -52,7 +49,6 @@ In this version, we support new monitoring types and metrics for **freebsd, debi Fixed a large number of BUG, improved the document code, and improved the overall stability and usability. More new features are welcome to explore! - Let's Try Now! Only one docker command is needed to install and experience hertzbeat: @@ -68,12 +64,14 @@ Only one docker command is needed to install and experience hertzbeat: Upgrade Note⚠️. 
For h2 database users, sholud exec sql below: + ```sql ALTER TABLE HZB_PARAM DROP CONSTRAINT CONSTRAINT_82;; ``` How to Enable H2 WEB Console: Modify `application.yml` and restart, access `ip:1157/h2-console` + ``` spring: h2: @@ -82,10 +80,7 @@ spring: enabled: true ``` - ----- - - +--- ## ⛄ Supported - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -97,8 +92,8 @@ spring: - and more for your custom monitoring. - Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook`. ----- +--- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/blog/2023-08-14-hertzbeat-v1.4.0.md b/home/blog/2023-08-14-hertzbeat-v1.4.0.md index 142105674ca..54ea6267be8 100644 --- a/home/blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/blog/2023-08-14-hertzbeat-v1.4.0.md @@ -22,8 +22,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] * High performance, supports horizontal expansion of multi-collector clusters, multi-isolated network monitoring and cloud-edge collaboration. * Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. - -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. 
![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -88,15 +87,14 @@ As for open source commercialization, the premise of open source commercializati ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_IP=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MANAGER_IP=127.0.0.1` : set the main hertzbeat server ip. - `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) - ----- - +--- ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! @@ -149,7 +147,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * [doc] add collector clusters document by @tomsun28 in https://github.com/apache/hertzbeat/pull/1161 * [hertzbeat] release hertzbeat version v1.4.0 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1168 ----- +--- ## ⛄ Supported @@ -165,5 +163,5 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do ---- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/blog/2023-08-28-new-committer.md b/home/blog/2023-08-28-new-committer.md index 676a572955f..222670b3c22 100644 --- a/home/blog/2023-08-28-new-committer.md +++ b/home/blog/2023-08-28-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ! [hertzBeat](/img/blog/new-committer.png) It's great to welcome a new community `Committer`, unlike other contributors `logicz` comes from an Ops implementation position at Cyberoam rather than a development position, but the quality of the contributions, both in terms of code and documentation etc. 
is very high 👍. This is also our `HertzBeat` and other open source projects are not the same place, because the user group is more oriented to the operation and maintenance of the development, in our 139 contributors in the operation and maintenance engineers accounted for more than 30%, which breaks the open source project collaboration and contribution to the object are the inherent cognition of the development position, which shows that whether it is the operation and maintenance engineers and test engineers to contribute to the open source project participation is very enthusiastic! This shows that both operation and maintenance engineers and test engineers are very enthusiastic about contributing to open source projects, not just as bystanders to open source collaboration. Participation in open source projects is not exclusive to a certain group of people, but is open to all who want to participate, it may be a document, a script or a piece of code, imagine your participation in the open source project is deployed to thousands of servers to run running, to help others to be used or browse the Review discussion, git record will always be kept, this may be the significance of participation in open source projects. 
@@ -56,9 +55,7 @@ So far, to participate in the Hertzbeat open source project has been more than f * Several document updates * * Harvesting **: - * Hertzbeat is really an excellent project, in terms of project deployment, scripts and configuration are very standardized, I write a lot of my own projects have to draw on - * The most important thing is to harvest the spirit of open source, we are in a community to communicate together, progress ## Thanks to the community partners @@ -84,7 +81,6 @@ HertzBeat HertzBeat is an open source real-time monitoring and alerting system w > `HertzBeat`'s powerful customization, multi-type support, high performance, easy to extend, low-coupling, and hopefully can help developers and teams to quickly build their own monitoring system. - Github: https://github.com/apache/hertzbeat More users are welcome to participate in `HertzBeat` open source collaboration, no matter a typo or punctuation we are very welcome. diff --git a/home/blog/2023-09-26-hertzbeat-v1.4.1.md b/home/blog/2023-09-26-hertzbeat-v1.4.1.md index cac836db123..fc91ebb300d 100644 --- a/home/blog/2023-09-26-hertzbeat-v1.4.1.md +++ b/home/blog/2023-09-26-hertzbeat-v1.4.1.md @@ -22,8 +22,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] * High performance, supports horizontal expansion of multi-collector clusters, multi-isolated network monitoring and cloud-edge collaboration. * Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. - -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. 
![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -45,7 +44,6 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] image - - new help moudle image @@ -73,15 +71,14 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. - `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) - ----- - +--- ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! @@ -139,7 +136,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * [hertzbeat] release hertzbeat version v1.4.1 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1261 * auto split webhook token when user input hook url by @tomsun28 in https://github.com/apache/hertzbeat/pull/1262 ----- +--- ## ⛄ Supported @@ -155,5 +152,5 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do ---- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/blog/2023-11-12-hertzbeat-v1.4.2.md b/home/blog/2023-11-12-hertzbeat-v1.4.2.md index d349d02a170..ad9b6783518 100644 --- a/home/blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/blog/2023-11-12-hertzbeat-v1.4.2.md @@ -20,8 +20,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] * High performance, supports horizontal expansion of multi-collector clusters, multi-isolated network monitoring and cloud-edge collaboration. 
* Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. - -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. ![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -31,14 +30,12 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ### HertzBeat's 1.4.2 version release! - - support custom notice template - support push metrics monitoring(beta) - support using Huawei Cloud OBS to store monitoring templates yml - support emqx monitoring and udp port monitoring - more features , fix multiple bugs and so on - ### Install quickly via docker 1. Just one command to get started: @@ -56,15 +53,14 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. - `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) - ----- - +--- ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! 
@@ -114,13 +110,14 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * build hertzbeat package with jdk runtime by @tomsun28 in https://github.com/apache/hertzbeat/pull/1328 ## New Contributors + * @rbsrcy made their first contribution in https://github.com/apache/hertzbeat/pull/1268 * @XiaTian688 made their first contribution in https://github.com/apache/hertzbeat/pull/1308 * @liyin made their first contribution in https://github.com/apache/hertzbeat/pull/1311 **Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.1...v1.4.2 ----- +--- ## ⛄ Supported @@ -136,7 +133,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do ---- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** ### **Download Link** @@ -159,3 +156,4 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do - ⬇️ [hertzbeat-collector-macos_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_arm64_1.4.2.tar.gz) - ⬇️ [hertzbeat-collector-macos_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_amd64_1.4.2.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-windows64_1.4.2.zip) + diff --git a/home/blog/2023-12-11-hertzbeat-v1.4.3.md b/home/blog/2023-12-11-hertzbeat-v1.4.3.md index 2bc4957626d..3d4508b1bff 100644 --- a/home/blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/blog/2023-12-11-hertzbeat-v1.4.3.md @@ -21,7 +21,7 @@ keywords: [open source monitoring system, alerting system] * High performance, supports horizontal expansion of multi-collector clusters, multi-isolated network monitoring and cloud-edge collaboration. 
* Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. ![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -31,7 +31,6 @@ keywords: [open source monitoring system, alerting system] ### HertzBeat's 1.4.3 Version Release! - - enhanced reporting of external general alert API - support mysql api port website mongodb jvm redis monitoring metrics name i18n - support auto collect metrics by prometheus task @@ -43,7 +42,6 @@ keywords: [open source monitoring system, alerting system] Compatible with the Prometheus ecosystem, now we can monitor what Prometheus can monitoring with few clicks on webui. - ### Install Quickly Via Docker 1. Just one command to get started: @@ -61,15 +59,14 @@ Compatible with the Prometheus ecosystem, now we can monitor what Prometheus can ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. - `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) - ----- - +--- ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! 
@@ -134,7 +131,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do **Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.2...v1.4.3 ----- +--- ## ⛄ Supported @@ -150,7 +147,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do ---- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** ### **Download Link** @@ -173,3 +170,4 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do - ⬇️ [hertzbeat-collector-macos_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_arm64_1.4.3.tar.gz) - ⬇️ [hertzbeat-collector-macos_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_amd64_1.4.3.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-windows64_1.4.3.zip) + diff --git a/home/blog/2024-01-11-new-committer.md b/home/blog/2024-01-11-new-committer.md index af8533e2e37..a9828d755c5 100644 --- a/home/blog/2024-01-11-new-committer.md +++ b/home/blog/2024-01-11-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ! [hertzBeat](/img/blog/new-committer.png) > Welcome to HertzBeat's three new community committeers, let's learn more about their open source experience! @@ -39,17 +38,11 @@ In the following period, I spent some time reading Hertzbeat's code, and submitt - Exposed to a great open source community and improved my skills in related areas. - Thanks to Tom and my ospp mentor, Zheng Chenxin, who gave me a lot of help and advice during my exposure to the open source community. Currently I am still in charge of some of the code development in the community, I hope Hertzbeat can be better and better in the future! 
- - ----- - +--- # New Committer - SongXiao - - **Name: Zhou Shusheng** **Junior student at Henan Normal University** @@ -67,13 +60,11 @@ In July this year, after basically completing the study of Java framework develo * Support for Spring Gateway, Apache Spark, Apache Hive and other services metrics collection * Customize nginx and pop3 protocols to collect metrics for Nginx and POP3 mailbox servers, and add corresponding help files. - ## Harvest * Exposed to better and more complex large-scale projects, improved programming and problem-solving skills. * Put the theoretical knowledge into practice, gained JUC, microservice related development experience, and valuable project experience. - ## Thanks to our community partners Thanks to the author of HertzBeat, HertzBeat/Sms4j Committer Tiejia Xiaobao, Sms4j Committer Dongfeng, when I encountered problems that I could not solve, I often asked the three brothers to ask for advice, and they are always tired of patiently helping me solve the problem, there are really no words. @@ -85,14 +76,10 @@ Thanks to the other partners in the community, I've gained a lot from communicat * When you first get involved in an open source project, start with simple tasks. Gradually familiarize yourself with the code and process of the project, and gradually take on more complex tasks. * If you encounter problems that you can't solve by yourself, you can ask for help from the community. - - ----- +--- # New Committer - Dongfeng - - **Name: Zhang Yang **Freshman from Henan Normal University** @@ -126,7 +113,6 @@ Thanks to the authors of hertzbeat for the documentation and help. Thanks to my - Issues and pr's are the knock on the door of the project you are getting to know, so be willing to discuss and express your opinion. - No matter how big or small your contribution is, be willing to try and keep improving yourself. - ---- ## What is HertzBeat? @@ -142,10 +128,8 @@ Thanks to the authors of hertzbeat for the documentation and help. 
Thanks to my - High-performance, supports horizontal scaling of multiple collector clusters, multi-isolated network monitoring, and cloud-side collaboration. - Free alarm threshold rules, `Email` `Discord` `Slack` `Telegram` `Nail` `WeChat` `Flybook` `SMS` `Webhook` `Server sauce` and other methods of timely delivery of messages. - > ``HertzBeat``s powerful customization, multi-type support, high performance, easy to extend, low coupling, hope to help developers and teams quickly build their own monitoring system. - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/blog/2024-01-18-hertzbeat-v1.4.4.md b/home/blog/2024-01-18-hertzbeat-v1.4.4.md index e6e49efce56..66e8f6a25b3 100644 --- a/home/blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/blog/2024-01-18-hertzbeat-v1.4.4.md @@ -21,7 +21,7 @@ keywords: [open source monitoring system, alerting system] * High performance, supports horizontal expansion of multi-collector clusters, multi-isolated network monitoring and cloud-edge collaboration. * Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. ![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -31,7 +31,6 @@ keywords: [open source monitoring system, alerting system] ### HertzBeat's 1.4.4 Version Release! 
- - support snmp v3 monitoring protocol @TJxiaobao - support monitoring NebulaGraph metrics @ZY945 - support monitoring pop3 metrics @a-little-fool @@ -44,7 +43,6 @@ keywords: [open source monitoring system, alerting system] - add smtp protocol and support smtp monitoring by @ZY945 - more feature, document and bugfix - ### Install Quickly Via Docker 1. Just one command to get started: @@ -62,15 +60,14 @@ keywords: [open source monitoring system, alerting system] ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. - `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) - ----- - +--- ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! @@ -83,7 +80,10 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * monitoring the availability of websockets through handshake. by @ZY945 in https://github.com/apache/hertzbeat/pull/1413 * [Task-1386] When adding tags in tag management, random colors are given by default. 
by @prolevel1 in https://github.com/apache/hertzbeat/pull/1412 * add prolevel1 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1415 -* #1397 feature: support for dns monitoring by @Calvin979 in https://github.com/apache/hertzbeat/pull/1416 +* + +# 1397 feature: support for dns monitoring by @Calvin979 in https://github.com/apache/hertzbeat/pull/1416 + * Support monitoring hive metrics by @a-little-fool in https://github.com/apache/hertzbeat/pull/1417 * support legend pageable in history data charts by @tomsun28 in https://github.com/apache/hertzbeat/pull/1414 * update component tip and help tip doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1418 @@ -145,7 +145,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do **Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.3...v1.4.4 ----- +--- ## ⛄ Supported @@ -161,7 +161,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do ---- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** ### **Download Link** @@ -185,7 +185,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do - ⬇️ [hertzbeat-collector-macos_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-macos_amd64_1.4.4.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-windows64_1.4.4.zip) - **hertzbeat docker compose script** -- ⬇️ [docker-compose.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/docker-compose.zip) +- ⬇️ [docker-compose.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/docker-compose.zip) + diff --git a/home/blog/2024-04-17-to-apache.md b/home/blog/2024-04-17-to-apache.md index 84f25fb10d1..b06725d522d 100644 --- 
a/home/blog/2024-04-17-to-apache.md +++ b/home/blog/2024-04-17-to-apache.md @@ -16,9 +16,6 @@ HertzBeat was officially open-sourced in January 2022 in the Dromara open-source ![](/img/blog/apache-incubator-2.png) - - - Joining the Apache Incubator is not the end, but rather a fresh start for the project. After joining the Apache Incubator, HertzBeat will actively adhere to the "openness and collaboration" philosophy of Apache at the community level, continuously building a fair, diverse, and inclusive open-source community. At the product level, we will listen to user feedback, enhance and optimize the user experience, and create connections within the open-source ecosystem. We warmly welcome everyone to join the HertzBeat community. The community accepts contributions in any form. Let's work together to promote the development of open source. Hopefully, one day, HertzBeat will become a world-class open-source product like Apache Kafka and Apache Tomcat, with our contributed code deployed and running across various industries and corners of the globe. @@ -47,7 +44,6 @@ Since its open-source release, the main repository of HertzBeat on GitHub has ac > Thank you to these lovely contributors. - @@ -278,8 +274,6 @@ Since its open-source release, the main repository of HertzBeat on GitHub has ac
- - ## Special Thanks Thank you to all contributors who have participated in the community. @@ -304,7 +298,7 @@ Special thanks to the Champions and Mentors who have kindly offered guidance to ## Message from the Dromara Community > From being a star project in the Dromara community to becoming a member of the Apache Incubator, HertzBeat has demonstrated strong vitality and healthy growth momentum. We are proud and excited about every step forward that HertzBeat has taken. Here, we extend our sincerest congratulations to the HertzBeat team and express deep gratitude to all community members who have contributed to the project. - +> > May HertzBeat continue to grow and innovate in its future journey, becoming a shining star in the open-source community. We believe that with the outstanding talent of the team and the extensive support of the community, HertzBeat will achieve even greater achievements, providing high-quality services and experiences to developers and users worldwide. Dromara will continue to fully support and pay attention to the development of HertzBeat, looking forward to it creating more wonderful chapters! --- diff --git a/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md b/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md index abde09f8a95..3e9bf0cb9c5 100644 --- a/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md +++ b/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md @@ -17,8 +17,6 @@ HertzBeat is a powerful custom monitoring capabilities, high-performance cluster **Gitee: https://gitee.com/hertzbeat/hertzbeat** - - ## What is Open Source Summer? 
Open Source Summer is a summer open source activity initiated and long-term supported by the "Open Source Software Supply Chain Lighting Program" of the Institute of Software of the Chinese Academy of Sciences, aiming to encourage the development of open source @@ -39,8 +37,6 @@ Open Source Summer Website: Students are free to choose the project, communicate with the community mentor to realize the plan and write the project plan. The selected students will complete the development work as planned under the guidance of community mentors and contribute the results to the community. The community evaluates the student`s completion, and the sponsor distributes the financial aid to the student based on the evaluation results. - - ## HertzBeat project ### 1、 the realization of monitoring template market store @@ -52,8 +48,6 @@ Capture the metrics we want to monitor. Some different users may have different the yml configuration may not satisfy every user, so our goal is to let users contribute their own yml template to benefit more people. This can not only make the ecology of `HertzBeat` more perfect, but also make the user experience better! - - **Requirements:** 1. Use Java17, springboot3 to write the back-end code, Angular(recommended) or Vue to write the front-end code. @@ -61,20 +55,14 @@ This can not only make the ecology of `HertzBeat` more perfect, but also make th 3. The template page displays the number of downloads, categories, template description, and (optional) template versions. 4. Realize user personal page registration, login (later), upload template. - - **Output:** 1. Feature code can be incorporated into the HertzBeat repository as PR. 2. Complete the HertzBeat official template market 3. 
Update the help documents - - **Contact Tutor:** Qingran Zhao [zqr10159@dromara.org](mailto:zqr10159@dromara.org) - - ### 2、 implementation of Java native ipmi2 communication protocol **Project difficulty: Advanced /Advanced** @@ -84,19 +72,14 @@ middleware monitoring without Agent. In order for HertzBeat to have a wider moni package, the implementation of the native IPMI2 protocol in the query part, and use the protocol to obtain server motherboard information, network card information, power supply information, fan information, temperature sensor information and clock information. - - **Requirements:** 1. Use Java to implement the native IPMI2 protocol (query part) based on the UDP protocol, without relying on any third-party package. 2. Use the implemented IPMI2 protocol to query indicators of the IPMI enabled server, including mainboard information, network adapter information, power supply information, and fan information -alarm, temperature sensor information and clock information. - + alarm, temperature sensor information and clock information. 3. Abstract and standardize the queried indicator information to implement configuration management (optional). 4. Output detailed project documents, including design ideas, implementation details, usage instructions, etc. - - **Output:** - Feature code can be incorporated into the HertzBeat repository as PR. @@ -107,12 +90,8 @@ alarm, temperature sensor information and clock information. - Improve help documentation. - - **Contact Tutor:** Tiejia Xiaobao [tjxiaobao2024@qq.com](mailto:tjxiaobao2024@qq.com) - - ## What can you gain by participating in HertzBeat? Some of you may wonder what can be gained by participating in Open Source Summer? @@ -123,4 +102,4 @@ Some of you may wonder what can be gained by participating in Open Source Summer 4. **【Recommended Entry/Internship】 Students with excellent performance in this Programming Summer project can be recommended to work in your preferred company.** 5. 
**【Additional community surprise】 All students participating in this Summer of Programming project have the opportunity to become Apache HertzBeat committer and have their own apache email.** -**There are 100% prizes to take oh**, now the only problem is that time is running out, hurry up to sign up! The deadline for registration is June 4, so hurry up and sign up for 2023 Summer of Programming. \ No newline at end of file +**There are 100% prizes to take oh**, now the only problem is that time is running out, hurry up to sign up! The deadline for registration is June 4, so hurry up and sign up for 2023 Summer of Programming. diff --git a/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md b/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md index 398b694c6bc..d421d2eaf16 100644 --- a/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md +++ b/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md @@ -24,6 +24,7 @@ Due to significant changes in `application.yml` and `sureness.yml`, it is recomm #### `application.yml` generally needs to modify the following parts: Default is: + ```yaml datasource: driver-class-name: org.h2.Driver @@ -42,7 +43,9 @@ Default is: logging: level: SEVERE ``` + If you change to a MySQL database, here is an example: + ```yaml datasource: driver-class-name: com.mysql.cj.jdbc.Driver @@ -94,49 +97,46 @@ Due to the Apache Foundation's requirements for license compliance, HertzBeat's - MySQL: [https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip) - Oracle (If you want to monitor Oracle, these two drivers are required): - - [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) - - [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) + - 
[https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + - [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) Next, run the start-up script as before to experience the latest HertzBeat 1.6.0! ## Docker Upgrade - Mysql Database - Stop the HertzBeat container: + ``` docker stop hertzbeat ``` - - Upgrade the database script: - - Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), choose the directory of your database and execute the corresponding `V160__update_column.sql` file in MySQL. - + - Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), choose the directory of your database and execute the corresponding `V160__update_column.sql` file in MySQL. - Upgrade the configuration files: - - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - + - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - Add the corresponding database drivers: - - Due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. 
Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. + - Due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1.6.0! ## Docker Installation Upgrade - H2 Built-in Database (Not recommended for production use) - Stop the HertzBeat container: + ``` docker stop hertzbeat ``` - - Edit the H2 database files: - - Assuming you have mounted the H2 database files in the `data` directory to the local system, or copied the `/opt/hertzbeat/data` directory from the old container manually. - - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). - - Start the database locally using the H2 driver jar: - ``` - java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 - ``` + - Assuming you have mounted the H2 database files in the `data` directory to the local system, or copied the `/opt/hertzbeat/data` directory from the old container manually. + - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). 
+ - Start the database locally using the H2 driver jar: + ``` + java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 + ``` - Upgrade the configuration files: - - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - + - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - Add the corresponding database drivers: - - As mentioned, due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. + - As mentioned, due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. Next, run the Docker to start HertzBeat as before to experience the latest HertzBeat 1.6.0! @@ -145,4 +145,5 @@ Next, run the Docker to start HertzBeat as before to experience the latest Hertz If you do not want to go through the tedious script upgrade method mentioned above, you can directly export and import the monitoring tasks and threshold information from the old environment. - Deploy a new environment with the latest version. 
-- Export the monitoring tasks and threshold information from the old environment on the page \ No newline at end of file +- Export the monitoring tasks and threshold information from the old environment on the page + diff --git a/home/blog/2024-06-15-hertzbeat-v1.6.0.md b/home/blog/2024-06-15-hertzbeat-v1.6.0.md index 4678774f9e4..9647d1680e3 100644 --- a/home/blog/2024-06-15-hertzbeat-v1.6.0.md +++ b/home/blog/2024-06-15-hertzbeat-v1.6.0.md @@ -15,7 +15,6 @@ In this version, we added monitoring for OpenAi, Redfish protocol servers, plugi Due to license compatibility issues, we replaced multiple dependencies at the bottom layer, Hibernate -> EclipseLink, which is also a rare migration pitfall practice in the JPA ecosystem. At the same time, some bugs were fixed and some functions were optimized, and more complete documents. Welcome everyone to try to use, put forward valuable opinions and suggestions, and promote the development of HertzBeat together. - **Of course, the most important thing is to give the best thanks to the contributors in the community!** Download Page: https://hertzbeat.apache.org/docs/download/ @@ -45,6 +44,7 @@ Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-upd ## HertzBeat's 1.6.0 Version Release! ## Highlights + - HertzBeat is donated to the Apache Incubator. - migrate repo, clean up code, license, add more help doc and more - add dependency license doc @@ -78,6 +78,7 @@ Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-upd - and more bugfix, doc, features power by our contributors, thanks to them. 
## What's Changed + * bugfix collector can not startup alone by @tomsun28 in https://github.com/apache/hertzbeat/pull/1633 * translate some hertzbeat blog by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1635 * Check class description by @ZY945 in https://github.com/apache/hertzbeat/pull/1638 @@ -365,7 +366,6 @@ Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-upd * @lw-yang made their first contribution in https://github.com/apache/hertzbeat/pull/2047 * @xfl12345 made their first contribution in https://github.com/apache/hertzbeat/pull/2048 - ## Just one command to get started ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` @@ -374,12 +374,9 @@ Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-upd ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` - Detailed refer to HertzBeat Document https://hertzbeat.com/docs - ----- - +--- **Github: https://github.com/apache/hertzbeat** Download Page: https://hertzbeat.apache.org/docs/download/ @@ -388,7 +385,7 @@ Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-upd Have Fun! ----- +--- HertzBeat, Make Monitoring Easier! diff --git a/home/blog/2024-07-07-new-committer.md b/home/blog/2024-07-07-new-committer.md index 46d0e8e81e6..fbb007bfed3 100644 --- a/home/blog/2024-07-07-new-committer.md +++ b/home/blog/2024-07-07-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) > 🎉 I am very pleased to become a Committer for the Apache HertzBeat project, and I have been invited by the community to introduce myself 🥰. @@ -41,6 +40,3 @@ During this process, I also incorporated some excellent experiences from other c Lastly, I want to thank the community's logicz for inviting me to become a Committer and tom for reviewing my PRs. 
I wish HertzBeat a successful graduation from the incubator and becoming a star project 🎊. - - - diff --git a/home/blog/2024-07-08-new-committer.md b/home/blog/2024-07-08-new-committer.md index 95afe2afcf1..b46cfec79f9 100644 --- a/home/blog/2024-07-08-new-committer.md +++ b/home/blog/2024-07-08-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) ### First acquaintance with Apache Hertzbeat @@ -49,4 +48,3 @@ This process made me understand the importance of cooperation and made me feel t Becoming a Committer of the Apache Hertzbeat project is a challenging and rewarding journey. Through continuous learning and contribution, I have not only improved my technical ability, but also found a sense of belonging and accomplishment in the community. I hope that my experience can inspire more people to participate in the open source community and jointly promote the progress and development of technology. To borrow the words of Tom: Participating in open source should not affect everyone's work and life, otherwise it will go against the original intention. Everyone should participate in the free time after get off work. - diff --git a/home/blog/2024-07-15-new-committer.md b/home/blog/2024-07-15-new-committer.md index cd8b172c609..2b8ec2bb111 100644 --- a/home/blog/2024-07-15-new-committer.md +++ b/home/blog/2024-07-15-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) Hello everyone, I am very honored to receive an invitation from the community to be nominated as a Committer for Apache HertzBeat. Let me introduce myself briefly. I have been working as a backend developer since 2019, mainly using Java. Currently, I am working at a network security company, focusing on the backend development of network security-related products. 
diff --git a/home/blog/2024-07-27-new-committer.md b/home/blog/2024-07-27-new-committer.md index 44128c6aabc..ed8ecc8693e 100644 --- a/home/blog/2024-07-27-new-committer.md +++ b/home/blog/2024-07-27-new-committer.md @@ -14,7 +14,7 @@ keywords: [open source monitoring system, alerting system] ## Personal introduction -I have been working as a Java back-end developer for three years. At present, I participate in the development of converged message center in an Internet company. +I have been working as a Java back-end developer for three years. At present, I participate in the development of converged message center in an Internet company. ## First met Apache Hertzbeat @@ -22,7 +22,7 @@ Reading code and design in open-source framework is one way for me to improve my ## My first PR -I failed to start Hertzbeat when I followed instructions in CONTRIBUTING. The exception shows that Node.js version at lease 18 is required. Therefor I modified CONTRIBUTING and opened my first PR in Hertzbeat community. +I failed to start Hertzbeat when I followed instructions in CONTRIBUTING. The exception shows that Node.js version at lease 18 is required. Therefor I modified CONTRIBUTING and opened my first PR in Hertzbeat community. ## Keep active in community @@ -36,4 +36,4 @@ During the reading of Hertzbeat source code, I learned a lot of code writing and ## Conclusion -It's an interesting experience for me to become a Committer of Apache Hertzbeat and it motivates me day by day. I will continue to contribute to the Apache Hertzbeat community in the future, and I hope that Apache Hertzbeat can successfully graduate from the Apache incubator. \ No newline at end of file +It's an interesting experience for me to become a Committer of Apache Hertzbeat and it motivates me day by day. I will continue to contribute to the Apache Hertzbeat community in the future, and I hope that Apache Hertzbeat can successfully graduate from the Apache incubator. 
diff --git a/home/blog/2024-07-28-new-committer.md b/home/blog/2024-07-28-new-committer.md index cbaeab8fb90..c04ab4716ef 100644 --- a/home/blog/2024-07-28-new-committer.md +++ b/home/blog/2024-07-28-new-committer.md @@ -11,7 +11,8 @@ keywords: [open source monitoring system, alerting system] ![hertzBeathertzBeat](/img/blog/new-committer.png) > It's an honor for me to become a Committer of Apache Hertzbeat -## Personal introduction +> + ## Personal introduction I graduated in 2023 and am currently working as a Java developer in an Internet company. @@ -30,7 +31,9 @@ I downloaded the source code of Hertzbeat and compiled and tested it according t After submitting the first PR, I continuously followed the issues in the Hertzbeat community and attempted to solve the existing problems. I have successively completed tasks such as specifying @people on WeChat, integrating AI, adding a PrestoDB monitor, modifying bugs, and contributing to the official website documentation. ## Reward + After several months of contributing to open source, I have reaped a lot. I have learned the business logic in the code, code norms, some technical frameworks that I have never used before, and some algorithms. It can be said that I have benefited greatly. ## Conclusion -Becoming a Committer of Apache Hertzbeat is a very meaningful thing for me. In the future, I will continue to contribute to the Apache Hertzbeat community. I also hope that Apache Hertzbeat can successfully graduate from the incubator and that the community will become better and better. \ No newline at end of file + +Becoming a Committer of Apache Hertzbeat is a very meaningful thing for me. In the future, I will continue to contribute to the Apache Hertzbeat community. I also hope that Apache Hertzbeat can successfully graduate from the incubator and that the community will become better and better. 
diff --git a/home/blog/2024-07-29-new-committer.md b/home/blog/2024-07-29-new-committer.md index 9b7e63476a5..55aecd3eb94 100644 --- a/home/blog/2024-07-29-new-committer.md +++ b/home/blog/2024-07-29-new-committer.md @@ -13,22 +13,27 @@ keywords: [open source monitoring system, alerting system] > It's an honor for me to become a Committer of Apache HertzBeat ## Becoming Part of a Stronger Team: My Contributions and Growth + In the open-source community, every contribution not only pushes the project forward but also signifies personal growth. Recently, in the Apache HertzBeat project, I have contributed through a series of optimizations and feature enhancements, which have not only advanced the project but also improved my skills. Today, I would like to share my contribution experience and insights into my growth. ## Starting from the Details: Optimizing Visuals and Interactions + I firmly believe that details determine success or failure. When I first joined the project, I began by optimizing the interface to enhance the user's visual and interactive experience. I refined the modal window layout of the monitoring selection menu to better align with user operation habits. I adjusted the header style and content layout of the monitoring details page to make information presentation clearer and more intuitive. Additionally, I unified the border-radius values of components and addressed issues such as missing internationalization translations, ensuring the consistency and completeness of the system interface. These seemingly minor changes significantly enhanced the overall aesthetics and user experience of the system. Through this process, I gained a profound understanding of the importance of interface design for user experience and honed my attention to detail. ## Delving into Functional Modules for Enhanced Capabilities + In addition to interface optimization, I actively engaged in expanding and enhancing the functional modules of the system. 
I refactored repetitive code within the system, abstracted common components, and improved code reusability and maintainability. These improvements not only simplified the development process but also reduced the cost of maintenance in the long run. Furthermore, I extended the capabilities of the multi-functional input component by adding support for password types and search types, further enriching its functionality and versatility. During the process of implementing these features, I encountered numerous challenges. However, these challenges spurred me to continuously learn and explore new technologies and methodologies. By consulting official documentation and other resources, I gradually overcame these obstacles and successfully completed the tasks. This process not only enhanced my technical abilities but also deepened my understanding of the importance of team collaboration. ## Emphasizing User Feedback for Continuous Product Optimization + I firmly believe that users are the ultimate judges of a product. As such, I continuously collect and analyze user feedback both within the company and in the community, using these insights to guide targeted optimizations and improvements. By refining search and filtering functionalities and adopting a consistent and concise set of interactive elements, I have consistently enhanced the user experience. In this process, I realized the importance of a user-centric approach. Only by genuinely focusing on users' needs and expectations can we create products that meet market demands. ## Looking Ahead: Continuous Contribution and Growth + Reflecting on my past contributions, I feel a profound sense of pride and satisfaction. However, I am also acutely aware of the many areas where I still have room for improvement and need to continue learning and growing. 
Moving forward, I will uphold the spirit of rigor, innovation, and user-centricity, continuously exploring and practicing to contribute even more to the Apache HertzBeat project. I eagerly anticipate the opportunity to grow and progress alongside my fellow team members, jointly driving the project towards prosperity and success. diff --git a/home/docs/advanced/extend-http-default.md b/home/docs/advanced/extend-http-default.md index abc9921e9ce..1875a9cff77 100644 --- a/home/docs/advanced/extend-http-default.md +++ b/home/docs/advanced/extend-http-default.md @@ -4,14 +4,16 @@ title: HTTP Protocol System Default Parsing Method sidebar_label: Default Parsing Method --- -> After calling the HTTP api to obtain the response data, use the default parsing method of hertzbeat to parse the response data. +> After calling the HTTP api to obtain the response data, use the default parsing method of hertzbeat to parse the response data. -**The interface response data structure must be consistent with the data structure rules specified by hertzbeat** +**The interface response data structure must be consistent with the data structure rules specified by hertzbeat** -### HertzBeat data format specification -Note⚠️ The response data is JSON format. +### HertzBeat data format specification + +Note⚠️ The response data is JSON format. Single layer format :key-value + ```json { "metricName1": "metricValue", @@ -20,7 +22,9 @@ Single layer format :key-value "metricName4": "metricValue" } ``` + Multilayer format:Set key value in the array + ```json [ { @@ -37,9 +41,11 @@ Multilayer format:Set key value in the array } ] ``` + eg: -Query the CPU information of the custom system. The exposed interface is `/metrics/cpu`. We need `hostname,core,useage` Metric. -If there is only one virtual machine, its single-layer format is : +Query the CPU information of the custom system. The exposed interface is `/metrics/cpu`. We need `hostname,core,useage` Metric. 
+If there is only one virtual machine, its single-layer format is : + ```json { "hostname": "linux-1", @@ -49,7 +55,9 @@ If there is only one virtual machine, its single-layer format is : "runningTime": 100 } ``` -If there are multiple virtual machines, the multilayer format is: : + +If there are multiple virtual machines, the multilayer format is: : + ```json [ { @@ -76,7 +84,7 @@ If there are multiple virtual machines, the multilayer format is: : ] ``` -**The corresponding monitoring template yml can be configured as follows** +**The corresponding monitoring template yml can be configured as follows** ```yaml category: custom @@ -275,3 +283,4 @@ metrics: parseType: jsonPath parseScript: '$' ``` + diff --git a/home/docs/advanced/extend-http-example-hertzbeat.md b/home/docs/advanced/extend-http-example-hertzbeat.md index 80ddd938506..7f9fc7c93a5 100644 --- a/home/docs/advanced/extend-http-example-hertzbeat.md +++ b/home/docs/advanced/extend-http-example-hertzbeat.md @@ -8,12 +8,10 @@ Through this tutorial, we describe step by step how to add a monitoring type bas Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [http Protocol Customization](extend-http). - ### HTTP protocol parses the general response structure to obtain metric data > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. - ``` { "code": 200, @@ -22,6 +20,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz } ``` + As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. 
Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: @@ -63,16 +62,13 @@ As above, usually our background API interface will design such a general return **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. **Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - ```yaml category: custom # The monitoring type eg: linux windows tomcat mysql aws... @@ -195,38 +191,30 @@ metrics: **The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type. ** - ![](/img/docs/advanced/extend-http-example-1.png) - ### The system page adds the monitoring of `hertzbeat` monitoring type > We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### Set threshold alarm notification > Next, we can set the threshold normally. 
After the alarm is triggered, we can view it in the alarm center, add recipients, set alarm notifications, etc. Have Fun!!! - ---- #### over! This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. +If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. **github: https://github.com/apache/hertzbeat** diff --git a/home/docs/advanced/extend-http-example-token.md b/home/docs/advanced/extend-http-example-token.md index 6cc09b6065c..d4d7a6bed02 100644 --- a/home/docs/advanced/extend-http-example-token.md +++ b/home/docs/advanced/extend-http-example-token.md @@ -22,6 +22,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz "identifier": "admin" } ``` + **The response structure data is as follows**: ```json @@ -40,11 +41,9 @@ Before reading this tutorial, we hope that you are familiar with how to customiz **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - 1. The custom monitoring type needs to add a new configuration monitoring template yml. 
We directly reuse the `hertzbeat` monitoring type in Tutorial 1 and modify it based on it A monitoring configuration definition file named after the monitoring type - hertzbeat_token @@ -218,8 +217,7 @@ metrics: # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-for website monitoring, prometheus-prometheus exporter rule parseType: jsonPath parseScript: '$.data' - - +--- ``` **At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows: ** @@ -233,15 +231,12 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) - ** After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page. ** ![](/img/docs/advanced/extend-http-example-6.png) ![](/img/docs/advanced/extend-http-example-7.png) - - ### Use `token` as a variable parameter to collect and use the following metricss **Add an index group definition `summary` in `app-hertzbeat_token.yml`, which is the same as `summary` in Tutorial 1, and set the collection priority to 1** @@ -361,8 +356,7 @@ metrics: # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-for website monitoring, prometheus-prometheus exporter rule parseType: jsonPath parseScript: '$.data' - - +--- - name: summary priority: 1 fields: @@ -403,12 +397,12 @@ metrics: > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. Have Fun!!! ----- +--- #### over! This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! 
-If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. +If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. **github: https://github.com/apache/hertzbeat** diff --git a/home/docs/advanced/extend-http-jsonpath.md b/home/docs/advanced/extend-http-jsonpath.md index 772c96d20d3..86a49c06756 100644 --- a/home/docs/advanced/extend-http-jsonpath.md +++ b/home/docs/advanced/extend-http-jsonpath.md @@ -3,16 +3,18 @@ id: extend-http-jsonpath title: HTTP Protocol JsonPath Parsing Method sidebar_label: JsonPath Parsing Method --- + > After calling the HTTP api to obtain the response data, use JsonPath script parsing method to parse the response data. -Note⚠️ The response data is JSON format. +Note⚠️ The response data is JSON format. + +**Use the JsonPath script to parse the response data into data that conforms to the data structure rules specified by HertzBeat** -**Use the JsonPath script to parse the response data into data that conforms to the data structure rules specified by HertzBeat** +#### JsonPath Operator -#### JsonPath Operator -[JSONPath online verification](https://www.jsonpath.cn) +[JSONPath online verification](https://www.jsonpath.cn) -| JSONPATH | Help description | +| JSONPATH | Help description | |------------------|----------------------------------------------------------------------------------------| | $ | Root object or element | | @ | Current object or element | @@ -25,8 +27,10 @@ Note⚠️ The response data is JSON format. 
| ?() | Filter (script) expression | | () | Script Expression | -#### HertzBeat data format specification +#### HertzBeat data format specification + Single layer format :key-value + ```json { "metricName1": "metricValue", @@ -35,7 +39,9 @@ Single layer format :key-value "metricName4": "metricValue" } ``` + Multilayer format:Set key value in the array + ```json [ { @@ -56,7 +62,8 @@ Multilayer format:Set key value in the array #### Example Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. -The raw data returned by the interface is as follows: +The raw data returned by the interface is as follows: + ```json { "firstName": "John", @@ -80,7 +87,8 @@ The raw data returned by the interface is as follows: } ``` -We use the jsonpath script to parse, and the corresponding script is: `$.number[*]`,The parsed data structure is as follows: +We use the jsonpath script to parse, and the corresponding script is: `$.number[*]`,The parsed data structure is as follows: + ```json [ { @@ -93,9 +101,10 @@ We use the jsonpath script to parse, and the corresponding script is: `$.number[ } ] ``` + This data structure conforms to the data format specification of HertzBeat, and the Metric `type,num` is successfully extracted. 
-**The corresponding monitoring template yml can be configured as follows** +**The corresponding monitoring template yml can be configured as follows** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -163,3 +172,4 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` + diff --git a/home/docs/advanced/extend-http.md b/home/docs/advanced/extend-http.md index 4df5fce33b0..10e5bdf9623 100644 --- a/home/docs/advanced/extend-http.md +++ b/home/docs/advanced/extend-http.md @@ -1,35 +1,33 @@ --- id: extend-http title: HTTP Protocol Custom Monitoring -sidebar_label: HTTP Protocol Custom Monitoring +sidebar_label: HTTP Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use HTTP protocol to customize Metric monitoring +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use HTTP protocol to customize Metric monitoring -### HTTP protocol collection process +### HTTP protocol collection process 【**Call HTTP API**】->【**Response Verification**】->【**Parse Response Data**】->【**Default method parsing|JsonPath script parsing | XmlPath parsing(todo) | Prometheus parsing**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of HTTP protocol. We need to configure HTTP request parameters, configure which Metrics to obtain, and configure the parsing method and parsing script for response data. -HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. +HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. 
**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. For details, refer to [**System Default Parsing**](extend-http-default) -**JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) - +**JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) -### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- -Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. +Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_http` which use the HTTP protocol to collect data. 
@@ -281,3 +279,4 @@ metrics: parseType: default ``` + diff --git a/home/docs/advanced/extend-jdbc.md b/home/docs/advanced/extend-jdbc.md index 3527ba60d5d..ec42f84f642 100644 --- a/home/docs/advanced/extend-jdbc.md +++ b/home/docs/advanced/extend-jdbc.md @@ -1,68 +1,73 @@ --- id: extend-jdbc title: JDBC Protocol Custom Monitoring -sidebar_label: JDBC Protocol Custom Monitoring +sidebar_label: JDBC Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JDBC(support mysql,mariadb,postgresql,sqlserver at present) to customize Metric monitoring. -> JDBC protocol custom monitoring allows us to easily monitor Metrics we want by writing SQL query statement. -### JDBC protocol collection process -【**System directly connected to MYSQL**】->【**Run SQL query statement**】->【**parse reponse data: oneRow, multiRow, columns**】->【**Metric data extraction**】 +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JDBC(support mysql,mariadb,postgresql,sqlserver at present) to customize Metric monitoring. +> JDBC protocol custom monitoring allows us to easily monitor Metrics we want by writing SQL query statement. + +### JDBC protocol collection process + +【**System directly connected to MYSQL**】->【**Run SQL query statement**】->【**parse reponse data: oneRow, multiRow, columns**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of JDBC protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query SQL statements. -### Data parsing method +### Data parsing method + We can obtain the corresponding Metric data through the data fields queried by SQL and the Metric mapping we need. At present, there are three mapping parsing methods:oneRow, multiRow, columns. 
-#### **oneRow** -> Query a row of data, return the column name of the result set through query and map them to the queried field. +#### **oneRow** + +> Query a row of data, return the column name of the result set through query and map them to the queried field. eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book limit 1; -Here the Metric field and the response data can be mapped into a row of collected data one by one. +Here the Metric field and the response data can be mapped into a row of collected data one by one. #### **multiRow** -> Query multiple rows of data, return the column names of the result set and map them to the queried fields. + +> Query multiple rows of data, return the column names of the result set and map them to the queried fields. eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book; -Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. +Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. #### **columns** -> Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. + +> Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. eg: queried fields:one two three four query SQL:select key, value from book; -SQL response data: +SQL response data: -| key | value | -|---------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | +| key | value | +|-------|-------| +| one | 243 | +| two | 435 | +| three | 332 | +| four | 643 | Here by mapping the Metric field with the key of the response data, we can obtain the corresponding value as collection and monitoring data. 
-### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. -eg:Define a custom monitoring type `app` named `example_sql` which use the JDBC protocol to collect data. - +eg:Define a custom monitoring type `app` named `example_sql` which use the JDBC protocol to collect data. ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -236,3 +241,4 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` + diff --git a/home/docs/advanced/extend-jmx.md b/home/docs/advanced/extend-jmx.md index 5284118f8a7..2f9ba992f63 100644 --- a/home/docs/advanced/extend-jmx.md +++ b/home/docs/advanced/extend-jmx.md @@ -1,12 +1,14 @@ --- id: extend-jmx title: JMX Protocol Custom Monitoring -sidebar_label: JMX Protocol Custom Monitoring +sidebar_label: JMX Protocol Custom Monitoring --- + > From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JMX to customize Metric monitoring. > JMX protocol custom monitoring allows us to easily monitor Metrics we want by config JMX Mbeans Object. 
### JMX protocol collection process + 【**Peer Server Enable Jmx Service**】->【**HertzBeat Connect Peer Server Jmx**】->【**Query Jmx Mbean Object Data**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of JMX protocol. We need to configure JMX request parameters, configure which Metrics to obtain, and configure Mbeans Object. @@ -15,25 +17,24 @@ It can be seen from the process that we define a monitoring type of JMX protocol By configuring the monitoring template YML metrics `field`, `aliasFields`, `objectName` of the `jmx` protocol to map and parse the `Mbean` object information exposed by the peer system. -### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ![](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_jvm` which use the JVM protocol to collect data. 
- ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: service @@ -191,3 +192,4 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` + diff --git a/home/docs/advanced/extend-ngql.md b/home/docs/advanced/extend-ngql.md index c07c5ae8ca0..2047e1d1cf5 100644 --- a/home/docs/advanced/extend-ngql.md +++ b/home/docs/advanced/extend-ngql.md @@ -22,9 +22,9 @@ Mapping the fields returned by NGQL queries to the metrics we need allows us to For example: - online_meta_count#SHOW HOSTS META#Status#ONLINE - Counts the number of rows returned by `SHOW HOSTS META` where Status equals ONLINE. +Counts the number of rows returned by `SHOW HOSTS META` where Status equals ONLINE. - online_meta_count#SHOW HOSTS META## - Counts the number of rows returned by `SHOW HOSTS META`. +Counts the number of rows returned by `SHOW HOSTS META`. #### **oneRow** @@ -68,12 +68,13 @@ Notes: ![HertzBeat Page](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Template YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> Monitoring template is used to define the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information, etc. +> Monitoring template is used to define the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information, etc. eg: Customize a monitoring type named example_ngql, which collects metric data using NGQL. 
@@ -165,3 +166,4 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` + diff --git a/home/docs/advanced/extend-point.md b/home/docs/advanced/extend-point.md index 6fdbdd636b7..e4d92d8d875 100644 --- a/home/docs/advanced/extend-point.md +++ b/home/docs/advanced/extend-point.md @@ -1,31 +1,29 @@ --- id: extend-point title: Custom Monitoring -sidebar_label: Custom Monitoring +sidebar_label: Custom Monitoring --- -> HertzBeat has custom monitoring ability. You only need to configure monitoring template yml to fit a custom monitoring type. -> Custom monitoring currently supports [HTTP protocol](extend-http),[JDBC protocol](extend-jdbc), [SSH protocol](extend-ssh), [JMX protocol](extend-jmx), [SNMP protocol](extend-snmp). And it will support more general protocols in the future. -### Custom Monitoring Steps +> HertzBeat has custom monitoring ability. You only need to configure monitoring template yml to fit a custom monitoring type. +> Custom monitoring currently supports [HTTP protocol](extend-http),[JDBC protocol](extend-jdbc), [SSH protocol](extend-ssh), [JMX protocol](extend-jmx), [SNMP protocol](extend-snmp). And it will support more general protocols in the future. -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** +### Custom Monitoring Steps +**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- Configuration usages of the monitoring templates yml are detailed below. -### Monitoring Templates YML +### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. 
+> +> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. +eg:Define a custom monitoring type `app` named `example2` which use the HTTP protocol to collect data. -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example2` which use the HTTP protocol to collect data. - -**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - +**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -140,3 +138,4 @@ metrics: parseType: website ``` + diff --git a/home/docs/advanced/extend-snmp.md b/home/docs/advanced/extend-snmp.md index c97aea1f766..b3bb9173c87 100644 --- a/home/docs/advanced/extend-snmp.md +++ b/home/docs/advanced/extend-snmp.md @@ -1,23 +1,22 @@ --- id: extend-snmp title: SNMP Protocol Custom Monitoring -sidebar_label: SNMP Protocol Custom Monitoring +sidebar_label: SNMP Protocol Custom Monitoring --- > From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SNMP to customize Metric monitoring. > JMX protocol custom monitoring allows us to easily monitor Metrics we want by config SNMP MIB OIDs. ### SNMP protocol collection process + 【**Peer Server Enable SNMP Service**】->【**HertzBeat Connect Peer Server SNMP**】->【**Query Oids Data**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of Snmp protocol. 
We need to configure Snmp request parameters, configure which Metrics to obtain, and configure oids. - ### Data parsing method By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` protocol of the monitoring template YML to capture the data specified by the peer and parse the mapping. - ### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** @@ -25,18 +24,17 @@ By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` p ![](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_windows` which use the SNMP protocol to collect data. 
- ```yaml # The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring category: os @@ -171,3 +169,4 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` + diff --git a/home/docs/advanced/extend-ssh.md b/home/docs/advanced/extend-ssh.md index 772ee315207..bf960376179 100644 --- a/home/docs/advanced/extend-ssh.md +++ b/home/docs/advanced/extend-ssh.md @@ -1,21 +1,25 @@ --- id: extend-ssh title: SSH Protocol Custom Monitoring -sidebar_label: SSH Protocol Custom Monitoring +sidebar_label: SSH Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SSH protocol to customize Metric monitoring. -> SSH protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. -### SSH protocol collection process -【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SSH protocol to customize Metric monitoring. +> SSH protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. + +### SSH protocol collection process + +【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of SSH protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query script statements. 
-### Data parsing method +### Data parsing method + We can obtain the corresponding Metric data through the data fields queried by the SHELL script and the Metric mapping we need. At present, there are two mapping parsing methods:oneRow and multiRow which can meet the needs of most Metrics. -#### **oneRow** -> Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. +#### **oneRow** + +> Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. eg: Metrics of Linux to be queried hostname-host name,uptime-start time @@ -23,31 +27,37 @@ Host name original query command:`hostname` Start time original query command:`uptime | awk -F "," '{print $1}'` Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): `hostname; uptime | awk -F "," '{print $1}'` -The data responded by the terminal is: +The data responded by the terminal is: + ``` tombook 14:00:15 up 72 days -``` +``` + At last collected Metric data is mapped one by one as: hostname is `tombook` -uptime is `14:00:15 up 72 days` +uptime is `14:00:15 up 72 days` -Here the Metric field and the response data can be mapped into a row of collected data one by one +Here the Metric field and the response data can be mapped into a row of collected data one by one #### **multiRow** -> Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. + +> Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. 
eg: Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory -Memory metrics original query command:`free -m`, Console response: +Memory metrics original query command:`free -m`, Console response: + ```shell total used free shared buff/cache available Mem: 7962 4065 333 1 3562 3593 Swap: 8191 33 8158 ``` + In hertzbeat multiRow format parsing requires a one-to-one mapping between the column name of the response data and the indicaotr value, so the corresponding query SHELL script is: `free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` -Console response is: +Console response is: + ```shell total used free buff_cache available 7962 4066 331 3564 3592 @@ -60,18 +70,17 @@ Here the Metric field and the response data can be mapped into collected data on **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_linux` which use the SSH protocol to collect data. 
- ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: os @@ -203,3 +212,4 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` + diff --git a/home/docs/advanced/extend-telnet.md b/home/docs/advanced/extend-telnet.md index 9aa8eac92f1..7b45c5aab32 100644 --- a/home/docs/advanced/extend-telnet.md +++ b/home/docs/advanced/extend-telnet.md @@ -1,29 +1,31 @@ --- id: extend-telnet title: Telnet Protocol Custom Monitoring -sidebar_label: Telnet Protocol Custom Monitoring +sidebar_label: Telnet Protocol Custom Monitoring --- - + > From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use Telnet to customize Metric monitoring. > Telnet protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. -### Telnet protocol collection process +### Telnet protocol collection process + 【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of Telnet protocol. We need to configure Telnet request parameters, configure which Metrics to obtain, and configure query script statements. ### Data parsing method -By configuring the metrics `field`, `aliasFields` the `Telnet` protocol of the monitoring template YML to capture the data specified by the peer and parse the mapping. +By configuring the metrics `field`, `aliasFields` the `Telnet` protocol of the monitoring template YML to capture the data specified by the peer and parse the mapping. 
-### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ![](/img/docs/advanced/extend-point-1.png) ------- -Configuration usages of the monitoring templates yml are detailed below. + +Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML @@ -32,7 +34,6 @@ Configuration usages of the monitoring templates yml are detailed below. eg:Define a custom monitoring type `app` named `zookeeper` which use the telnet protocol to collect data. - ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -296,3 +297,4 @@ metrics: ``` + diff --git a/home/docs/advanced/extend-tutorial.md b/home/docs/advanced/extend-tutorial.md index 03b73707789..ab25bed88f7 100644 --- a/home/docs/advanced/extend-tutorial.md +++ b/home/docs/advanced/extend-tutorial.md @@ -8,12 +8,10 @@ Through this tutorial, we describe step by step how to customize and adapt a mon Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [Http Protocol Customization](extend-http). - ### HTTP protocol parses the general response structure to obtain metrics data > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. 
- ``` { "code": 200, @@ -22,6 +20,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz } ``` + As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: @@ -58,17 +57,14 @@ As above, usually our background API interface will design such a general return **This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app. ** - ### Add Monitoring Template Yml **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. **Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** @@ -207,32 +203,24 @@ metrics: **The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type. ** - ![](/img/docs/advanced/extend-http-example-1.png) - ### The system page adds the monitoring of `hertzbeat` monitoring type > We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. 
- ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### Set threshold alarm notification > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. Have Fun!!! - ---- #### over! diff --git a/home/docs/community/become_committer.md b/home/docs/community/become_committer.md index f2824bdc4ed..382e69fb9bf 100644 --- a/home/docs/community/become_committer.md +++ b/home/docs/community/become_committer.md @@ -5,20 +5,20 @@ sidebar_position: 2 --- ## Become A Committer of Apache HertzBeat @@ -66,6 +66,7 @@ of the main website or HertzBeat's GitHub repositories. - +1 month with solid activity and engagement. ### Quality of contributions + - A solid general understanding of the project - Well tested, well-designed, following Apache HertzBeat coding standards, and simple patches. @@ -82,3 +83,4 @@ of the main website or HertzBeat's GitHub repositories. - Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events + diff --git a/home/docs/community/become_pmc_member.md b/home/docs/community/become_pmc_member.md index cf48cbe7c82..cd9dff4e02a 100644 --- a/home/docs/community/become_pmc_member.md +++ b/home/docs/community/become_pmc_member.md @@ -5,20 +5,20 @@ sidebar_position: 3 --- ## Become A PMC member of Apache HertzBeat @@ -66,6 +66,7 @@ of the main website or HertzBeat's GitHub repositories. - +3 month with solid activity and engagement. 
### Quality of contributions + - A solid general understanding of the project - Well tested, well-designed, following Apache HertzBeat coding standards, and simple patches. @@ -82,3 +83,4 @@ of the main website or HertzBeat's GitHub repositories. - Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events + diff --git a/home/docs/community/code-style-and-quality-guide.md b/home/docs/community/code-style-and-quality-guide.md index 755a98f3fd7..c86438a577e 100644 --- a/home/docs/community/code-style-and-quality-guide.md +++ b/home/docs/community/code-style-and-quality-guide.md @@ -5,39 +5,36 @@ sidebar_position: 3 --- - ## 1 Pull Requests & Changes Rule 1. `ISSUE`/`PR`(pull request) driving and naming - - After creating a new `PR`, you need to associate the existing corresponding `ISSUE` at the Github Development button on the `PR` page (if there is no corresponding ISSUE, it is recommended to create a new corresponding ISSUE). + - After creating a new `PR`, you need to associate the existing corresponding `ISSUE` at the Github Development button on the `PR` page (if there is no corresponding ISSUE, it is recommended to create a new corresponding ISSUE). - - Title naming format - `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` + - Title naming format + `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` 2. Description - - Please fill in the `PR` template to describe the contribution. So that the reviewer can understand the problem and solution from the description, rather than just from the code. - - Check the CheckList - + - Please fill in the `PR` template to describe the contribution. So that the reviewer can understand the problem and solution from the description, rather than just from the code. + - Check the CheckList 3. 
It's recommended that `PR` should be arranged changes such as `cleanup`, `Refactor`, `improve`, and `feature` into separated `PRs`/`Commits`. - 4. Commit message(English, lowercase, no special characters) The commit of messages should follow a pattern similar to the `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` @@ -50,11 +47,11 @@ sidebar_position: 3 3. Set **Checkstyle version** to **10.14.2**. 4. Set **Scan scope** to **Only Java sources (including tests)**. 5. Click **+** button in the **Configuration** section to open a dialog to choose the checkstyle config file. - 1. Enter a **Description**. For example, hertzbeat. - 2. Select **Use a local checkstyle file**. - 3. Set **File** to **script/checkstyle/checkstyle.xml**. - 4. Select **Store relative to project location**. - 5. Click **Next** → **Next** → **Finish**. + 1. Enter a **Description**. For example, hertzbeat. + 2. Select **Use a local checkstyle file**. + 3. Set **File** to **script/checkstyle/checkstyle.xml**. + 4. Select **Store relative to project location**. + 5. Click **Next** → **Next** → **Finish**. 6. Activate the configuration you just added by toggling the corresponding box. 7. Click **OK**. @@ -69,96 +66,94 @@ sidebar_position: 3 ### 3.1 Naming Style 1. Prioritize selecting nouns for variable naming, it's easier to distinguish between `variables` or `methods`. + ```java - Cache publicKeyCache; + Cache publicKeyCache; ``` - 2. Pinyin abbreviations are prohibited for variables (excluding nouns such as place names), such as chengdu. - 3. It is recommended to end variable names with a `type`. For variables of type `Collection/List`, take `xxxx` (plural representing multiple elements) or end with `xxxList` (specific type). For variables of type `map`, describe the `key` and `value` clearly: + ```java - Map idUserMap; - Map userIdNameMap; + Map idUserMap; + Map userIdNameMap; ``` - 4. That can intuitively know the type and meaning of the variable through its name. 
Method names should start with a verb first as follows: + ```java - void computeVcores(Object parameter1); + void computeVcores(Object parameter1); ``` + > Note: It is not necessary to strictly follow this rule in the `Builder` tool class. - ### 3.2 Constant Variables Definition -1. Redundant strings should be extracted as constants - >If a constant has been hardcoded twice or more times, please directly extract it as a constant and change the corresponding reference. - In generally, constants in `log` can be ignored to extract. - - - Negative demo: - - ```java - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put("status", "success"); - resp.put("code", ResponseCode.CODE_SUCCESS); - resp.put("data", data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put("status", "error"); - resp.put("code", ResponseCode.CODE_FAIL); - resp.put("data", null); - return resp; - } - ``` - - - Positive demo: - - > Strings are extracted as constant references. - - ```java - public static final String STATUS = "status"; - public static final String CODE = "code"; - public static final String DATA = "data"; - - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "success"); - resp.put(CODE, ResponseCode.CODE_SUCCESS); - resp.put(DATA, data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "error"); - resp.put(CODE, ResponseCode.CODE_FAIL); - resp.put(DATA, null); - return resp; - } - ``` +1. Redundant strings should be extracted as constants + + > If a constant has been hardcoded twice or more times, please directly extract it as a constant and change the corresponding reference. + > In generally, constants in `log` can be ignored to extract. 
+ + - Negative demo: + + ```java + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put("status", "success"); + resp.put("code", ResponseCode.CODE_SUCCESS); + resp.put("data", data); + return resp; + } + + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put("status", "error"); + resp.put("code", ResponseCode.CODE_FAIL); + resp.put("data", null); + return resp; + } + ``` + - Positive demo: + + > Strings are extracted as constant references. + + ```java + public static final String STATUS = "status"; + public static final String CODE = "code"; + public static final String DATA = "data"; + + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "success"); + resp.put(CODE, ResponseCode.CODE_SUCCESS); + resp.put(DATA, data); + return resp; + } + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "error"); + resp.put(CODE, ResponseCode.CODE_FAIL); + resp.put(DATA, null); + return resp; + } + ``` 2. Ensure code readability and intuitiveness - - The string in the `annotation` symbol doesn't need to be extracted as constant. +- The string in the `annotation` symbol doesn't need to be extracted as constant. - - The referenced `package` or `resource` name doesn't need to be extracted as constant. +- The referenced `package` or `resource` name doesn't need to be extracted as constant. 3. Variables that have not been reassigned must also be declared as final types. -4. About the arrangement order of `constant/variable` lines +4. About the arrangement order of `constant/variable` lines Sort the variable lines in the class in the order of 1. `public static final V`, `static final V`,`protected static final V`, `private static final V` 2. `public static v`, `static v`,`protected static v`, `private static v` 3. `public v`, `v`, `protected v`, `private v` - ### 3.3 Methods Rule 1. 
Sort the methods in the class in the order of `public`, `protected`, `private` @@ -174,9 +169,9 @@ sidebar_position: 3 3. If there are too many lines of code in the method, please have a try on using multiple sub methods at appropriate points to segment the method body. Generally speaking, it needs to adhere to the following principles: - - Convenient testing - - Good semantics - - Easy to read + - Convenient testing + - Good semantics + - Easy to read In addition, it is also necessary to consider whether the splitting is reasonable in terms of components, logic, abstraction, and other aspects in the scenario. @@ -185,35 +180,31 @@ sidebar_position: 3 ### 3.4 Collection Rule 1. For `collection` returned values, unless there are special `concurrent` (such as thread safety), always return the `interface`, such as: - - - returns List if use `ArrayList` - - returns Map if use `HashMap` - - returns Set if use `HashSet` - + - returns List if use `ArrayList` + - returns Map if use `HashMap` + - returns Set if use `HashSet` 2. If there are multiple threads, the following declaration or returned types can be used: - ```java - private CurrentHashMap map; - public CurrentHashMap funName(); - ``` +```java +private CurrentHashMap map; +public CurrentHashMap funName(); +``` 3. Use `isEmpty()` instead of `length() == 0` or `size() == 0` + - Negative demo: - - Negative demo: - - ```java - if (pathPart.length() == 0) { - return; - } - ``` - - - Positive demo: + ```java + if (pathPart.length() == 0) { + return; + } + ``` + - Positive demo: - ```java - if (pathPart.isEmpty()) { - return; - } - ``` + ```java + if (pathPart.isEmpty()) { + return; + } + ``` ### 3.5 Concurrent Processing @@ -226,9 +217,8 @@ sidebar_position: 3 ### 3.6 Control/Condition Statements 1. 
Avoid unreasonable `condition/control` branches order leads to: - - - Multiple code line `depths` of `n+1` - - Redundant lines + - Multiple code line `depths` of `n+1` + - Redundant lines Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to continuous nested `if... else..`, it should be considered to try - `merging branches`, @@ -237,77 +227,85 @@ Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to conti to reduce code line depth and improve readability like follows: - Union or merge the logic into the next level calling - - Negative demo: - ```java - if (isInsert) { - save(platform); - } else { - updateById(platform); - } - ``` - - Positive demo: - ```java - saveOrUpdate(platform); - ``` +- Negative demo: + +```java +if (isInsert) { +save(platform); +} else { +updateById(platform); +} +``` + +- Positive demo: + +```java +saveOrUpdate(platform); +``` + - Merge the conditions - - Negative demo: - ```java - if (expression1) { - if(expression2) { - ...... - } - } - ``` - - Positive demo: - ```java - if (expression1 && expression2) { - ...... - } - ``` +- Negative demo: + +```java +if (expression1) { +if(expression2) { +...... +} +} + +``` + +- Positive demo: + + ```java + if (expression1 && expression2) { + ...... + } + ``` - Reverse the condition - - Negative demo: - - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... - if (condition1) { - ... - } else { - ... - } - } - ``` - - - Positive demo: - - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` +- Negative demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (condition1) { + ... + } else { + ... + } + } + ``` +- Positive demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (!condition1) { + ... + return; + } + // ... 
+ } + ``` - Using a single variable or method to reduce the complex conditional expression - - Negative demo: - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` - - - Positive demo: - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // definition of the containsSqlServer - ``` +- Negative demo: + + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + ... + } + ``` +- Positive demo: + + ```java + if (containsSqlServer(dbType)) { + .... + } + //..... + // definition of the containsSqlServer + ``` > Using `sonarlint` and `better highlights` to check code depth looks like good in the future. @@ -315,22 +313,22 @@ to reduce code line depth and improve readability like follows: 1. Method lacks comments: - - `When`: When can the method be called - - `How`: How to use this method and how to pass parameters, etc. - - `What`: What functions does this method achieve - - `Note`: What should developers pay attention to when calling this method +- `When`: When can the method be called +- `How`: How to use this method and how to pass parameters, etc. +- `What`: What functions does this method achieve +- `Note`: What should developers pay attention to when calling this method 2. Missing necessary class header description comments. - Add `What`, `Note`, etc. like mentioned in the `1`. +Add `What`, `Note`, etc. like mentioned in the `1`. 3. The method declaration in the interface must be annotated. - - If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. +- If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. 
- - If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. +- If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. -4. The first word in the comment lines need to be capitalized, like `param` lines, `return` lines. +4. The first word in the comment lines need to be capitalized, like `param` lines, `return` lines. If a special reference as a subject does not need to be capitalized, special symbols such as quotation marks need to be noted. ### 3.8 Java Lambdas @@ -338,31 +336,29 @@ to reduce code line depth and improve readability like follows: 1. Prefer `non-capturing` lambdas (lambdas that do not contain references to the outer scope). Capturing lambdas need to create a new object instance for every call. `Non-capturing` lambdas can use the same instance for each invocation. - - Negative demo: +- Negative demo: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` - - - Positive demo: + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` +- Positive demo: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. Consider method references instead of inline lambdas - - Negative demo: - - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` +- Negative demo: - - Positive demo: + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` +- Positive demo: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -380,186 +376,180 @@ to reduce code line depth and improve readability like follows: 1. 
Use `StringUtils.isBlank` instead of `StringUtils.isEmpty` - - Negative demo: - - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` - - - Positive demo: - - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` +- Negative demo: + + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` +- Positive demo: + + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. Use `StringUtils.isNotBlank` instead of `StringUtils.isNotEmpty` - - Negative demo: - - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` - - - Positive demo: - - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` +- Negative demo: + + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` +- Positive demo: + + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. Use `StringUtils.isAllBlank` instead of `StringUtils.isAllEmpty` - - Negative demo: - - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` - - - Positive demo: - - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` +- Negative demo: + + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` +- Positive demo: + + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` Class 1. Enumeration value comparison - - Negative demo: - - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` - - - Positive demo: - - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` +- Negative demo: + + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` +- Positive demo: + + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. Enumeration classes do not need to implement Serializable - - Negative demo: - - ```java - public enum JobStatus implements Serializable { - ... - } - ``` - - - Positive demo: - - ```java - public enum JobStatus { - ... 
- } - ``` +- Negative demo: + + ```java + public enum JobStatus implements Serializable { + ... + } + ``` +- Positive demo: + + ```java + public enum JobStatus { + ... + } + ``` 3. Use `Enum.name()` instead of `Enum.toString()` - - Negative demo: - - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` - - - Positive demo: - - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` +- Negative demo: + + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` +- Positive demo: + + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. Enumeration class names uniformly use the Enum suffix - - Negative demo: - - ```java - public enum JobStatus { - ... - } - ``` - - - Positive demo: - - ```java - public enum JobStatusEnum { - ... - } - ``` +- Negative demo: + + ```java + public enum JobStatus { + ... + } + ``` +- Positive demo: + + ```java + public enum JobStatusEnum { + ... + } + ``` ### 3.13 `Deprecated` Annotation - - Negative demo: +- Negative demo: - ```java - @deprecated - public void process(String input) { - ... - } - ``` +```java +@deprecated +public void process(String input) { + ... +} +``` - - Positive demo: +- Positive demo: - ```java - @Deprecated - public void process(String input) { - ... - } - ``` +```java +@Deprecated +public void process(String input) { + ... +} +``` ## 4 Log 1. Use `placeholders` for log output: - - Negative demo - ```java - log.info("Deploy cluster request " + deployRequest); - ``` - - Positive demo - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` +- Negative demo -2. 
Pay attention to the selection of `log level` when printing logs + ```java + log.info("Deploy cluster request " + deployRequest); + ``` +- Positive demo + + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` - When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. +2. Pay attention to the selection of `log level` when printing logs - - Negative demo: +When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. - Assuming the current log level is `INFO`: +- Negative demo: - ```java - // ignored declaration lines. - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` + Assuming the current log level is `INFO`: - - Positive demo: + ```java + // ignored declaration lines. + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` +- Positive demo: - In this case, we should determine the log level in advance before making actual log calls as follows: + In this case, we should determine the log level in advance before making actual log calls as follows: - ```java - // ignored declaration lines. - List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + ```java + // ignored declaration lines. + List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 Testing @@ -568,8 +558,14 @@ to reduce code line depth and improve readability like follows: 2. The implemented interface needs to write the `e2e` test case script under the `e2e` module. 
## References + - https://site.mockito.org/ - https://alibaba.github.io/p3c/ - https://rules.sonarsource.com/java/ - https://junit.org/junit5/ - https://streampark.apache.org/ + +``` + +``` + diff --git a/home/docs/community/contact.md b/home/docs/community/contact.md index 91a4659ae5c..decd6d27c8c 100644 --- a/home/docs/community/contact.md +++ b/home/docs/community/contact.md @@ -1,7 +1,7 @@ --- id: contact title: Join discussion -sidebar_label: Discussion +sidebar_label: Discussion --- > If you need any help or want to exchange suggestions during the use process, you can discuss and exchange through ISSUE or Github Discussion. diff --git a/home/docs/community/contribution.md b/home/docs/community/contribution.md index 06987053696..f7d932bcd2c 100644 --- a/home/docs/community/contribution.md +++ b/home/docs/community/contribution.md @@ -5,20 +5,20 @@ sidebar_position: 0 --- > We are committed to maintaining a happy community that helps each other, welcome every contributor to join us! @@ -50,7 +50,6 @@ Even small corrections to typos are very welcome :) > To get HertzBeat code running on your development tools, and able to debug with breakpoints. > This is a front-end and back-end separation project. To start the local code, the back-end manager and the front-end web-app must be started separately. - #### Backend start 1. Requires `maven3+`, `java17` and `lombok` environments @@ -88,23 +87,31 @@ Of course, if you have a good idea, you can also propose it directly on GitHub D 1. First you need to fork your target [hertzbeat repository](https://github.com/apache/hertzbeat). 2. Then download the code locally with git command: + ```shell git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended ``` + 3. After the download is complete, please refer to the getting started guide or README file of the target repository to initialize the project. 4. 
Then, you can refer to the following command to submit the code: + ```shell git checkout -b a-feature-branch #Recommended ``` + 5. Submit the coed as a commit, the commit message format specification required: [module name or type name]feature or bugfix or doc: custom message. + ```shell git add git commit -m '[docs]feature: necessary instructions' #Recommended ``` + 6. Push to the remote repository + ```shell git push origin a-feature-branch ``` + 7. Then you can initiate a new PR (Pull Request) on GitHub. Please note that the title of the PR needs to conform to our spec, and write the necessary description in the PR to facilitate code review by Committers and other contributors. @@ -137,6 +144,7 @@ git pull upstream master ``` ### HertzBeat Improvement Proposal (HIP) + If you have major new features(e.g., support metrics push gateway, support logs monitoring), you need to write a design document known as a HertzBeat Improvement Proposal (HIP). Before starting to write a HIP, make sure you follow the process [here](https://github.com/apache/hertzbeat/tree/master/hip). ### How to become a Committer? @@ -152,14 +160,15 @@ Add WeChat account `ahertzbeat` to pull you into the WeChat group. ## 🥐 Architecture - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** Provide monitoring management, system management basic services. + > Provides monitoring management, monitoring configuration management, system user management, etc. -- **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. +> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. > Use common protocols to remotely collect and obtain peer-to-peer metrics data. -- **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services. 
+> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services. > Metrics data management, data query, calculation and statistics. -- **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** Provide alert service. +> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** Provide alert service. > Alarm calculation trigger, monitoring status linkage, alarm configuration, and alarm notification. -- **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** Provide web ui. +> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** Provide web ui. > Angular Web UI. ![hertzBeat](/img/docs/hertzbeat-arch.png) diff --git a/home/docs/community/development.md b/home/docs/community/development.md index 6bc9544a900..549084ce516 100644 --- a/home/docs/community/development.md +++ b/home/docs/community/development.md @@ -1,7 +1,7 @@ --- id: development title: How to Run or Build HertzBeat? -sidebar_label: Development +sidebar_label: Development --- ## Getting HertzBeat code up and running @@ -10,7 +10,6 @@ sidebar_label: Development > This is a front-end and back-end separation project. > To start the local code, the back-end [manager](https://github.com/apache/hertzbeat/tree/master/manager) and the front-end [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) must be started separately. - ### Backend start 1. Requires `maven3+`, `java17` and `lombok` environments @@ -37,9 +36,9 @@ sidebar_label: Development 7. Browser access to localhost:4200 to start, default account/password is *admin/hertzbeat* -## Build HertzBeat binary package +## Build HertzBeat binary package -> Requires `maven3+`, `java17`, `node` and `yarn` environments. +> Requires `maven3+`, `java17`, `node` and `yarn` environments. ### Frontend build @@ -53,7 +52,6 @@ sidebar_label: Development 5. Build web-app: `yarn package` - ### Backend build 1. 
Requires `maven3+`, `java17` environments diff --git a/home/docs/community/document.md b/home/docs/community/document.md index a4f871bb753..e561b3f1b2c 100644 --- a/home/docs/community/document.md +++ b/home/docs/community/document.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- Good documentation is critical for any type of software. Any contribution that can improve the HertzBeat documentation is welcome. @@ -94,4 +94,3 @@ css and other style files are placed in the `src/css` directory > All pages doc can be directly jumped to the corresponding github resource modification page through the 'Edit this page' button at the bottom - diff --git a/home/docs/community/how-to-release.md b/home/docs/community/how-to-release.md index f92b328c8f7..12338c1d3a6 100644 --- a/home/docs/community/how-to-release.md +++ b/home/docs/community/how-to-release.md @@ -22,11 +22,11 @@ This release process is operated in the UbuntuOS(Windows,Mac), and the following ## 2. Preparing for release > First summarize the account information to better understand the operation process, will be used many times later. -- apache id: `muchunjin (APACHE LDAP UserName)` -- apache passphrase: `APACHE LDAP Passphrase` -- apache email: `muchunjin@apache.org` -- gpg real name: `muchunjin (Any name can be used, here I set it to the same name as the apache id)` -- gpg key passphrase: `The password set when creating the gpg key, you need to remember this password` +> - apache id: `muchunjin (APACHE LDAP UserName)` +> - apache passphrase: `APACHE LDAP Passphrase` +> - apache email: `muchunjin@apache.org` +> - gpg real name: `muchunjin (Any name can be used, here I set it to the same name as the apache id)` +> - gpg key passphrase: `The password set when creating the gpg key, you need to remember this password` ### 2.1 Key generation @@ -165,11 +165,10 @@ $ (gpg --list-sigs muchunjin@apache.org && gpg --export --armor muchunjin@apache $ svn ci -m "add gpg key for muchunjin" ``` -## 3. 
Prepare material package & release +## 3. Prepare material package & release #### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release. - ```shell git checkout master git checkout -b release-1.6.0-rc1 @@ -292,7 +291,7 @@ apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz: OK #### 3.7 Publish the dev directory of the Apache SVN material package -- Clone the dev directory +- Clone the dev directory ```shell # Check out the dev directory of the Apache SVN to the svn/dev directory under dist in the root directory of the Apache HertzBeat project @@ -331,7 +330,6 @@ svn commit -m "release for HertzBeat 1.6.0" > Visit the address https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1/ in the browser, check if existed the new material package - ## 4. Enter the community voting stage #### 4.1 Send a Community Vote Email @@ -340,7 +338,7 @@ Send a voting email in the community requires at least three `+1` and no `-1`. > `Send to`: dev@hertzbeat.apache.org
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
-> `Body`: +> `Body`: ``` Hello HertzBeat Community: @@ -403,8 +401,7 @@ Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" I'm happy to announce the vote has passed: - - +--- 4 binding +1, from: - cc @@ -412,17 +409,14 @@ I'm happy to announce the vote has passed: 1 non-binding +1, from: - Roc Marshal - - +--- no 0 or -1 votes. Vote thread: https://lists.apache.org/thread/t01b2lbtqzyt7j4dsbdp5qjc3gngjsdq - - +--- Thank you to everyone who helped us to verify and vote for this release. We will move to the ASF Incubator voting shortly. - - +--- Best, ChunJin Mu ``` @@ -474,17 +468,14 @@ More detailed checklist please refer: Steps to validate the release, Please refer to: • https://www.apache.org/info/verification.html • https://hertzbeat.apache.org/docs/community/how_to_verify_release - - +--- How to Build: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package - - +--- Thanks, On behalf of Apache HertzBeat (incubating) community - - +--- Best, ChunJin Mu ``` @@ -546,10 +537,9 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http https://github.com/apache/hertzbeat/blob/master/home/docs/download.md https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md - Open the official website address https://hertzbeat.apache.org/docs/download/ to see if there is a new version of the download -> It should be noted that the download link may take effect after an hour, so please pay attention to it. +> It should be noted that the download link may take effect after an hour, so please pay attention to it. #### 4.3 Generate a release on github @@ -560,11 +550,14 @@ You can modify it on the original RC Release without creating a new Release. 
::: Then enter Release Title and Describe -- Release Title: +- Release Title: + ``` v1.6.0 ``` + - Describe: + ``` xxx release note: xxx @@ -601,11 +594,9 @@ https://hertzbeat.apache.org/ HertzBeat Resources: - Issue: https://github.com/apache/hertzbeat/issues - Mailing list: dev@hertzbeat.apache.org - - +--- Apache HertzBeat Team - - +--- Best, ChunJin Mu ``` @@ -614,4 +605,4 @@ This version release is over. --- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/docs/community/how-to-verify.md b/home/docs/community/how-to-verify.md index 41c3341b59e..38b507149b0 100644 --- a/home/docs/community/how-to-verify.md +++ b/home/docs/community/how-to-verify.md @@ -40,7 +40,6 @@ The package uploaded to dist must include the source code package, and the binar 3. Whether to include the sha512 of the source code package 4. If the binary package is uploaded, also check the contents listed in (2)-(4) - ### 2.2 Check gpg signature First import the publisher's public key. Import KEYS from the svn repository to the local environment. (The person who releases the version does not need to import it again, the person who helps to do the verification needs to import it, and the user name is enough for the person who issued the version) @@ -51,6 +50,7 @@ First import the publisher's public key. Import KEYS from the svn repository to $ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS $ gpg --import KEYS # Import KEYS to local ``` + #### 2.2.2 Trust the public key Trust the KEY used in this version: @@ -123,8 +123,6 @@ check as follows: - [ ] Able to compile correctly - [ ] ..... - - ### 2.5 Check the source package > If the binary/web-binary package is uploaded, check the binary package. @@ -151,7 +149,6 @@ and check as follows: You can refer to this article: [ASF Third Party License Policy](https://apache.org/legal/resolved.html) - ## 3. 
Email reply If you initiate a posting vote, you can refer to this response example to reply to the email after verification @@ -170,9 +167,9 @@ If you have already voted on dev@hertzbeat.apache.org, you can take it directly //Incubator community voting, only IPMC members have binding binding,PPMC needs to be aware of binding changes Forward my +1 from dev@listhertzbeatnkis (non-binding) Copy my +1 from hertzbeat DEV ML (non-binding) -```` -::: +``` +::: Non-PPMC/Non-IPMC member: @@ -184,7 +181,7 @@ I checked: 3. LICENSE and NOTICE are exist 4. Build successfully on macOS(Big Sur) 5. -```` +``` PPMC/IPMC member: @@ -197,10 +194,8 @@ I checked: 3. LICENSE and NOTICE are exist 4. Build successfully on macOS(Big Sur) 5. -```` - - +``` --- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/docs/community/mailing_lists.md b/home/docs/community/mailing_lists.md index fed6e3928ee..c5ab8df7604 100644 --- a/home/docs/community/mailing_lists.md +++ b/home/docs/community/mailing_lists.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- The [Developer Mailing List](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) is the community-recommended way to communicate and obtain the latest information. 
@@ -32,18 +32,17 @@ Before you post anything to the mailing lists, be sure that you already **subscr - Use this list for your HertzBeat questions - Used by HertzBeat contributors to discuss development of HertzBeat - -| List Name | Address | Subscribe | Unsubscribe | Archive | -|---------------------|------------------------------|------------------------------------------------------------|----------------------------------------------------------------|----------------------------------------------------------------------------| -| **Developer List** | dev@hertzbeat.apache.org | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | +| List Name | Address | Subscribe | Unsubscribe | Archive | +|--------------------|--------------------------|--------------------------------------------------------|------------------------------------------------------------|------------------------------------------------------------------------| +| **Developer List** | dev@hertzbeat.apache.org | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### Notification List - Notifications on changes to the HertzBeat codebase -| List Name | Address | Subscribe | Unsubscribe | Archive | -|-------------------------|------------------------------------|------------------------------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------| -| **Notification List** | notifications@hertzbeat.apache.org | [subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | 
[archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| List Name | Address | Subscribe | Unsubscribe | Archive | +|-----------------------|------------------------------------|------------------------------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------| +| **Notification List** | notifications@hertzbeat.apache.org | [subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## Steps for Subscription diff --git a/home/docs/community/new_committer_process.md b/home/docs/community/new_committer_process.md index cec404f9fe8..0d72df109d7 100644 --- a/home/docs/community/new_committer_process.md +++ b/home/docs/community/new_committer_process.md @@ -5,20 +5,20 @@ sidebar_position: 4 --- [Apache New Committer Guideline](https://community.apache.org/newcommitter.html#new-committer-process) @@ -47,9 +47,8 @@ sidebar_position: 4 see **Committer Account Creation** - - Wait until root says it is done - - [Roster](https://whimsy.apache.org/roster/ppmc/hertzbeat) add the new committer - + - Wait until root says it is done + - [Roster](https://whimsy.apache.org/roster/ppmc/hertzbeat) add the new committer - Announce the new committer see **Announce New Committer Template** @@ -58,7 +57,7 @@ sidebar_position: 4 Note that, there are three placeholder in template should be replaced before using -- NEW_COMMITTER_NAME This Must Be Public Name, Not Github Name Or Id. +- NEW_COMMITTER_NAME This Must Be Public Name, Not Github Name Or Id. - NEW_COMMITTER_EMAIL - NEW_COMMITTER_APACHE_NAME @@ -68,7 +67,6 @@ Note that, there are three placeholder in template should be replaced before usi NEW_COMMITTER_NAME This Must Be Public Name, Not Github Name Or Id. 
::: - ```text To: private@hertzbeat.apache.org Subject: [VOTE] New committer: ${NEW_COMMITTER_NAME} @@ -285,3 +283,4 @@ Thanks for your contributions. Best Wishes! ``` + diff --git a/home/docs/community/new_pmc_member_process.md b/home/docs/community/new_pmc_member_process.md index ebc84b92d67..414dad94a56 100644 --- a/home/docs/community/new_pmc_member_process.md +++ b/home/docs/community/new_pmc_member_process.md @@ -5,20 +5,20 @@ sidebar_position: 5 --- [Apache New Committer Guideline](https://community.apache.org/newcommitter.html#new-committer-process) @@ -81,7 +81,6 @@ ${Work list}[1] Note that, Voting ends one week from today, i.e. [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache Voting Guidelines](https://community.apache.org/newcommitter.html) - ### Close Vote Template ```text @@ -283,3 +282,4 @@ A PPMC member helps manage and guide the direction of the project. Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` + diff --git a/home/docs/community/submit-code.md b/home/docs/community/submit-code.md index dfea1601d87..15bfeba545a 100644 --- a/home/docs/community/submit-code.md +++ b/home/docs/community/submit-code.md @@ -5,22 +5,21 @@ sidebar_position: 2 --- - * First from the remote repository fork a copy of the code into your own repository * The remote dev and merge branch is `master`. 
@@ -28,49 +27,44 @@ sidebar_position: 2 * Clone your repository to your local ```shell - git clone git@github.com:/hertzbeat.git +git clone git@github.com:/hertzbeat.git ``` * Add remote repository address, named upstream ```shell - git remote add upstream git@github.com:apache/hertzbeat.git +git remote add upstream git@github.com:apache/hertzbeat.git ``` * View repository ```shell - git remote -v +git remote -v ``` - > At this time, there will be two repositories: origin (your own repository) and upstream (remote repository) +> At this time, there will be two repositories: origin (your own repository) and upstream (remote repository) * Get/Update remote repository code - ```shell - git fetch upstream - ``` - + ```shell + git fetch upstream + ``` * Synchronize remote repository code to local repository - ```shell - git checkout origin/dev - git merge --no-ff upstream/dev - ``` - + ```shell + git checkout origin/dev + git merge --no-ff upstream/dev + ``` * **⚠️Note that you must create a new branch to develop features `git checkout -b feature-xxx`. It is not recommended to use the master branch for direct development** - * After modifying the code locally, submit it to your own repository: - **Note that the submission information does not contain special characters** - ```shell - git commit -m 'commit content' - git push - ``` + **Note that the submission information does not contain special characters** + ```shell + git commit -m 'commit content' + git push + ``` * Submit changes to the remote repository, you can see a green button "Compare & pull request" on your repository page, click it. - * Select the modified local branch and the branch you want to merge with the past, you need input the message carefully, describe doc is important as code, click "Create pull request". - * Then the community Committers will do CodeReview, and then he will discuss some details (design, implementation, performance, etc.) 
with you, afterward you can directly update the code in this branch according to the suggestions (no need to create a new PR). When this pr is approved, the commit will be merged into the master branch - * Finally, congratulations, you have become an official contributor to HertzBeat ! You will be added to the contributor wall, you can contact the community to obtain a contributor certificate. + diff --git a/home/docs/download.md b/home/docs/download.md index 827784bea00..4bccd984b70 100644 --- a/home/docs/download.md +++ b/home/docs/download.md @@ -18,12 +18,10 @@ sidebar_label: Download Previous releases of HertzBeat may be affected by security issues, please use the latest one. ::: - -| Version | Date | Download | Release Notes | +| Version | Date | Download | Release Notes | |---------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------|-------------------------------------------------------------------------| | v1.6.0 | 2024.06.10 | [apache-hertzbeat-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-1.6.0-incubating-src.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512) ) | [release note](https://github.com/apache/hertzbeat/releases/tag/v1.6.0) | - ## Release Docker Image > Apache HertzBeat provides a docker image for each release. You can pull the image from the [Docker Hub](https://hub.docker.com/r/apache/hertzbeat). @@ -31,7 +29,6 @@ Previous releases of HertzBeat may be affected by security issues, please use th - HertzBeat https://hub.docker.com/r/apache/hertzbeat - HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector - ## All Archived Releases For older releases, please check the [archive](https://archive.apache.org/dist/incubator/hertzbeat/). diff --git a/home/docs/help/activemq.md b/home/docs/help/activemq.md index 52e3090fde2..f24bc37fbbb 100644 --- a/home/docs/help/activemq.md +++ b/home/docs/help/activemq.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] **Use Protocol: JMX** -### Pre-monitoring Operations +### Pre-monitoring Operations > You need to enable the `JMX` service on ActiveMQ, HertzBeat uses the JMX protocol to collect metrics from ActiveMQ. @@ -26,6 +26,7 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] 2. Modify the `bin/env` file in the installation directory, configure the JMX port IP, etc. 
The original configuration information will be as follows + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" @@ -36,6 +37,7 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ``` Update to the following configuration, ⚠️ pay attention to modify `local external IP` + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" @@ -52,7 +54,7 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | | Monitoring name | The name that identifies this monitoring, and the name needs to be unique. 
| @@ -64,82 +66,81 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | | Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | -### Collect Metrics +### Collect Metrics #### metrics: broker -| Metric Name | Unit | Description | +| Metric Name | Unit | Description | |-------------------------|------|-----------------------------------------------------------------------| -| BrokerName | None | The name of the broker. | -| BrokerVersion | None | The version of the broker. | -| Uptime | None | Uptime of the broker. | +| BrokerName | None | The name of the broker. | +| BrokerVersion | None | The version of the broker. | +| Uptime | None | Uptime of the broker. | | UptimeMillis | ms | Uptime of the broker in milliseconds. | -| Persistent | None | Messages are synchronized to disk. | +| Persistent | None | Messages are synchronized to disk. | | MemoryPercentUsage | % | Percent of memory limit used. | | StorePercentUsage | % | Percent of store limit used. | | TempPercentUsage | % | Percent of temp limit used. | -| CurrentConnectionsCount | None | Attribute exposed for management | -| TotalConnectionsCount | None | Attribute exposed for management | -| TotalEnqueueCount | None | Number of messages that have been sent to the broker. | -| TotalDequeueCount | None | Number of messages that have been acknowledged on the broker. | -| TotalConsumerCount | None | Number of message consumers subscribed to destinations on the broker. | -| TotalProducerCount | None | Number of message producers active on destinations on the broker. | -| TotalMessageCount | None | Number of unacknowledged messages on the broker. 
| -| AverageMessageSize | None | Average message size on this broker | -| MaxMessageSize | None | Max message size on this broker | -| MinMessageSize | None | Min message size on this broker | - -#### metrics: topic - -| Metric Name | Unit | Description | -|-------------------------|------|-------------------------------------------------------------------------------------------| -| Name | None | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | None | The percentage of the memory limit used | -| ProducerCount | None | Number of producers attached to this destination | -| ConsumerCount | None | Number of consumers subscribed to this destination. | -| EnqueueCount | None | Number of messages that have been sent to the destination. | -| DequeueCount | None | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | None | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | None | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | None | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | None | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - +| CurrentConnectionsCount | None | Attribute exposed for management | +| TotalConnectionsCount | None | Attribute exposed for management | +| TotalEnqueueCount | None | Number of messages that have been sent to the broker. | +| TotalDequeueCount | None | Number of messages that have been acknowledged on the broker. | +| TotalConsumerCount | None | Number of message consumers subscribed to destinations on the broker. | +| TotalProducerCount | None | Number of message producers active on destinations on the broker. | +| TotalMessageCount | None | Number of unacknowledged messages on the broker. | +| AverageMessageSize | None | Average message size on this broker | +| MaxMessageSize | None | Max message size on this broker | +| MinMessageSize | None | Min message size on this broker | + +#### metrics: topic + +| Metric Name | Unit | Description | +|--------------------|------|-------------------------------------------------------------------------------------------| +| Name | None | Name of this destination. | +| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | +| MemoryPercentUsage | None | The percentage of the memory limit used | +| ProducerCount | None | Number of producers attached to this destination | +| ConsumerCount | None | Number of consumers subscribed to this destination. | +| EnqueueCount | None | Number of messages that have been sent to the destination. 
| +| DequeueCount | None | Number of messages that has been acknowledged (and removed) from the destination. | +| ForwardCount | None | Number of messages that have been forwarded (to a networked broker) from the destination. | +| InFlightCount | None | Number of messages that have been dispatched to, but not acknowledged by, consumers. | +| DispatchCount | None | Number of messages that has been delivered to consumers, including those not acknowledged | +| ExpiredCount | None | Number of messages that have been expired. | +| StoreMessageSize | B | The memory size of all messages in this destination's store. | +| AverageEnqueueTime | ms | Average time a message was held on this destination. | +| MaxEnqueueTime | ms | The longest time a message was held on this destination | +| MinEnqueueTime | ms | The shortest time a message was held on this destination | +| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | +| AverageMessageSize | B | Average message size on this destination | +| MaxMessageSize | B | Max message size on this destination | +| MinMessageSize | B | Min message size on this destination | #### metrics: memory_pool -| Metric Name | Unit | Description | -|-------------| ----------- |----------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric Name | Unit | Description | +|-------------|------|--------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### metrics: class_loading -| Metric Name | Unit | Description | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - +| Metric Name | Unit | Description | +|-----------------------|------|--------------------------| +| 
LoadedClassCount | | Loaded Class Count | +| TotalLoadedClassCount | | Total Loaded Class Count | +| UnloadedClassCount | | Unloaded Class Count | #### metrics: thread -| Metric Name | Unit | Description | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | +| Metric Name | Unit | Description | +|-------------------------|------|----------------------------| +| TotalStartedThreadCount | | Total Started Thread Count | +| ThreadCount | | Thread Count | +| PeakThreadCount | | Peak Thread Count | +| DaemonThreadCount | | Daemon Thread Count | +| CurrentThreadUserTime | ms | Current Thread User Time | +| CurrentThreadCpuTime | ms | Current Thread Cpu Time | + diff --git a/home/docs/help/ai_config.md b/home/docs/help/ai_config.md index ebb263a0a38..5c5b56962af 100644 --- a/home/docs/help/ai_config.md +++ b/home/docs/help/ai_config.md @@ -9,52 +9,54 @@ keywords: [AI] ### Configuration parameter description -| Name of the parameter | Parameter help description | -|-----------------------|------------------------------| -| type | Choose a large AI model (such as Zhipu, Tongyi thousand questions...)| -| model | Select the model, which defaults to GLM-4 | -| api-key | Gets the api_key, without which you cannot talk to the large model | +| Name of the parameter | Parameter help description | +|-----------------------|-----------------------------------------------------------------------| +| type | Choose a large AI model (such as Zhipu, Tongyi thousand questions...) 
| +| model | Select the model, which defaults to GLM-4 | +| api-key | Gets the api_key, without which you cannot talk to the large model | ### Large model options and configuration details #### ZhiPu AI -| Name of the parameter | Example | Link | -|--------------|-----------------------------------------------------|---------------------------------------------------------------| -| type | zhiPu (must be exactly the same as example) | | -| model | glm-4-0520、glm-4 、glm-4-air、glm-4-airx、 glm-4-flash | | -| api-key | xxxxx.xxxxxx | https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys | +| Name of the parameter | Example | Link | +|-----------------------|-----------------------------------------------------|-----------------------------------------------------------------| +| type | zhiPu (must be exactly the same as example) | | +| model | glm-4-0520、glm-4 、glm-4-air、glm-4-airx、 glm-4-flash | | +| api-key | xxxxx.xxxxxx | https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys | #### Alibaba AI -| Name of the parameter | Example | Link | -|--------------|----------------------------------------------------------------------------------------------|---------------------------------------------------------------| -| type | alibabaAi (must be exactly the same as example) | | -| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction?spm=a2c4g.11186623.0.0.4e0246c1RQFKMH | -| api-key | xxxxxxxxxxx |https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.i10| +| Name of the parameter | Example | Link | +|-----------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------| +| type | alibabaAi (must 
be exactly the same as example) | | +| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction?spm=a2c4g.11186623.0.0.4e0246c1RQFKMH | +| api-key | xxxxxxxxxxx | https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.i10 | #### Kimi AI -| Name of the parameter | Example | Link | -|--------------|----------------------------------------------|---------------------------------------------------------------| -| type | kimiAi (must be exactly the same as example) | | -| model | moonshot-v1-8k、moonshot-v1-32k、moonshot-v1-128k | | -| api-key | xxxxxxxxxxx |https://platform.moonshot.cn/console/api-keys| +| Name of the parameter | Example | Link | +|-----------------------|-------------------------------------------------|-----------------------------------------------| +| type | kimiAi (must be exactly the same as example) | | +| model | moonshot-v1-8k、moonshot-v1-32k、moonshot-v1-128k | | +| api-key | xxxxxxxxxxx | https://platform.moonshot.cn/console/api-keys | #### sparkDesk AI + QuickStart: https://www.xfyun.cn/doc/platform/quickguide.html -| Name of the parameter | Example | Link | -|--------------|-------------------------------------------------|---------------------------------------------------------------| -| type | sparkDesk (must be exactly the same as example) | | -| model | general、generalv2、generalv3、generalv3.5、4.0Ultra | | -| api-key | xxxxxxxxxxx |https://console.xfyun.cn/services/cbm| -| api-secret | xxxxxxxxxxx |https://console.xfyun.cn/services/cbm| - -| sparkDesk version | model | -|-------------------|-----------------------------------------------| -| Spark4.0 Ultra | 4.0Ultra | -| Spark Max |generalv3.5 | -| Spark Pro | generalv3 | -| Spark V2.0 |generalv2 | -| Spark Lite(free) |general | \ No newline at end of file +| Name of the parameter | Example | Link | 
+|-----------------------|--------------------------------------------------|---------------------------------------| +| type | sparkDesk (must be exactly the same as example) | | +| model | general、generalv2、generalv3、generalv3.5、4.0Ultra | | +| api-key | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | +| api-secret | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | + +| sparkDesk version | model | +|-------------------|-------------| +| Spark4.0 Ultra | 4.0Ultra | +| Spark Max | generalv3.5 | +| Spark Pro | generalv3 | +| Spark V2.0 | generalv2 | +| Spark Lite(free) | general | + diff --git a/home/docs/help/airflow.md b/home/docs/help/airflow.md index 5323ede8110..52367155d89 100644 --- a/home/docs/help/airflow.md +++ b/home/docs/help/airflow.md @@ -9,33 +9,31 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8080 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| HTTPS | 是否启用HTTPS | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:airflow_health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | -------------------- | -| metadatabase | 无 | metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|------------------| +| metadatabase | 无 | metadatabase健康情况 
| +| scheduler | 无 | scheduler健康情况 | +| triggerer | 无 | triggerer健康情况 | #### 指标集合:airflow_version -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | --------------- | -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------| +| value | 无 | Airflow版本 | +| git_version | 无 | Airflow git版本 | diff --git a/home/docs/help/alert_console.md b/home/docs/help/alert_console.md index e727fec4771..45ab7d791d3 100644 --- a/home/docs/help/alert_console.md +++ b/home/docs/help/alert_console.md @@ -6,13 +6,13 @@ sidebar_label: Console address in alarm template > After the threshold is triggered, send the alarm information. When you notify through DingDing / enterprise Wechat / FeiShu robot or email, the alarm content has a detailed link to log in to the console. - ### Custom settings In our startup configuration file application.yml, find the following configuration + ```yml alerter: console-url: #Here is our custom console address ``` -The default value is the official console address of HertzBeat. \ No newline at end of file +The default value is the official console address of HertzBeat. diff --git a/home/docs/help/alert_dingtalk.md b/home/docs/help/alert_dingtalk.md index fb63d52aa48..b86ed662940 100644 --- a/home/docs/help/alert_dingtalk.md +++ b/home/docs/help/alert_dingtalk.md @@ -5,15 +5,15 @@ sidebar_label: Alert DingDing robot notification keywords: [open source monitoring tool, open source alerter, open source DingDing robot notification] --- -> After the threshold is triggered send alarm information and notify the recipient by DingDing robot. +> After the threshold is triggered send alarm information and notify the recipient by DingDing robot. -### Operation steps +### Operation steps 1. 
**【DingDing desktop client】-> 【Group settings】-> 【Intelligent group assistant】-> 【Add new robot-select custom】-> 【Set robot name and avatar】-> 【Note⚠️Set custom keywords: HertzBeat】 ->【Copy its webhook address after adding successfully】** -> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. +> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. -![email](/img/docs/help/alert-notice-8.png) +![email](/img/docs/help/alert-notice-8.png) 2. **【Save access_token value of the WebHook address of the robot】** @@ -24,18 +24,18 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin ![email](/img/docs/help/alert-notice-9.png) -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### DingDing robot common issues -### DingDing robot common issues +1. DingDing group did not receive the robot alarm notification. -1. DingDing group did not receive the robot alarm notification. 
> Please check whether there is any triggered alarm information in the alarm center. > Please check whether DingDing robot is configured with security custom keywords :HertzBeat. > Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_discord.md b/home/docs/help/alert_discord.md index 8dfdca384fa..7aa565c0acf 100644 --- a/home/docs/help/alert_discord.md +++ b/home/docs/help/alert_discord.md @@ -45,7 +45,6 @@ keywords: [open source monitoring tool, open source alerter, open source Discord ![bot](/img/docs/help/discord-bot-7.png) - ### Add an alarm notification person in HertzBeat, the notification method is Discord Bot 1. **[Alarm notification] -> [Add recipient] -> [Select Discord robot notification method] -> [Set robot Token and ChannelId] -> [OK]** @@ -58,13 +57,12 @@ keywords: [open source monitoring tool, open source alerter, open source Discord ![email](/img/docs/help/alert-notice-policy.png) +### Discord Bot Notification FAQ -### Discord Bot Notification FAQ - -1. Discord doesn't receive bot alert notifications +1. Discord doesn't receive bot alert notifications > Please check whether the alarm information has been triggered in the alarm center > Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured -> Please check whether the bot is properly authorized by the Discord chat server +> Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/docs/help/alert_email.md b/home/docs/help/alert_email.md index 2c5bb61550b..3beddfbf4e8 100644 --- a/home/docs/help/alert_email.md +++ b/home/docs/help/alert_email.md @@ -5,30 +5,29 @@ sidebar_label: Alert email notification keywords: [open source monitoring tool, open source alerter, open source email notification] --- -> After the threshold is triggered send alarm information and notify the recipient by email. +> After the threshold is triggered send alarm information and notify the recipient by email. -### Operation steps +### Operation steps -1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** +1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** -![email](/img/docs/help/alert-notice-1.png) +![email](/img/docs/help/alert-notice-1.png) 2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** -![email](/img/docs/help/alert-notice-2.png) + ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) +![email](/img/docs/help/alert-notice-3.png) -3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### Email notification common issues -### Email notification common issues - -1. Hertzbeat deployed on its own intranet cannot receive email notifications -> Hertzbeat needs to configure its own mail server. Please confirm whether you have configured its own mail server in application.yml +1. Hertzbeat deployed on its own intranet cannot receive email notifications +> Hertzbeat needs to configure its own mail server. Please confirm whether you have configured its own mail server in application.yml Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_enterprise_wechat_app.md b/home/docs/help/alert_enterprise_wechat_app.md index f9e072e436a..1d5d41a15bc 100644 --- a/home/docs/help/alert_enterprise_wechat_app.md +++ b/home/docs/help/alert_enterprise_wechat_app.md @@ -5,30 +5,30 @@ sidebar_label: Alert Enterprise Wechat App notification keywords: [open source monitoring tool, open source alerter, open source Enterprise Wechat App notification] --- -> After the threshold is triggered send alarm information and notify the recipient by enterprise WeChat App. +> After the threshold is triggered send alarm information and notify the recipient by enterprise WeChat App. -### Operation steps +### Operation steps 1. **【Enterprise Wechat backstage】-> 【App Management】-> 【Create an app】-> 【Set App message】->【Copy AgentId and Secret adding successfully】** -![email](/img/docs/help/alert-wechat-1.jpg) +![email](/img/docs/help/alert-wechat-1.jpg) 2. **【Alarm notification】->【Add new recipient】 ->【Select Enterprise WeChat App notification method】->【Set Enterprise WeChat ID,Enterprise App ID and Enterprise App Secret 】-> 【Confirm】** ![email](/img/docs/help/alert-wechat-2.jpg) -3. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-wechat-3.jpg) +![email](/img/docs/help/alert-wechat-3.jpg) +### Enterprise WeChat App common issues -### Enterprise WeChat App common issues +1. Enterprise WeChat App did not receive the alarm notification. -1. Enterprise WeChat App did not receive the alarm notification. > Please check if the user has application permissions. > Please check if the enterprise application callback address settings are normal. > Please check if the server IP is on the enterprise application whitelist. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_feishu.md b/home/docs/help/alert_feishu.md index 56606012021..8f7e9391001 100644 --- a/home/docs/help/alert_feishu.md +++ b/home/docs/help/alert_feishu.md @@ -5,30 +5,30 @@ sidebar_label: Alert FeiShu robot notification keywords: [open source monitoring tool, open source alerter, open source feishu bot notification] --- -> After the threshold is triggered send alarm information and notify the recipient by FeiShu robot. +> After the threshold is triggered send alarm information and notify the recipient by FeiShu robot. 
-### Operation steps +### Operation steps 1. **【FeiShu client】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -2. **【Save the key value of the WebHook address of the robot】** +2. **【Save the key value of the WebHook address of the robot】** > eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### FeiShu robot notification common issues -### FeiShu robot notification common issues +1. FeiShu group did not receive the robot alarm notification. -1. FeiShu group did not receive the robot alarm notification. > Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. 
+> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_slack.md b/home/docs/help/alert_slack.md index 2540a27451d..5148432fe8b 100644 --- a/home/docs/help/alert_slack.md +++ b/home/docs/help/alert_slack.md @@ -25,12 +25,11 @@ Refer to the official website document [Sending messages using Incoming Webhooks ![email](/img/docs/help/alert-notice-policy.png) - ### Slack Notification FAQ 1. Slack did not receive the robot warning notification > Please check whether the alarm information has been triggered in the alarm center -> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured +> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_telegram.md b/home/docs/help/alert_telegram.md index 1fbe4f0ae7e..cb60f266778 100644 --- a/home/docs/help/alert_telegram.md +++ b/home/docs/help/alert_telegram.md @@ -54,13 +54,12 @@ Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token ![email](/img/docs/help/alert-notice-policy.png) - ### Telegram Bot Notification FAQ 1. Telegram did not receive the robot warning notification > Please check whether the alarm information has been triggered in the alarm center > Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured -> UserId should be the UserId of the recipient of the message +> UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/docs/help/alert_threshold.md b/home/docs/help/alert_threshold.md index 0574b1924ba..2619fdbcf6e 100644 --- a/home/docs/help/alert_threshold.md +++ b/home/docs/help/alert_threshold.md @@ -3,6 +3,7 @@ id: alert_threshold title: Threshold Alert Configuration sidebar_label: Threshold Alert Configuration --- + > Configure alert thresholds for monitoring metrics (warning alert, critical alert, emergency alert). The system triggers alerts based on threshold configuration and collected metric data. ## Operational Steps diff --git a/home/docs/help/alert_threshold_expr.md b/home/docs/help/alert_threshold_expr.md index a7a7e6bdbe0..6b8772388e6 100644 --- a/home/docs/help/alert_threshold_expr.md +++ b/home/docs/help/alert_threshold_expr.md @@ -8,40 +8,40 @@ sidebar_label: Threshold Trigger Expression #### Supported Operators in Expressions -| Operator (Visual Configuration) | Operator (Expression Configuration) | Supported Types | Description | -| ------------------------------- | ----------------------------------- | ------------------------- | -------------------------- | -| Equals | equals(str1,str2) | String | Check if strings are equal | -| Not Equals | !equals(str1,str2) | String | Check if strings are not equal | -| Contains | contains(str1,str2) | String | Check if string contains | -| Not Contains | !contains(str1,str2) | String | Check if string does not contain | -| Matches | matches(str1,str2) | String | Check if string matches regex | -| Not Matches | !matches(str1,str2) | String | Check if string does not match regex | -| Exists | exists(obj) | String, Numeric, Time | Check if value exists | -| Not Exists | !exists(obj) | String, Numeric, Time | Check if value does not exist | -| Greater than | obj1 > obj2 | Numeric, Time | Check if value is greater than | -| Less than | obj1 < obj2 | Numeric, Time | Check if value is less than | -| Greater than or Equal to | obj1 >= obj2 | Numeric, Time | Check if value is greater than or equal to | -| Less 
than or Equal to | obj1 <= obj2 | Numeric, Time | Check if value is less than or equal to | -| Not Equal to | obj1 != obj2 | Numeric, Time | Check if values are not equal | -| Equal to | obj1 == obj2 | Numeric, Time | Check if values are equal | +| Operator (Visual Configuration) | Operator (Expression Configuration) | Supported Types | Description | +|---------------------------------|-------------------------------------|-----------------------|--------------------------------------------| +| Equals | equals(str1,str2) | String | Check if strings are equal | +| Not Equals | !equals(str1,str2) | String | Check if strings are not equal | +| Contains | contains(str1,str2) | String | Check if string contains | +| Not Contains | !contains(str1,str2) | String | Check if string does not contain | +| Matches | matches(str1,str2) | String | Check if string matches regex | +| Not Matches | !matches(str1,str2) | String | Check if string does not match regex | +| Exists | exists(obj) | String, Numeric, Time | Check if value exists | +| Not Exists | !exists(obj) | String, Numeric, Time | Check if value does not exist | +| Greater than | obj1 > obj2 | Numeric, Time | Check if value is greater than | +| Less than | obj1 < obj2 | Numeric, Time | Check if value is less than | +| Greater than or Equal to | obj1 >= obj2 | Numeric, Time | Check if value is greater than or equal to | +| Less than or Equal to | obj1 <= obj2 | Numeric, Time | Check if value is less than or equal to | +| Not Equal to | obj1 != obj2 | Numeric, Time | Check if values are not equal | +| Equal to | obj1 == obj2 | Numeric, Time | Check if values are equal | #### Expression Function Library List -| Supported Function Library | Description | -| -------------------------------- | -------------------------------------------------------------- | -| condition ? 
trueExpression : falseExpression | Ternary operator | -| toDouble(str) | Convert string to Double type | -| toBoolean(str) | Convert string to Boolean type | -| toInteger(str) | Convert string to Integer type | -| array[n] | Retrieve the nth element of an array | -| * | Multiplication | -| / | Division | -| % | Modulo | -| ( and ) | Parentheses for controlling the order of operations in logical or mathematical expressions | -| + | Addition | -| - | Subtraction | -| && | Logical AND operator | -| \|\| | Logical OR operator | +| Supported Function Library | Description | +|----------------------------------------------|--------------------------------------------------------------------------------------------| +| condition ? trueExpression : falseExpression | Ternary operator | +| toDouble(str) | Convert string to Double type | +| toBoolean(str) | Convert string to Boolean type | +| toInteger(str) | Convert string to Integer type | +| array[n] | Retrieve the nth element of an array | +| * | Multiplication | +| / | Division | +| % | Modulo | +| ( and ) | Parentheses for controlling the order of operations in logical or mathematical expressions | +| + | Addition | +| - | Subtraction | +| && | Logical AND operator | +| \|\| | Logical OR operator | #### Supported Environment Variables @@ -63,4 +63,4 @@ This variable is mainly used for calculations involving multiple instances. For 4. MYSQL Monitoring -> Alert when 'threads_running' metric under 'status' exceeds 7 `threads_running>7` -If you encounter any issues, feel free to discuss and provide feedback through our community group or ISSUE tracker! \ No newline at end of file +If you encounter any issues, feel free to discuss and provide feedback through our community group or ISSUE tracker! 
diff --git a/home/docs/help/alert_webhook.md b/home/docs/help/alert_webhook.md index adc1b6f12f8..d1741d71481 100644 --- a/home/docs/help/alert_webhook.md +++ b/home/docs/help/alert_webhook.md @@ -5,23 +5,24 @@ sidebar_label: Alert webHook notification keywords: [open source monitoring tool, open source alerter, open source webhook notification] --- -> After the threshold is triggered send alarm information and call the Webhook interface through post request to notify the recipient. +> After the threshold is triggered send alarm information and call the Webhook interface through post request to notify the recipient. -### Operation steps +### Operation steps -1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** +1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** ![email](/img/docs/help/alert-notice-5.png) -2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) -### WebHook callback POST body BODY content +### WebHook callback POST body BODY content + +Content format:JSON -Content format:JSON ```json { "alarmId": 76456, @@ -43,24 +44,23 @@ Content format:JSON } ``` -| | | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | -| tags | example: {key1:value1} | - +| | | 
+|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | +| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | +| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | +| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | +| content | string title: The actual content of the alarm notification 告警通知实际内容 | +| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | +| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | +| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | +| tags | example: {key1:value1} | +### Webhook notification common issues -### Webhook notification common issues +1. WebHook callback did not take effect -1. WebHook callback did not take effect > Please check whether there is any triggered alarm information in the alarm center. > Please check whether the configured webhook callback address is correct. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/docs/help/alert_wework.md b/home/docs/help/alert_wework.md index e862fae7ddf..ca14d5615fa 100644 --- a/home/docs/help/alert_wework.md +++ b/home/docs/help/alert_wework.md @@ -5,34 +5,34 @@ sidebar_label: Alert enterprise Wechat notification keywords: [open source monitoring tool, open source alerter, open source WeWork notification] --- -> After the threshold is triggered send alarm information and notify the recipient by enterprise Wechat robot. +> After the threshold is triggered send alarm information and notify the recipient by enterprise Wechat robot. -### Operation steps +### Operation steps -1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** +1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -![email](/img/docs/help/alert-notice-6.jpg) +![email](/img/docs/help/alert-notice-6.jpg) -2. **【Save the key value of the WebHook address of the robot】** +2. **【Save the key value of the WebHook address of the robot】** > eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** +3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** ![email](/img/docs/help/alert-notice-7.png) -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +![email](/img/docs/help/alert-notice-4.png) ### Enterprise Wechat robot common issues -1. The enterprise wechat group did not receive the robot alarm notification. +1. The enterprise wechat group did not receive the robot alarm notification. + > Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. +> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/almalinux.md b/home/docs/help/almalinux.md index 380e1439ab4..40a07028c96 100644 --- a/home/docs/help/almalinux.md +++ b/home/docs/help/almalinux.md @@ -4,14 +4,14 @@ title: Monitoring AlmaLinux Operating System Monitoring sidebar_label: AlmaLinux Operating System keywords: [open-source monitoring system, open-source operating system monitoring, AlmaLinux operating system monitoring] --- + > Collect and monitor common performance metrics of the AlmaLinux operating system. 
### Configuration Parameters - -| Parameter Name | Parameter Help Description | -| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------- | -| Monitoring Host | The monitored peer's IPv4, IPv6, or domain name. Note ⚠️ No protocol header (e.g., https://, http://). | +| Parameter Name | Parameter Help Description | +|-------------------|---------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | The monitored peer's IPv4, IPv6, or domain name. Note ⚠️ No protocol header (e.g., https://, http://). | | Task Name | A unique name to identify this monitoring task. | | Port | The port provided by Linux SSH, default is 22. | | Timeout | Set the connection timeout in milliseconds, default is 6000 ms. | @@ -28,18 +28,16 @@ keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Basic System Information - -| Metric Name | Metric Unit | Metric Help Description | -| -------------- | ----------- | ------------------------ | +| Metric Name | Metric Unit | Metric Help Description | +|----------------|-------------|--------------------------| | Host Name | None | Host name | | System Version | None | Operating system version | | Uptime | None | Uptime | #### Metric Set: CPU Information - -| Metric Name | Metric Unit | Metric Help Description | -| -------------- | ----------- | --------------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|----------------|-------------|-----------------------------------| | info | None | CPU model | | cores | Cores | Number of CPU cores | | interrupt | Count | Number of CPU interrupts | @@ -49,9 +47,8 @@ keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Memory Information - -| Metric Name | Metric Unit | Metric Help Description | -| 
----------- | ----------- | ----------------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------| | total | Mb | Total memory capacity | | used | Mb | Memory used by user programs | | free | Mb | Free memory capacity | @@ -61,9 +58,8 @@ keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Disk Information - -| Metric Name | Metric Unit | Metric Help Description | -| ------------- | ----------- | -------------------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|---------------|-------------|----------------------------------------| | disk_num | Count | Total number of disks | | partition_num | Count | Total number of partitions | | block_write | Blocks | Total number of blocks written to disk | @@ -72,18 +68,16 @@ keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Network Card Information - -| Metric Name | Metric Unit | Metric Help Description | -| -------------- | ----------- | ----------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|----------------|-------------|-------------------------------| | interface_name | None | Network card name | | receive_bytes | Byte | Inbound data traffic (bytes) | | transmit_bytes | Byte | Outbound data traffic (bytes) | #### Metric Set: File System - | Metric Name | Metric Unit | Metric Help Description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | filesystem | None | Name of the file system | | used | Mb | Used disk size | | available | Mb | Available disk size | @@ -94,9 +88,8 @@ keywords: [open-source monitoring system, open-source operating system monitorin Statistics for the top 10 processes using the CPU. Statistics include: process ID, CPU usage, memory usage, and executed command. 
- | Metric Name | Metric Unit | Metric Help Description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | pid | None | Process ID | | cpu_usage | % | CPU usage | | mem_usage | % | Memory usage | @@ -106,12 +99,12 @@ Statistics for the top 10 processes using the CPU. Statistics include: process I Statistics for the top 10 processes using memory. Statistics include: process ID, memory usage, CPU usage, and executed command. - | Metric Name | Metric Unit | Metric Help Description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | pid | None | Process ID | | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | --- + diff --git a/home/docs/help/api.md b/home/docs/help/api.md index f0f92be3da3..98763e0eefe 100644 --- a/home/docs/help/api.md +++ b/home/docs/help/api.md @@ -5,33 +5,33 @@ sidebar_label: HTTP API keywords: [open source monitoring tool, monitoring http api] --- -> Call HTTP API interface, check whether the interface is available, and monitor its response time and other Metrics. +> Call HTTP API interface, check whether the interface is available, and monitor its response time and other Metrics. ### Configuration parameter -| Parameter name | Parameter help description | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Relative path | Suffix path of website address except IP port. 
For example, the relative path of `www.tancloud.io/console` website is `/console` | -| Request mode | Set the request mode of interface call:GET, POST, PUT, DELETE | -| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Username | User name used for interface Basic authentication or Digest authentication | -| Password | Password used for interface Basic authentication or Digest authentication | -| Headers | HTTP request headers | -| Params | HTTP query params, support [time expression](time_expression) | -| Content-Type | Set the resource type when carrying the BODY request body data request | -| Request BODY | Set the carry BODY request body data, which is valid when PUT or POST request method is used | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Relative path | Suffix path of website address except IP port. 
For example, the relative path of `www.tancloud.io/console` website is `/console` | +| Request mode | Set the request mode of interface call:GET, POST, PUT, DELETE | +| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Username | User name used for interface Basic authentication or Digest authentication | +| Password | Password used for interface Basic authentication or Digest authentication | +| Headers | HTTP request headers | +| Params | HTTP query params, support [time expression](time_expression) | +| Content-Type | Set the resource type when carrying the BODY request body data request | +| Request BODY | Set the carry BODY request body data, which is valid when PUT or POST request method is used | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | diff --git a/home/docs/help/centos.md b/home/docs/help/centos.md index 2a6ad2b0a6d..60b770ebf96 100644 --- a/home/docs/help/centos.md +++ b/home/docs/help/centos.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source os monitoring tool, monitori ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | 
+|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % 
| usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/docs/help/clickhouse.md b/home/docs/help/clickhouse.md index 3e09687a450..efd873d1f32 100644 --- a/home/docs/help/clickhouse.md +++ b/home/docs/help/clickhouse.md @@ -4,91 +4,93 @@ title: Monitoring ClickHouse Database Monitoring sidebar_label: ClickHouse Database keywords: [open source monitoring system, open source database monitoring, ClickHouse database monitoring] --- + > Collect and monitor general performance metrics for the ClickHouse database. ### Configuration Parameters -| Parameter Name | Parameter Description | -| -------------- | ------------------------------------------------------------------------- | -| Monitor Host | IP address, IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ without protocol prefix (e.g., https://, http://). | -| Task Name | Name identifying this monitoring, ensuring uniqueness. | -| Port | Port number of the database exposed to the outside, default is 8123. | -| Query Timeout | Timeout for SQL queries to respond, in milliseconds (ms), default is 6000ms. | -| Database Name | Name of the database instance, optional. | -| Username | Username for database connection, optional. | -| Password | Password for database connection, optional. | -| Collection Interval | Interval for periodic data collection during monitoring, in seconds, with a minimum interval of 30 seconds. | -| Tag Binding | Used for categorizing and managing monitored resources. | -| Description | Additional information to identify and describe this monitoring, where users can add remarks. 
| +| Parameter Name | Parameter Description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | IP address, IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ without protocol prefix (e.g., https://, http://). | +| Task Name | Name identifying this monitoring, ensuring uniqueness. | +| Port | Port number of the database exposed to the outside, default is 8123. | +| Query Timeout | Timeout for SQL queries to respond, in milliseconds (ms), default is 6000ms. | +| Database Name | Name of the database instance, optional. | +| Username | Username for database connection, optional. | +| Password | Password for database connection, optional. | +| Collection Interval | Interval for periodic data collection during monitoring, in seconds, with a minimum interval of 30 seconds. | +| Tag Binding | Used for categorizing and managing monitored resources. | +| Description | Additional information to identify and describe this monitoring, where users can add remarks. 
| ### Collected Metrics #### Metric Set: ping Availability -| Metric Name | Metric Unit | Metric Description | -| ------------- | ----------- | ------------------ | -| responseTime | N/A | Response time | +| Metric Name | Metric Unit | Metric Description | +|--------------|-------------|--------------------| +| responseTime | N/A | Response time | #### Metric Set: Data from system.metrics table -| Metric Name | Metric Unit | Metric Description | -| ---------------------- | ----------- | ------------------------------------------------------------- | -| Query | N/A | Number of queries being executed | -| Merge | N/A | Number of background merges being executed | -| Move | N/A | Number of background moves being executed | -| PartMutation | N/A | Number of table mutations | -| ReplicatedFetch | N/A | Number of data blocks fetched from replicas | -| ReplicatedSend | N/A | Number of data blocks sent to replicas | -| ReplicatedChecks | N/A | Number of consistency checks on data blocks | -| QueryPreempted | N/A | Number of queries stopped or waiting | -| TCPConnection | N/A | Number of TCP connections | -| HTTPConnection | N/A | Number of HTTP connections | -| OpenFileForRead | N/A | Number of open readable files | -| OpenFileForWrite | N/A | Number of open writable files | -| QueryThread | N/A | Number of threads processing queries | -| ReadonlyReplica | N/A | Number of Replicated tables in read-only state | -| EphemeralNode | N/A | Number of ephemeral nodes in ZooKeeper | -| ZooKeeperWatch | N/A | Number of ZooKeeper event subscriptions | -| StorageBufferBytes | Bytes | Bytes in Buffer tables | -| VersionInteger | N/A | ClickHouse version number | -| RWLockWaitingReaders | N/A | Number of threads waiting for read-write lock on a table | -| RWLockWaitingWriters | N/A | Number of threads waiting for write lock on a table | -| RWLockActiveReaders | N/A | Number of threads holding read lock on a table | -| RWLockActiveWriters | N/A | Number of threads holding write lock on 
a table | -| GlobalThread | N/A | Number of threads in global thread pool | -| GlobalThreadActive | N/A | Number of active threads in global thread pool | -| LocalThread | N/A | Number of threads in local thread pool | -| LocalThreadActive | N/A | Number of active threads in local thread pool | +| Metric Name | Metric Unit | Metric Description | +|----------------------|-------------|----------------------------------------------------------| +| Query | N/A | Number of queries being executed | +| Merge | N/A | Number of background merges being executed | +| Move | N/A | Number of background moves being executed | +| PartMutation | N/A | Number of table mutations | +| ReplicatedFetch | N/A | Number of data blocks fetched from replicas | +| ReplicatedSend | N/A | Number of data blocks sent to replicas | +| ReplicatedChecks | N/A | Number of consistency checks on data blocks | +| QueryPreempted | N/A | Number of queries stopped or waiting | +| TCPConnection | N/A | Number of TCP connections | +| HTTPConnection | N/A | Number of HTTP connections | +| OpenFileForRead | N/A | Number of open readable files | +| OpenFileForWrite | N/A | Number of open writable files | +| QueryThread | N/A | Number of threads processing queries | +| ReadonlyReplica | N/A | Number of Replicated tables in read-only state | +| EphemeralNode | N/A | Number of ephemeral nodes in ZooKeeper | +| ZooKeeperWatch | N/A | Number of ZooKeeper event subscriptions | +| StorageBufferBytes | Bytes | Bytes in Buffer tables | +| VersionInteger | N/A | ClickHouse version number | +| RWLockWaitingReaders | N/A | Number of threads waiting for read-write lock on a table | +| RWLockWaitingWriters | N/A | Number of threads waiting for write lock on a table | +| RWLockActiveReaders | N/A | Number of threads holding read lock on a table | +| RWLockActiveWriters | N/A | Number of threads holding write lock on a table | +| GlobalThread | N/A | Number of threads in global thread pool | +| GlobalThreadActive | N/A | 
Number of active threads in global thread pool | +| LocalThread | N/A | Number of threads in local thread pool | +| LocalThreadActive | N/A | Number of active threads in local thread pool | #### Metric Set: Data from system.events table -| Metric Name | Metric Unit | Metric Description | -| ------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------- | -| Query | N/A | Number of queries to parse and possibly execute. Excludes queries rejected due to AST size limits, quota limits, or simultaneous query limits. May include internal queries initiated by ClickHouse. Subqueries are not counted. | -| SelectQuery | N/A | Number of Select queries possibly executed | -| InsertQuery | N/A | Number of Insert queries possibly executed | -| InsertedRows | N/A | Number of rows inserted into all tables | -| InsertedBytes | Bytes | Number of bytes inserted into all tables | -| FailedQuery | N/A | Number of failed queries | -| FailedSelectQuery | N/A | Number of failed Select queries | -| FileOpen | N/A | Number of file openings | -| MergeTreeDataWriterRows | N/A | Number of data rows written to MergeTree tables | -| MergeTreeDataWriterCompressedBytes | Bytes | Number of compressed data bytes written to MergeTree tables | +| Metric Name | Metric Unit | Metric Description | +|------------------------------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Query | N/A | Number of queries to parse and possibly execute. Excludes queries rejected due to AST size limits, quota limits, or simultaneous query limits. May include internal queries initiated by ClickHouse. Subqueries are not counted. 
| +| SelectQuery | N/A | Number of Select queries possibly executed | +| InsertQuery | N/A | Number of Insert queries possibly executed | +| InsertedRows | N/A | Number of rows inserted into all tables | +| InsertedBytes | Bytes | Number of bytes inserted into all tables | +| FailedQuery | N/A | Number of failed queries | +| FailedSelectQuery | N/A | Number of failed Select queries | +| FileOpen | N/A | Number of file openings | +| MergeTreeDataWriterRows | N/A | Number of data rows written to MergeTree tables | +| MergeTreeDataWriterCompressedBytes | Bytes | Number of compressed data bytes written to MergeTree tables | #### Metric Set: Data from system.asynchronous_metrics table -| Metric Name | Metric Unit | Metric Description | -| -------------------------------------- | ----------- | -------------------------------------- | -| AsynchronousMetricsCalculationTimeSpent | N/A | Time spent calculating asynchronous metrics (seconds) | -| jemalloc.arenas.all.muzzy_purged | N/A | Number of purged muzzy pages | -| jemalloc.arenas.all.dirty_purged | N/A | Number of purged dirty pages | -| BlockReadBytes_ram1 | N/A | Number of bytes read from ram1 block | -| jemalloc.background_thread.run_intervals | N/A | Number of intervals jemalloc background thread ran | -| BlockQueueTime_nbd13 | N/A | Queue wait time for nbd13 block | -| jemalloc.background_thread.num_threads | N/A | Number of jemalloc background threads | -| jemalloc.resident | N/A | Physical memory size allocated by jemalloc (bytes) | -| InterserverThreads | N/A | Number of Interserver threads | -| BlockWriteMerges_nbd7 | N/A | Number of block write merges for nbd7 block | -| MarkCacheBytes | N/A | Size of marks cache in StorageMergeTree | -| MarkCacheFiles | N/A | Number of files in marks cache for StorageMergeTree | -| MaxPartCountForPartition | N/A | Maximum active data blocks in partitions | \ No newline at end of file +| Metric Name | Metric Unit | Metric Description | 
+|------------------------------------------|-------------|-------------------------------------------------------| +| AsynchronousMetricsCalculationTimeSpent | N/A | Time spent calculating asynchronous metrics (seconds) | +| jemalloc.arenas.all.muzzy_purged | N/A | Number of purged muzzy pages | +| jemalloc.arenas.all.dirty_purged | N/A | Number of purged dirty pages | +| BlockReadBytes_ram1 | N/A | Number of bytes read from ram1 block | +| jemalloc.background_thread.run_intervals | N/A | Number of intervals jemalloc background thread ran | +| BlockQueueTime_nbd13 | N/A | Queue wait time for nbd13 block | +| jemalloc.background_thread.num_threads | N/A | Number of jemalloc background threads | +| jemalloc.resident | N/A | Physical memory size allocated by jemalloc (bytes) | +| InterserverThreads | N/A | Number of Interserver threads | +| BlockWriteMerges_nbd7 | N/A | Number of block write merges for nbd7 block | +| MarkCacheBytes | N/A | Size of marks cache in StorageMergeTree | +| MarkCacheFiles | N/A | Number of files in marks cache for StorageMergeTree | +| MaxPartCountForPartition | N/A | Maximum active data blocks in partitions | + diff --git a/home/docs/help/debian.md b/home/docs/help/debian.md index 65940c34a39..14cee060aaf 100644 --- a/home/docs/help/debian.md +++ b/home/docs/help/debian.md @@ -4,13 +4,13 @@ title: Monitoring Debian System Monitoring sidebar_label: Debian keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Monitoring] --- + > Collect and monitor general performance metrics of the Debian system. 
## Configuration Parameters - -| Parameter Name | Metric help description | -| ----------------------- | ----------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Metric help description | +|-------------------------|-------------------------------------------------------------------------------------------------------------------| | Target Host | The monitored destination IPV4, IPV6, or domain name. Note: no protocol header (e.g., https://, http://). | | Task Name | A unique name to identify this monitoring task. | | Port | SSH port of the Debian system, default: 22 | @@ -28,18 +28,16 @@ keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Mo #### Metric Set: Basic System Information - -| Metric Name | Metric Unit | Metric help description | -| -------------- | ----------- | ------------------------ | +| Metric Name | Metric Unit | Metric help description | +|----------------|-------------|--------------------------| | Host Name | N/A | Host name | | System Version | N/A | Operating system version | | Uptime | N/A | Boot time | #### Metric Set: CPU Information - -| Metric Name | Metric Unit | Metric help description | -| -------------- | ----------- | ----------------------- | +| Metric Name | Metric Unit | Metric help description | +|----------------|-------------|-------------------------| | Info | N/A | Model | | Cores | N/A | Number of cores | | Interrupt | N/A | Number of interrupts | @@ -49,9 +47,8 @@ keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Mo #### Metric Set: Memory Information - -| Metric Name | Metric Unit | Metric help description | -| ------------------- | ----------- | ---------------------------- | +| Metric Name | Metric Unit | Metric help description | +|---------------------|-------------|------------------------------| | Total Memory | Mb | Total memory capacity | | User Program Memory | Mb | Memory used by 
user programs | | Free Memory | Mb | Free memory capacity | @@ -61,9 +58,8 @@ keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Mo #### Metric Set: Disk Information - -| Metric Name | Metric Unit | Metric help description | -| ------------- | ----------- | ----------------------------- | +| Metric Name | Metric Unit | Metric help description | +|---------------|-------------|-------------------------------| | Disk Num | N/A | Total number of disks | | Partition Num | N/A | Total number of partitions | | Block Write | N/A | Number of disk blocks written | @@ -99,3 +95,4 @@ Metric Unit: - Memory Usage Rate: % - CPU Usage Rate: % + diff --git a/home/docs/help/dm.md b/home/docs/help/dm.md index 91b032fdf54..82159bf2408 100644 --- a/home/docs/help/dm.md +++ b/home/docs/help/dm.md @@ -9,41 +9,41 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameters -| Parameter name | Parameter help description | -| ------- | ---------- | -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 5236. | -| Query Timeout | Set the timeout when the SQL query does not respond to data, in ms milliseconds, the default is 3000 milliseconds. | -| database name | database instance name, optional. 
| -| username | database connection username, optional | -| password | database connection password, optional | -| URL | Database connection URL, optional | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | +| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | +| Port | The port provided by the database externally, the default is 5236. | +| Query Timeout | Set the timeout when the SQL query does not respond to data, in ms milliseconds, the default is 3000 milliseconds. | +| database name | database instance name, optional. 
| +| username | database connection username, optional | +| password | database connection password, optional | +| URL | Database connection URL, optional | +| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | +| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: basic -| Metric Name | Metric Unit | Metric Help Description | -| ------------ | -------- | ------------------ | -| PORT_NUM | None | Database exposed service port | -| CTL_PATH | None | Control File Path | -| MAX_SESSIONS | None | Maximum database connections | +| Metric Name | Metric Unit | Metric Help Description | +|--------------|-------------|-------------------------------| +| PORT_NUM | None | Database exposed service port | +| CTL_PATH | None | Control File Path | +| MAX_SESSIONS | None | Maximum database connections | #### Metric collection: status -| Metric Name | Metric Unit | Metric Help Description | -| -------- | -------- | ------------------ | -| status$ | None | Open/Close status of DM database | - +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------| +| status$ | None | Open/Close status of DM database | #### Metric collection: thread -| Metric Name | Metric Unit | Metric Help Description | -| ------------- | -------- | ------------------------- | -| dm_sql_thd | None | Thread for writing dmsql dmserver | -| dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | -| dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | +| Metric Name | Metric Unit | Metric Help Description | 
+|-------------|-------------|-------------------------------------------------------------------------| +| dm_sql_thd | None | Thread for writing dmsql dmserver | +| dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | +| dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | + diff --git a/home/docs/help/dns.md b/home/docs/help/dns.md index 7587452c1a7..d8dbd8d0921 100644 --- a/home/docs/help/dns.md +++ b/home/docs/help/dns.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6. Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. | @@ -28,7 +28,7 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito #### Metrics Set:Header -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|---------------------------------------------------| | Response Time | ms | Time taken for DNS server to respond to requests. | | Opcode | none | Type of the current message. 
| @@ -41,13 +41,13 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Metrics Set: Question -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------| | Section | none | Question record information, including the queried domain name, resource type, resource record class, and additional information. | ### Metrics Set: Answer -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|----------------------------------------------------------------------------------------------------------------------------| | Section0 | none | Answer record information, including the queried domain name, TTL, resource record class, resource type, and query result. | @@ -55,7 +55,7 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Metrics Set: Authority -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| | Section0 | none | SOA (Start of Authority) record for the domain name, including queried domain name, TTL, resource type, resource record class, and other information. | @@ -63,8 +63,9 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Metrics Set: Additional -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-----------------------------------------| | Section0 | none | Additional information for DNS queries. 
| > The metric set collects up to 10 records, with metric names from Section0 to Section9. + diff --git a/home/docs/help/docker.md b/home/docs/help/docker.md index fdd3098ce55..0e3a1f0b428 100644 --- a/home/docs/help/docker.md +++ b/home/docs/help/docker.md @@ -7,7 +7,6 @@ keywords: [open source monitoring tool, open source docker monitoring tool, moni > Collect and monitor general performance Metrics of Docker containers. - ## Pre-monitoring operations If you want to monitor the container information in `Docker`, you need to open the port according to the following steps, so that the collection request can obtain the corresponding information. @@ -31,7 +30,7 @@ This is equivalent to the **2375** port that is open to the outside world. Of co ```shell systemctl daemon-reload systemctl restart docker -```` +``` **Note: Remember to open the `2375` port number in the server console. ** @@ -42,65 +41,62 @@ Open the `2375` port number inside the server. ```shell firewall-cmd --zone=public --add-port=2375/tcp --permanent firewall-cmd --reload -```` - - - - +``` ### Configuration parameters -| Parameter name | Parameter help description | -| ------------ | ------------------------------- | -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 2375. | -| Query Timeout | Set the timeout when getting the Docker server API interface, in ms, the default is 3000 ms. | -| Container Name | Generally monitors all running container information. 
| -| username | connection username, optional | -| password | connection password, optional | -| URL | Database connection URL, optional, if configured, the parameters such as database name, username and password in the URL will override the parameters configured above | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | +| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | +| Port | The port provided by the database externally, the default is 2375. | +| Query Timeout | Set the timeout when getting the Docker server API interface, in ms, the default is 3000 ms. | +| Container Name | Generally monitors all running container information. 
| +| username | connection username, optional | +| password | connection password, optional | +| URL | Database connection URL, optional, if configured, the parameters such as database name, username and password in the URL will override the parameters configured above | +| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | +| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: system -| Metric Name | Metric Unit | Metric Help Description | -| ------------------ | -------- | ----------------------- | -| Name | None | Server Name | -| version | none | docker version number | -| os | none | server version eg: linux x86_64 | -| root_dir | none | docker folder directory eg: /var/lib/docker | -| containers | None | Total number of containers (running + not running) | -| containers_running | None | Number of running containers | -| containers_paused | none | number of containers in pause | -| images | None | The total number of container images. 
| -| ncpu | none | ncpu | -| mem_total | MB | Total size of memory used | -| system_time | none | system time | +| Metric Name | Metric Unit | Metric Help Description | +|--------------------|-------------|----------------------------------------------------| +| Name | None | Server Name | +| version | none | docker version number | +| os | none | server version eg: linux x86_64 | +| root_dir | none | docker folder directory eg: /var/lib/docker | +| containers | None | Total number of containers (running + not running) | +| containers_running | None | Number of running containers | +| containers_paused | none | number of containers in pause | +| images | None | The total number of container images. | +| ncpu | none | ncpu | +| mem_total | MB | Total size of memory used | +| system_time | none | system time | #### Metric collection: containers -| Metric Name | Metric Unit | Metric Help Description | -| -------- | -------- | ------------ | -| id | None | The ID of the container in Docker | -| name | None | The container name in the Docker container | -| image | None | Image used by the Docker container | -| command | None | Default startup command in Docker | -| state | None | The running state of the container in Docker | -| status | None | Update time in Docker container | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------------------| +| id | None | The ID of the container in Docker | +| name | None | The container name in the Docker container | +| image | None | Image used by the Docker container | +| command | None | Default startup command in Docker | +| state | None | The running state of the container in Docker | +| status | None | Update time in Docker container | #### Metrics collection: stats -| Metric Name | Metric Unit | Metric Help Description | -| ---------------- | -------- | ------------------ | -| name | None | The name in the Docker container | -| available_memory | MB | The 
amount of memory that the Docker container can utilize | -| used_memory | MB | The amount of memory already used by the Docker container | -| memory_usage | None | Memory usage of the Docker container | -| cpu_delta | None | The number of CPUs already used by the Docker container | -| number_cpus | None | The number of CPUs that the Docker container can use | -| cpu_usage | None | Docker container CPU usage | +| Metric Name | Metric Unit | Metric Help Description | +|------------------|-------------|------------------------------------------------------------| +| name | None | The name in the Docker container | +| available_memory | MB | The amount of memory that the Docker container can utilize | +| used_memory | MB | The amount of memory already used by the Docker container | +| memory_usage | None | Memory usage of the Docker container | +| cpu_delta | None | The number of CPUs already used by the Docker container | +| number_cpus | None | The number of CPUs that the Docker container can use | +| cpu_usage | None | Docker container CPU usage | + diff --git a/home/docs/help/doris_be.md b/home/docs/help/doris_be.md index 2bc212ef3fb..8dcde7b549b 100644 --- a/home/docs/help/doris_be.md +++ b/home/docs/help/doris_be.md @@ -9,162 +9,163 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库BE监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8040 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| 数据库名称 | 数据库实例名称,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8040 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| 数据库名称 | 
数据库实例名称,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:doris_be_load_channel_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| value | 无 | 当前打开的 load channel 个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------------| +| value | 无 | 当前打开的 load channel 个数 | #### 指标集合:doris_be_memtable_flush_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| value | 无 | memtable写入磁盘的个数累计值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | memtable写入磁盘的个数累计值 | #### 指标集合:doris_be_plan_fragment_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------- | -| value | 无 | 当前已接收的 fragment instance 的数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------| +| value | 无 | 当前已接收的 fragment instance 的数量 | #### 指标集合:doris_be_process_thread_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------- | -| value | 无 | BE 进程线程数。通过 `/proc/pid/task` 采集 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | BE 进程线程数。通过 `/proc/pid/task` 采集 | #### 指标集合:doris_be_query_scan_rows -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 读取行数的累计值。这里只统计读取 Olap 表的数据量。并且是 RawRowsRead(部分数据行可能被索引跳过,并没有真正读取,但仍会记录到这个值中) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------------------------------------------| +| value | 无 | 读取行数的累计值。这里只统计读取 Olap 表的数据量。并且是 RawRowsRead(部分数据行可能被索引跳过,并没有真正读取,但仍会记录到这个值中) | #### 指标集合:doris_be_result_buffer_block_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------- | -| value | 无 | 当前查询结果缓存中的 query 个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 当前查询结果缓存中的 
query 个数 | #### 指标集合:doris_be_send_batch_thread_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------------- | -| value | 无 | 导入时用于发送数据包的线程池的排队个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 导入时用于发送数据包的线程池的排队个数 | #### 指标集合:doris_be_tablet_base_max_compaction_score -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------- | -| value | 无 | 当前最大的 Base Compaction Score | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------------------| +| value | 无 | 当前最大的 Base Compaction Score | #### 指标集合:doris_be_timeout_canceled_fragment_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------------------- | -| value | 无 | 因超时而被取消的 fragment instance 数量累计值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------------| +| value | 无 | 因超时而被取消的 fragment instance 数量累计值 | #### 指标集合:doris_be_load_rows -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------- | -| value | 无 | 通过 tablet sink 发送的行数累计 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------| +| value | 无 | 通过 tablet sink 发送的行数累计 | #### 指标集合:doris_be_all_rowsets_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------- | -| value | 无 | 当前所有 rowset 的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 当前所有 rowset 的个数 | #### 指标集合:doris_be_all_segments_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------- | -| value | 无 | 当前所有 segment 的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| value | 无 | 当前所有 segment 的个数 | #### 指标集合:doris_be_heavy_work_max_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------ | -| value | 无 | brpc heavy线程池线程个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | brpc heavy线程池线程个数 | #### 指标集合:doris_be_light_work_max_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| 
-------- | -------- | ------------------------ | -| value | 无 | brpc light线程池线程个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | brpc light线程池线程个数 | #### 指标集合:doris_be_heavy_work_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------------- | -| value | 无 | brpc heavy线程池队列最大长度,超过则阻塞提交work | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | brpc heavy线程池队列最大长度,超过则阻塞提交work | #### 指标集合:doris_be_light_work_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------------- | -| value | 无 | brpc light线程池队列最大长度,超过则阻塞提交work | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | brpc light线程池队列最大长度,超过则阻塞提交work | #### 指标集合:doris_be_heavy_work_active_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------- | -| value | 无 | brpc heavy线程池活跃线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | brpc heavy线程池活跃线程数 | #### 指标集合:doris_be_light_work_active_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------- | -| value | 无 | brpc light线程池活跃线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | brpc light线程池活跃线程数 | #### 指标集合:doris_be_compaction_bytes_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | ---------------------------------- | -| base | 字节 | Base Compaction 的数据量累计 | -| cumulative | 字节 | Cumulative Compaction 的数据量累计 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|------------------------------| +| base | 字节 | Base Compaction 的数据量累计 | +| cumulative | 字节 | Cumulative Compaction 的数据量累计 | #### 指标集合:doris_be_disks_avail_capacity -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------- | -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的剩余空间 | +| 指标名称 | 指标单位 | 
指标帮助描述 | +|-------|------|--------------------------------------------| +| path | 无 | 指定数据目录 | +| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的剩余空间 | #### 指标集合:doris_be_disks_total_capacity -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------- | -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的全部空间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------| +| path | 无 | 指定数据目录 | +| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的全部空间 | #### 指标集合:doris_be_local_bytes_read_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------- | -| value | 字节 | 由 `LocalFileReader` 读取的字节数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------| +| value | 字节 | 由 `LocalFileReader` 读取的字节数 | #### 指标集合:doris_be_local_bytes_written_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------- | -| value | 字节 | 由 `LocalFileWriter` 写入的字节数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------| +| value | 字节 | 由 `LocalFileWriter` 写入的字节数 | #### 指标集合:doris_be_memory_allocated_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------------------------- | -| value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------| +| value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | + diff --git a/home/docs/help/doris_fe.md b/home/docs/help/doris_fe.md index bb7a6b99d53..b478b2eaadb 100644 --- a/home/docs/help/doris_fe.md +++ b/home/docs/help/doris_fe.md @@ -4,6 +4,7 @@ title: Monitoring DORIS Database FE Monitoring sidebar_label: DORIS Database FE keywords: [Open Source Monitoring System, Open Source Database Monitoring, DORIS Database FE Monitoring] --- + > Collect and monitor general performance metrics for DORIS database FE. Supports DORIS 2.0.0. 
**Protocol: HTTP** @@ -14,9 +15,8 @@ Check the `fe/conf/fe.conf` file to obtain the value of the `http_port` configur ### Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Parameter Description | +|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitor Host | The monitored target's IPV4, IPV6, or domain name. Note: Without the protocol header (e.g., https://, http://) | | Task Name | A unique name identifying this monitoring task | | Port | The port provided by the database to the outside, default is 8030 ,get the value of the`http_port` configuration item | @@ -30,26 +30,23 @@ Check the `fe/conf/fe.conf` file to obtain the value of the `http_port` configur #### Metric Set: doris_fe_connection_total - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | -------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------| | value | None | The current number of MySQL port connections on FE | #### Metric Set: doris_fe_edit_log_clean Should not fail; if it does, manual intervention is required. 
- -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|---------------------------------------------------------------| | success | None | The number of successful cleanups of historical metadata logs | | failed | None | The number of failed cleanups of historical metadata logs | #### Metric Set: doris_fe_edit_log - -| Metric Name | Metric Unit | Metric help description | -| ----------------- | ----------- | ------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------------|-------------|---------------------------------------------| | write | None | The count of metadata log write operations | | read | None | The count of metadata log read operations | | current | None | The current number of metadata logs | @@ -60,9 +57,8 @@ Should not fail; if it does, manual intervention is required. Should not fail; if it does, manual intervention is required. - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | -------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------------------------| | success | None | The number of successful cleanups of historical metadata image files | | failed | None | The number of failed cleanups of historical metadata image files | @@ -70,77 +66,68 @@ Should not fail; if it does, manual intervention is required. Should not fail; if it does, manual intervention is required. 
- -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------ | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|--------------------------------------------------------------| | success | None | The number of successful generations of metadata image files | | failed | None | The number of failed generations of metadata image files | #### Metric Set: doris_fe_query_err - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ----------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|-------------------------------------------| | value | None | The cumulative value of erroneous queries | #### Metric Set: doris_fe_max_journal_id - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The current maximum metadata log ID on the FE node. If it is a Master FE, it is the maximum ID currently written; if it is a non-Master FE, it represents the maximum metadata log ID currently being replayed. 
Used to observe if there is a large gap between the IDs of multiple FEs. A large gap indicates issues with metadata synchronization | #### Metric Set: doris_fe_max_tablet_compaction_score - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The largest compaction score value among all BE nodes. This value can observe the current cluster's maximum compaction score to judge if it is too high. If too high, there may be delays in queries or writes | #### Metric Set: doris_fe_qps - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|---------------------------------------------------------------------------------| | value | None | The number of queries per second on the current FE (only counts query requests) | #### Metric Set: doris_fe_query_err_rate - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------ | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|--------------------------------------------| | value | None | The number of erroneous queries per second | #### Metric Set: doris_fe_report_queue_size - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The length of the queue for various regular reporting tasks on the BE side at the FE end. This value reflects the degree of blocking of reporting tasks on the Master FE node. A larger value indicates insufficient processing capacity on the FE | #### Metric Set: doris_fe_rps - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ---------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|------------------------------------------------------------------------------------------------------| | value | None | The number of requests per second on the current FE (includes queries and other types of statements) | #### Metric Set: doris_fe_scheduled_tablet_num - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | 
+|-------------|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The number of tablets currently being scheduled by the Master FE node. This includes tablets that are being repaired and tablets that are being balanced. This value can reflect the number of tablets currently migrating in the cluster. If there is a value for a long time, it indicates that the cluster is unstable | #### Metric Set: doris_fe_txn_status Can observe the number of import transactions in various states to determine if there is a backlog. - | Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | unknown | None | Unknown state | | prepare | None | In preparation | | committed | None | Committed | | visible | None | Visible | | aborted | None | Aborted / Revoked | + diff --git a/home/docs/help/dynamic_tp.md b/home/docs/help/dynamic_tp.md index 7418e17cc1b..fd36206bc6e 100644 --- a/home/docs/help/dynamic_tp.md +++ b/home/docs/help/dynamic_tp.md @@ -24,6 +24,7 @@ management: exposure: include: '*' ``` + Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: ```json @@ -60,7 +61,6 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo 3. 
Add DynamicTp monitoring under HertzBeat middleware monitoring - ### Configuration parameters | Parameter name | Parameter help description | @@ -78,24 +78,25 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo #### Metric collection: thread_pool -| Metric Name | Metric Unit | Metric Help Description | -|---------|------|------------------------| -| pool_name | None | Thread pool name | -| core_pool_size | None | Number of core threads | -| maximum_pool_size | None | Maximum number of threads | -| queue_type | None | Task queue type | -| queue_capacity | MB | task queue capacity | -| queue_size | None | The current occupied size of the task queue | -| fair | None | Queue mode, SynchronousQueue will be used | -| queue_remaining_capacity | MB | task queue remaining size | -| active_count | None | Number of active threads | -| task_count | None | Total number of tasks | -| completed_task_count | None | Number of completed tasks | -| largest_pool_size | None | The largest number of threads in history | -| pool_size | none | current number of threads | -| wait_task_count | None | Number of tasks waiting to be executed | -| reject_count | None | Number of rejected tasks | -| reject_handler_name | None | Reject policy type | -| dynamic | None | Dynamic thread pool or not | -| run_timeout_count | None | Number of running timeout tasks | -| queue_timeout_count | None | Number of tasks waiting for timeout | +| Metric Name | Metric Unit | Metric Help Description | +|--------------------------|-------------|---------------------------------------------| +| pool_name | None | Thread pool name | +| core_pool_size | None | Number of core threads | +| maximum_pool_size | None | Maximum number of threads | +| queue_type | None | Task queue type | +| queue_capacity | MB | task queue capacity | +| queue_size | None | The current occupied size of the task queue | +| fair | None | Queue mode, SynchronousQueue will be used | +| queue_remaining_capacity | 
MB | task queue remaining size | +| active_count | None | Number of active threads | +| task_count | None | Total number of tasks | +| completed_task_count | None | Number of completed tasks | +| largest_pool_size | None | The largest number of threads in history | +| pool_size | none | current number of threads | +| wait_task_count | None | Number of tasks waiting to be executed | +| reject_count | None | Number of rejected tasks | +| reject_handler_name | None | Reject policy type | +| dynamic | None | Dynamic thread pool or not | +| run_timeout_count | None | Number of running timeout tasks | +| queue_timeout_count | None | Number of tasks waiting for timeout | + diff --git a/home/docs/help/elasticsearch.md b/home/docs/help/elasticsearch.md index 25078850862..3ac3d62a7e1 100644 --- a/home/docs/help/elasticsearch.md +++ b/home/docs/help/elasticsearch.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6. Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. 
| @@ -27,7 +27,7 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] #### Metrics Set:health -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|-------------------------------------------| | cluster_name | none | Cluster Name | | status | none | status | @@ -49,7 +49,7 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] #### Metrics Set:nodes_detail -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------|-------------|-------------------------| | node_name | none | Node Name | | ip | none | IP Address | @@ -61,3 +61,4 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] | disk_free | GB | Disk Free | | disk_total | GB | Disk Total | | disk_used_percent | % | Disk Used Percent | + diff --git a/home/docs/help/euleros.md b/home/docs/help/euleros.md index c63c6c26643..786dab30afc 100644 --- a/home/docs/help/euleros.md +++ b/home/docs/help/euleros.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IP, IPV6, or domain name of the monitored endpoint. Note ⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitoring, ensuring uniqueness. 
| @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|---------------------------| | Host Name | None | Host name. | | System Version | None | Operating system version. | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|-------------------------------------------| | info | None | CPU model. | | cores | None | Number of CPU cores. | @@ -47,7 +47,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Memory Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|------------------------------------| | total | Mb | Total memory capacity. | | used | Mb | Used memory by user programs. | @@ -58,7 +58,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Disk Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------|-------------|------------------------------------| | disk_num | None | Total number of disks. | | partition_num | None | Total number of partitions. | @@ -68,7 +68,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Interface Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------------| | interface_name | None | Name of the network interface. | | receive_bytes | Mb | Inbound data traffic. 
| @@ -105,3 +105,4 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/docs/help/flink.md b/home/docs/help/flink.md index 2115a1f0b0f..bd731a6dee6 100644 --- a/home/docs/help/flink.md +++ b/home/docs/help/flink.md @@ -7,10 +7,9 @@ keywords: [open source monitoring tool, open source flink monitoring tool] > Collect and monitor the general performance Metrics of Flink. - ### Configuration parameter -| Parameter Name | Parameter Help Description | +| Parameter Name | Parameter Help Description | |---------------------|-----------------------------------------------------------------------------------------------------------------------------| | Monitor Host | The monitored peer IPV4, IPV6, or domain name. Note: Do not include protocol headers (e.g., https://, http://). | | Task Name | Identifier for this monitoring task, name must be unique. | @@ -27,13 +26,11 @@ keywords: [open source monitoring tool, open source flink monitoring tool] #### Metrics Set:Overview -| Metric Name | Metric Unit | Metric Help Description | -|---------------|-------------|-------------------------| -| slots_total | Units | Total number of slots. | -| slots_used | Units | Number of slots used. | -| task_total | Units | Total number of tasks. | -| jobs_running | Units | Number of jobs running. | -| jobs_failed | Units | Number of jobs failed. | - - +| Metric Name | Metric Unit | Metric Help Description | +|--------------|-------------|-------------------------| +| slots_total | Units | Total number of slots. | +| slots_used | Units | Number of slots used. | +| task_total | Units | Total number of tasks. | +| jobs_running | Units | Number of jobs running. | +| jobs_failed | Units | Number of jobs failed. 
| diff --git a/home/docs/help/flink_on_yarn.md b/home/docs/help/flink_on_yarn.md index 0a81fb93c4f..1ae8e5ebd51 100644 --- a/home/docs/help/flink_on_yarn.md +++ b/home/docs/help/flink_on_yarn.md @@ -4,134 +4,135 @@ ### Configuration Parameters -| Parameter Name | Parameter Help Description | -| -------------------- | --------------------------------------------------------------------------- | -| Monitoring Host | The monitored peer's IPV4, IPV6, or domain name. Note ⚠️ do not include protocol headers (e.g., https://, http://). | -| Task Name | The name identifying this monitoring task. The name must be unique. | -| Yarn Port | The Yarn port, corresponding to the port in `yarn.resourcemanager.webapp.address`. | -| Query Timeout | The timeout for JVM connections, in milliseconds, default is 3000 ms. | -| Enable SSL | Whether to enable SSL | -| Username | Connection username | -| Password | Connection password | -| Monitoring Interval | Interval for periodic data collection, in seconds, minimum interval is 30 seconds. | -| Tags | Used for categorizing and managing monitoring resources. | -| Description | Additional notes and descriptions for this monitoring task. Users can add notes here. | +| Parameter Name | Parameter Help Description | +|---------------------|---------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | The monitored peer's IPV4, IPV6, or domain name. Note ⚠️ do not include protocol headers (e.g., https://, http://). | +| Task Name | The name identifying this monitoring task. The name must be unique. | +| Yarn Port | The Yarn port, corresponding to the port in `yarn.resourcemanager.webapp.address`. | +| Query Timeout | The timeout for JVM connections, in milliseconds, default is 3000 ms. 
| +| Enable SSL | Whether to enable SSL | +| Username | Connection username | +| Password | Connection password | +| Monitoring Interval | Interval for periodic data collection, in seconds, minimum interval is 30 seconds. | +| Tags | Used for categorizing and managing monitoring resources. | +| Description | Additional notes and descriptions for this monitoring task. Users can add notes here. | ### Collected Metrics #### Metrics Set: JobManager Metrics -| Metric Name | Metric Unit | Metric Help Description | -| ------------------------------------------------------- | ----- | ---------------------------- | -| Status.JVM.Memory.NonHeap.Committed | Bytes | Non-heap memory committed | -| Status.JVM.Memory.Mapped.TotalCapacity | Bytes | Total capacity of mapped memory | -| Status.JVM.Memory.NonHeap.Used | Bytes | Non-heap memory used | -| Status.JVM.Memory.Metaspace.Max | Bytes | Maximum capacity of metaspace | -| Status.JVM.GarbageCollector.G1_Old_Generation.Count | Count | Count of old generation garbage collections | -| Status.JVM.Memory.Direct.MemoryUsed | Bytes | Direct memory used | -| Status.JVM.Memory.Mapped.MemoryUsed | Bytes | Mapped memory used | -| Status.JVM.GarbageCollector.G1_Young_Generation.Count | Count | Count of young generation garbage collections | -| Status.JVM.Memory.Direct.TotalCapacity | Bytes | Total capacity of direct memory | -| Status.JVM.GarbageCollector.G1_Old_Generation.Time | ms | Time spent on old generation garbage collections | -| Status.JVM.Memory.Heap.Committed | Bytes | Heap memory committed | -| Status.JVM.Memory.Mapped.Count | Count | Count of mapped memory | -| Status.JVM.Memory.Metaspace.Used | Bytes | Metaspace memory used | -| Status.JVM.Memory.Direct.Count | Count | Count of direct memory | -| Status.JVM.Memory.Heap.Used | Bytes | Heap memory used | -| Status.JVM.Memory.Heap.Max | Bytes | Maximum capacity of heap memory | -| Status.JVM.GarbageCollector.G1_Young_Generation.Time | ms | Time spent on young generation garbage 
collections | -| Status.JVM.Memory.NonHeap.Max | Bytes | Maximum capacity of non-heap memory | +| Metric Name | Metric Unit | Metric Help Description | +|-------------------------------------------------------|-------------|----------------------------------------------------| +| Status.JVM.Memory.NonHeap.Committed | Bytes | Non-heap memory committed | +| Status.JVM.Memory.Mapped.TotalCapacity | Bytes | Total capacity of mapped memory | +| Status.JVM.Memory.NonHeap.Used | Bytes | Non-heap memory used | +| Status.JVM.Memory.Metaspace.Max | Bytes | Maximum capacity of metaspace | +| Status.JVM.GarbageCollector.G1_Old_Generation.Count | Count | Count of old generation garbage collections | +| Status.JVM.Memory.Direct.MemoryUsed | Bytes | Direct memory used | +| Status.JVM.Memory.Mapped.MemoryUsed | Bytes | Mapped memory used | +| Status.JVM.GarbageCollector.G1_Young_Generation.Count | Count | Count of young generation garbage collections | +| Status.JVM.Memory.Direct.TotalCapacity | Bytes | Total capacity of direct memory | +| Status.JVM.GarbageCollector.G1_Old_Generation.Time | ms | Time spent on old generation garbage collections | +| Status.JVM.Memory.Heap.Committed | Bytes | Heap memory committed | +| Status.JVM.Memory.Mapped.Count | Count | Count of mapped memory | +| Status.JVM.Memory.Metaspace.Used | Bytes | Metaspace memory used | +| Status.JVM.Memory.Direct.Count | Count | Count of direct memory | +| Status.JVM.Memory.Heap.Used | Bytes | Heap memory used | +| Status.JVM.Memory.Heap.Max | Bytes | Maximum capacity of heap memory | +| Status.JVM.GarbageCollector.G1_Young_Generation.Time | ms | Time spent on young generation garbage collections | +| Status.JVM.Memory.NonHeap.Max | Bytes | Maximum capacity of non-heap memory | #### Metrics Set: JobManager Config -| Metric Name | Metric Unit | Metric Help Description | -| --------------------------------------------- |-------------| ------------------------------------ | -| internal.jobgraph-path | - | Internal job 
graph path | -| env.java.home | - | Java environment path | -| classloader.check-leaked-classloader | - | Whether to check for leaked class loaders | -| env.java.opts | - | Java options | -| high-availability.cluster-id | - | High availability cluster ID | -| jobmanager.rpc.address | - | JobManager's RPC address | -| jobmanager.memory.jvm-overhead.min | Bytes | Minimum JVM overhead for JobManager | -| jobmanager.web.port | Port | JobManager's Web port | -| webclient.port | Port | Web client port | -| execution.savepoint.ignore-unclaimed-state | - | Whether to ignore unclaimed state | -| io.tmp.dirs | Path | Temporary file directories | -| parallelism.default | - | Default parallelism | -| taskmanager.memory.fraction | - | TaskManager memory fraction | -| taskmanager.numberOfTaskSlots | - | Number of task slots for TaskManager | -| yarn.application.name | - | Yarn application name | -| taskmanager.heap.mb | MB | Heap memory size for TaskManager | -| taskmanager.memory.process.size | GB | Process memory size for TaskManager | -| web.port | Port | Web port | -| classloader.resolve-order | - | Class loader resolve order | -| jobmanager.heap.mb | MB | Heap memory size for JobManager | -| jobmanager.memory.off-heap.size | Bytes | Off-heap memory size for JobManager | -| state.backend.incremental | - | Whether the state backend is incremental | -| execution.target | - | Execution target | -| jobmanager.memory.process.size | GB | Process memory size for JobManager | -| web.tmpdir | Path | Web temporary directory | -| yarn.ship-files | Path | Yarn shipped files | -| jobmanager.rpc.port | Port | JobManager's RPC port | -| internal.io.tmpdirs.use-local-default | - | Whether to use local default temporary directories | -| execution.checkpointing.interval | ms | Checkpointing interval | -| execution.attached | - | Whether to execute attached | -| internal.cluster.execution-mode | - | Internal cluster execution mode | -| execution.shutdown-on-attached-exit | - | Whether to 
shutdown on attached exit | -| pipeline.jars | Path | Pipeline JAR files | -| rest.address | - | REST address | -| state.backend | - | State backend type | -| jobmanager.memory.jvm-metaspace.size | Bytes | JVM metaspace size for JobManager | -| $internal.deployment.config-dir | Path | Internal deployment configuration directory | -| $internal.yarn.log-config-file | Path | Internal Yarn log configuration file path | -| jobmanager.memory.heap.size | Bytes | Heap memory size for JobManager | -| state.checkpoints.dir | Path | State checkpoints directory | -| jobmanager.memory.jvm-overhead.max | Bytes | Maximum JVM overhead for JobManager | +| Metric Name | Metric Unit | Metric Help Description | +|--------------------------------------------|-------------|----------------------------------------------------| +| internal.jobgraph-path | - | Internal job graph path | +| env.java.home | - | Java environment path | +| classloader.check-leaked-classloader | - | Whether to check for leaked class loaders | +| env.java.opts | - | Java options | +| high-availability.cluster-id | - | High availability cluster ID | +| jobmanager.rpc.address | - | JobManager's RPC address | +| jobmanager.memory.jvm-overhead.min | Bytes | Minimum JVM overhead for JobManager | +| jobmanager.web.port | Port | JobManager's Web port | +| webclient.port | Port | Web client port | +| execution.savepoint.ignore-unclaimed-state | - | Whether to ignore unclaimed state | +| io.tmp.dirs | Path | Temporary file directories | +| parallelism.default | - | Default parallelism | +| taskmanager.memory.fraction | - | TaskManager memory fraction | +| taskmanager.numberOfTaskSlots | - | Number of task slots for TaskManager | +| yarn.application.name | - | Yarn application name | +| taskmanager.heap.mb | MB | Heap memory size for TaskManager | +| taskmanager.memory.process.size | GB | Process memory size for TaskManager | +| web.port | Port | Web port | +| classloader.resolve-order | - | Class loader resolve order | +| 
jobmanager.heap.mb | MB | Heap memory size for JobManager | +| jobmanager.memory.off-heap.size | Bytes | Off-heap memory size for JobManager | +| state.backend.incremental | - | Whether the state backend is incremental | +| execution.target | - | Execution target | +| jobmanager.memory.process.size | GB | Process memory size for JobManager | +| web.tmpdir | Path | Web temporary directory | +| yarn.ship-files | Path | Yarn shipped files | +| jobmanager.rpc.port | Port | JobManager's RPC port | +| internal.io.tmpdirs.use-local-default | - | Whether to use local default temporary directories | +| execution.checkpointing.interval | ms | Checkpointing interval | +| execution.attached | - | Whether to execute attached | +| internal.cluster.execution-mode | - | Internal cluster execution mode | +| execution.shutdown-on-attached-exit | - | Whether to shutdown on attached exit | +| pipeline.jars | Path | Pipeline JAR files | +| rest.address | - | REST address | +| state.backend | - | State backend type | +| jobmanager.memory.jvm-metaspace.size | Bytes | JVM metaspace size for JobManager | +| $internal.deployment.config-dir | Path | Internal deployment configuration directory | +| $internal.yarn.log-config-file | Path | Internal Yarn log configuration file path | +| jobmanager.memory.heap.size | Bytes | Heap memory size for JobManager | +| state.checkpoints.dir | Path | State checkpoints directory | +| jobmanager.memory.jvm-overhead.max | Bytes | Maximum JVM overhead for JobManager | #### TaskManager Metrics -| Metric Name | Metric Unit | Metric Help Description | -| ------------------------------------------- | ----- | ----------------------------------- | -| Container ID | - | Container ID for uniquely identifying a container | -| Path | - | Container path | -| Data Port | Port | Data transmission port | -| JMX Port | Port | JMX (Java Management Extensions) port | -| Last Heartbeat | Timestamp | Last heartbeat time | -| All Slots | Count | Total number of task slots in the 
container | -| Free Slots | Count | Number of free task slots in the container | -| totalResourceCpuCores | Cores | Total number of CPU cores in the container | -| totalResourceTaskHeapMemory | MB | Total task heap memory size in the container | -| totalResourceManagedMemory | MB | Total managed memory size in the container | -| totalResourceNetworkMemory | MB | Total network memory size in the container | -| freeResourceCpuCores | Cores | Number of free CPU cores in the container | -| freeResourceTaskHeapMemory | MB | Free task heap memory size in the container | -| freeResourceTaskOffHeapMemory | MB | Free task off-heap memory size in the container | -| freeResourceManagedMemory | MB | Free managed memory size in the container | -| freeResourceNetworkMemory | MB | Free network memory size in the container | -| CPU Cores | Cores | Number of CPU cores | -| Physical MEM | MB | Size of physical memory | -| JVM Heap Size | MB | Size of JVM heap memory | -| Flink Managed MEM | MB | Size of Flink managed memory | -| Framework Heap | MB | Size of framework heap memory | -| Task Heap | MB | Size of task heap memory | -| Framework Off-Heap | MB | Size of framework off-heap memory | -| memoryConfigurationTaskOffHeap | Bytes | Task off-heap memory configuration | -| Network | MB | Network memory configuration | -| Managed Memory | MB | Managed memory configuration | -| JVM Metaspace | MB | Size of JVM metaspace | -| JVM Overhead | MB | JVM overhead | -| memoryConfigurationTotalFlinkMemory | Bytes | Total Flink memory configuration | -| memoryConfigurationTotalProcessMemory | Bytes | Total process memory configuration | +| Metric Name | Metric Unit | Metric Help Description | +|---------------------------------------|-------------|---------------------------------------------------| +| Container ID | - | Container ID for uniquely identifying a container | +| Path | - | Container path | +| Data Port | Port | Data transmission port | +| JMX Port | Port | JMX (Java Management 
Extensions) port | +| Last Heartbeat | Timestamp | Last heartbeat time | +| All Slots | Count | Total number of task slots in the container | +| Free Slots | Count | Number of free task slots in the container | +| totalResourceCpuCores | Cores | Total number of CPU cores in the container | +| totalResourceTaskHeapMemory | MB | Total task heap memory size in the container | +| totalResourceManagedMemory | MB | Total managed memory size in the container | +| totalResourceNetworkMemory | MB | Total network memory size in the container | +| freeResourceCpuCores | Cores | Number of free CPU cores in the container | +| freeResourceTaskHeapMemory | MB | Free task heap memory size in the container | +| freeResourceTaskOffHeapMemory | MB | Free task off-heap memory size in the container | +| freeResourceManagedMemory | MB | Free managed memory size in the container | +| freeResourceNetworkMemory | MB | Free network memory size in the container | +| CPU Cores | Cores | Number of CPU cores | +| Physical MEM | MB | Size of physical memory | +| JVM Heap Size | MB | Size of JVM heap memory | +| Flink Managed MEM | MB | Size of Flink managed memory | +| Framework Heap | MB | Size of framework heap memory | +| Task Heap | MB | Size of task heap memory | +| Framework Off-Heap | MB | Size of framework off-heap memory | +| memoryConfigurationTaskOffHeap | Bytes | Task off-heap memory configuration | +| Network | MB | Network memory configuration | +| Managed Memory | MB | Managed memory configuration | +| JVM Metaspace | MB | Size of JVM metaspace | +| JVM Overhead | MB | JVM overhead | +| memoryConfigurationTotalFlinkMemory | Bytes | Total Flink memory configuration | +| memoryConfigurationTotalProcessMemory | Bytes | Total process memory configuration | #### TaskManager Metrics -| Metric Name | Metric Unit | Metric Help Description | -| ------------------------------------------ | ----- | ----------------------------- | -| Status.Shuffle.Netty.TotalMemory | MB | Total memory used 
by Netty Shuffle | -| Status.Flink.Memory.Managed.Used | MB | Managed memory used by Flink | -| Status.JVM.Memory.Metaspace.Used | MB | Used JVM metaspace memory | -| Status.JVM.Memory.Metaspace.Max | MB | Maximum JVM metaspace memory | -| Status.JVM.Memory.Heap.Used | MB | Used JVM heap memory | -| Status.JVM.Memory.Heap.Max | MB | Maximum JVM heap memory | -| Status.Flink.Memory.Managed.Total | MB | Total managed memory by Flink | -| Status.Shuffle.Netty.UsedMemory | MB | Used memory by Netty Shuffle | \ No newline at end of file +| Metric Name | Metric Unit | Metric Help Description | +|-----------------------------------|-------------|------------------------------------| +| Status.Shuffle.Netty.TotalMemory | MB | Total memory used by Netty Shuffle | +| Status.Flink.Memory.Managed.Used | MB | Managed memory used by Flink | +| Status.JVM.Memory.Metaspace.Used | MB | Used JVM metaspace memory | +| Status.JVM.Memory.Metaspace.Max | MB | Maximum JVM metaspace memory | +| Status.JVM.Memory.Heap.Used | MB | Used JVM heap memory | +| Status.JVM.Memory.Heap.Max | MB | Maximum JVM heap memory | +| Status.Flink.Memory.Managed.Total | MB | Total managed memory by Flink | +| Status.Shuffle.Netty.UsedMemory | MB | Used memory by Netty Shuffle | + diff --git a/home/docs/help/freebsd.md b/home/docs/help/freebsd.md index 96d9866743b..51d0ed9ab0b 100644 --- a/home/docs/help/freebsd.md +++ b/home/docs/help/freebsd.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source Operating System Monitori ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IPv4, IPv6, or domain name of the monitored peer. Note ⚠️ without the protocol header (eg: https://, http://). 
| | Task Name | Identifies the name of this monitor, ensuring uniqueness of the name. | @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source Operating System Monitori #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------| | Host Name | None | Host name | | System Version | None | Operating system version | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source Operating System Monitori #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|----------------------------------------------| | info | None | CPU model | | cores | Number | Number of CPU cores | @@ -85,3 +85,4 @@ Statistics of the top 10 processes using memory. Statistics include: Process ID, | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/docs/help/ftp.md b/home/docs/help/ftp.md index 8802d3ab415..50a571eb7a7 100644 --- a/home/docs/help/ftp.md +++ b/home/docs/help/ftp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source ftp server monitoring tool, ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: ftp://). | | Monitoring name | Identify the name of this monitoring, The name needs to be unique. 
| @@ -28,7 +28,8 @@ keywords: [ open source monitoring tool, open source ftp server monitoring tool, #### Metrics Set:Basic -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------|-------------|----------------------------------------------------------| | Is Active | none | Check if the directory exists and has access permission. | | Response Time | ms | Response Time | + diff --git a/home/docs/help/fullsite.md b/home/docs/help/fullsite.md index 3246fa31d82..6145f238bdc 100644 --- a/home/docs/help/fullsite.md +++ b/home/docs/help/fullsite.md @@ -7,28 +7,29 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon > Available or not to monitor all pages of the website. > A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. -> Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. +> Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| SiteMap | Relative path of website SiteMap address, eg:/sitemap.xml | -| Enable HTTPS | Whether to access the website through HTTPS. 
Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| SiteMap | Relative path of website SiteMap address, eg:/sitemap.xml | +| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary + +| Metric name | Metric unit | Metric help description | +|--------------|-------------|------------------------------------------------------| +| url | none | URL path of web page | +| statusCode | none | Response HTTP status code for requesting the website | +| responseTime | ms | Website response time | +| errorMsg | none | Error message feedback after requesting the website | -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| url | none | URL path of web page | -| statusCode | none | Response HTTP status code for requesting the website | -| responseTime | ms | Website response time | -| errorMsg | none | Error message feedback after requesting the website | diff --git a/home/docs/help/guide.md b/home/docs/help/guide.md index 5f7af25ac16..ce182746ffa 100644 --- a/home/docs/help/guide.md +++ b/home/docs/help/guide.md @@ -1,137 +1,134 @@ ---- -id: guide -title: Help Center -sidebar_label: Help Center ---- - -> Hertzbeat - An open source, real-time monitoring tool with custom-monitor and agentLess. -> Help documents and auxiliary information during use - -## 🔬 Monitoring services - -> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. -> Planned monitoring type:application service, database, operating system, cloud native, open source middleware. - -### Application service monitoring - - 👉 [Website monitoring](website)
- 👉 [HTTP API](api)
- 👉 [PING Connectivity](ping)
- 👉 [Port availability](port)
- 👉 [Full site monitoring](fullsite)
- 👉 [SSL Cert monitoring](ssl_cert)
- 👉 [DNS monitoring](dns)
- 👉 [FTP monitoring](ftp)
- 👉 [Websocket monitoring](websocket)
- -### Program monitoring - - 👉 [Process](process)
- 👉 [JVM monitoring](jvm)
- 👉 [SpringBoot2.0](springboot2)
- 👉 [SpringBoot3.0](springboot3)
- 👉 [DynamicTp](dynamic_tp)
- -### Database monitoring - - 👉 [MYSQL database monitoring](mysql)
- 👉 [MariaDB database monitoring](mariadb)
- 👉 [PostgreSQL database monitoring](postgresql)
- 👉 [SqlServer database monitoring](sqlserver)
- 👉 [Oracle database monitoring](oracle)
- 👉 [DM database monitoring](dm)
- 👉 [OpenGauss database monitoring](opengauss)
- 👉 [IoTDB database monitoring](iotdb)
- 👉 [TiDB database monitoring](tidb)
- 👉 [MongoDB database monitoring](mongodb)
- 👉 [NebulaGraph cluster monitoring](nebulagraph_cluster)
- -### Cache monitoring - - 👉 [Redis monitoring](redis)
- 👉 [Memcached monitoring](memcached)
- - -### Operating system monitoring - - 👉 [Linux operating system monitoring](linux)
- 👉 [Windows operating system monitoring](windows)
- 👉 [Ubuntu operating system monitoring](ubuntu)
- 👉 [Centos operating system monitoring](centos)
- 👉 [FreeBSD operating system monitoring](freebsd)
- 👉 [RedHat operating system monitoring](redhat)
- 👉 [Rocky Linux operating system monitoring](rockylinux)
- 👉 [EulerOS monitoring](euleros)
- -### Middleware monitoring - - 👉 [Zookeeper](zookeeper)
- 👉 [Kafka](kafka)
- 👉 [Tomcat](tomcat)
- 👉 [ShenYu](shenyu)
- 👉 [DynamicTp](dynamic_tp)
- 👉 [RabbitMQ](rabbitmq)
- 👉 [ActiveMQ](activemq)
- 👉 [Jetty](jetty)
- 👉 [Nacos](nacos)
- -### CloudNative monitoring - - 👉 [Docker](docker)
- 👉 [Kubernetes](kubernetes)
- -### Bigdata monitoring - - 👉 [Clickhouse](clickhouse)
- 👉 [ElasticSearch](elasticsearch)
- 👉 [Flink](flink)
- -### Ai LLM monitoring - - 👉 [openai](openai)
- -### Network monitoring - - 👉 [Huawei-switch](huawei_switch)
- -### Server monitoring - - -*** - -## 💡 Alarm service - -> More liberal threshold alarm configuration (calculation expression), supports email, SMS, WebHook, DingDing, WeChat and FeiShu for alarm notification. -> The positioning of alarm service is to trigger the threshold accurately and timely, and the alarm notification can be reached in time. - -### Alarm center - -> The triggered alarm information center provides query and filtering of alarm deletion, alarm processing, mark unprocessed, alarm level status, etc. - -### Alarm configuration - -> The Metric threshold configuration provides the Metric threshold configuration in the form of expression, which can set the alarm level, trigger times, alarm notification template and whether it is enabled, correlation monitoring and other functions. - -More details see 👉 [Threshold alarm](alert_threshold)
-   👉 [Threshold expression](alert_threshold_expr) - -### Alarm notification - -> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) -> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. -> After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver. - - - 👉 [Configure Email Notification](alert_email)
- 👉 [Configure Discord Notification](alert_webhook)
- 👉 [Configure Slack Notification](alert_webhook)
- 👉 [Configure Telegram Notification](alert_webhook)
- 👉 [Configure WebHook Notification](alert_webhook)
- 👉 [Configure enterprise WeChat Robot Notification](alert_wework)
- 👉 [Configure DingDing Robot Notification](alert_dingtalk)
- 👉 [Configure FeiShu Robot Notification](alert_feishu)
- 👉 [Configure Huawei Cloud SMN Notification](alert_smn)
- -### Plugins - - 👉 [Plugin](plugin)
\ No newline at end of file +--- +id: guide +title: Help Center +sidebar_label: Help Center +--- + +> Hertzbeat - An open source, real-time monitoring tool with custom-monitor and agentLess. +> Help documents and auxiliary information during use + +## 🔬 Monitoring services + +> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. +> Planned monitoring type:application service, database, operating system, cloud native, open source middleware. + +### Application service monitoring + + 👉 [Website monitoring](website)
+ 👉 [HTTP API](api)
+ 👉 [PING Connectivity](ping)
+ 👉 [Port availability](port)
+ 👉 [Full site monitoring](fullsite)
+ 👉 [SSL Cert monitoring](ssl_cert)
+ 👉 [DNS monitoring](dns)
+ 👉 [FTP monitoring](ftp)
+ 👉 [Websocket monitoring](websocket)
+ +### Program monitoring + + 👉 [Process](process)
+ 👉 [JVM monitoring](jvm)
+ 👉 [SpringBoot2.0](springboot2)
+ 👉 [SpringBoot3.0](springboot3)
+ 👉 [DynamicTp](dynamic_tp)
+ +### Database monitoring + + 👉 [MYSQL database monitoring](mysql)
+ 👉 [MariaDB database monitoring](mariadb)
+ 👉 [PostgreSQL database monitoring](postgresql)
+ 👉 [SqlServer database monitoring](sqlserver)
+ 👉 [Oracle database monitoring](oracle)
+ 👉 [DM database monitoring](dm)
+ 👉 [OpenGauss database monitoring](opengauss)
+ 👉 [IoTDB database monitoring](iotdb)
+ 👉 [TiDB database monitoring](tidb)
+ 👉 [MongoDB database monitoring](mongodb)
+ 👉 [NebulaGraph cluster monitoring](nebulagraph_cluster)
+ +### Cache monitoring + + 👉 [Redis monitoring](redis)
+ 👉 [Memcached monitoring](memcached)
+ +### Operating system monitoring + + 👉 [Linux operating system monitoring](linux)
+ 👉 [Windows operating system monitoring](windows)
+ 👉 [Ubuntu operating system monitoring](ubuntu)
+ 👉 [Centos operating system monitoring](centos)
+ 👉 [FreeBSD operating system monitoring](freebsd)
+ 👉 [RedHat operating system monitoring](redhat)
+ 👉 [Rocky Linux operating system monitoring](rockylinux)
+ 👉 [EulerOS monitoring](euleros)
+ +### Middleware monitoring + + 👉 [Zookeeper](zookeeper)
+ 👉 [Kafka](kafka)
+ 👉 [Tomcat](tomcat)
+ 👉 [ShenYu](shenyu)
+ 👉 [DynamicTp](dynamic_tp)
+ 👉 [RabbitMQ](rabbitmq)
+ 👉 [ActiveMQ](activemq)
+ 👉 [Jetty](jetty)
+ 👉 [Nacos](nacos)
+ +### CloudNative monitoring + + 👉 [Docker](docker)
+ 👉 [Kubernetes](kubernetes)
+ +### Bigdata monitoring + + 👉 [Clickhouse](clickhouse)
+ 👉 [ElasticSearch](elasticsearch)
+ 👉 [Flink](flink)
+ +### Ai LLM monitoring + + 👉 [openai](openai)
+ +### Network monitoring + + 👉 [Huawei-switch](huawei_switch)
+ +### Server monitoring + +*** + +## 💡 Alarm service + +> More liberal threshold alarm configuration (calculation expression), supports email, SMS, WebHook, DingDing, WeChat and FeiShu for alarm notification. +> The positioning of alarm service is to trigger the threshold accurately and timely, and the alarm notification can be reached in time. + +### Alarm center + +> The triggered alarm information center provides query and filtering of alarm deletion, alarm processing, mark unprocessed, alarm level status, etc. + +### Alarm configuration + +> The Metric threshold configuration provides the Metric threshold configuration in the form of expression, which can set the alarm level, trigger times, alarm notification template and whether it is enabled, correlation monitoring and other functions. + +More details see 👉 [Threshold alarm](alert_threshold)
+   👉 [Threshold expression](alert_threshold_expr) + +### Alarm notification + +> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) +> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. +> After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver. + + 👉 [Configure Email Notification](alert_email)
+ 👉 [Configure Discord Notification](alert_webhook)
+ 👉 [Configure Slack Notification](alert_webhook)
+ 👉 [Configure Telegram Notification](alert_webhook)
+ 👉 [Configure WebHook Notification](alert_webhook)
+ 👉 [Configure enterprise WeChat Robot Notification](alert_wework)
+ 👉 [Configure DingDing Robot Notification](alert_dingtalk)
+ 👉 [Configure FeiShu Robot Notification](alert_feishu)
+ 👉 [Configure Huawei Cloud SMN Notification](alert_smn)
+ +### Plugins + + 👉 [Plugin](plugin)
diff --git a/home/docs/help/hadoop.md b/home/docs/help/hadoop.md index f0a458ecc9f..56f19472277 100644 --- a/home/docs/help/hadoop.md +++ b/home/docs/help/hadoop.md @@ -11,9 +11,10 @@ keywords: [Open Source Monitoring System, Open Source Java Monitoring, Hadoop JV ### Pre-monitoring steps ->You need to enable JMX service in the Hadoop application before monitoring. HertzBeat uses the JMX protocol to collect performance metrics from Hadoop's JVM. +> You need to enable JMX service in the Hadoop application before monitoring. HertzBeat uses the JMX protocol to collect performance metrics from Hadoop's JVM. ### Steps to enable JMX protocol in the Hadoop application + Add JVM parameters when the application starts. ⚠️Note that you can customize the exposed port and external IP. - 1.Enter the hadoop-env.sh configuration file and enter the following command in the terminal: @@ -31,12 +32,12 @@ export HADOOP_OPTS= "$HADOOP_OPTS -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false " ``` -- 3.Save and exit, and then execute "start-all.sh" in the "$HADOOP_HOME/sbin" directory to restart the service. +- 3.Save and exit, and then execute "start-all.sh" in the "$HADOOP_HOME/sbin" directory to restart the service. ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -70,16 +71,15 @@ export HADOOP_OPTS= "$HADOOP_OPTS #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -88,4 +88,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/hbase_master.md b/home/docs/help/hbase_master.md index 188ad146a5a..1e3efe84af7 100644 --- a/home/docs/help/hbase_master.md +++ b/home/docs/help/hbase_master.md @@ -4,6 +4,7 @@ title: Monitoring Hbase Master sidebar_label: HbaseMaster Monitoring keywords: [Open Source Monitoring System, Open Source Database Monitoring, HbaseMaster Monitoring] --- + > Collect monitoring data for general performance metrics of Hbase Master. 
**Protocol: HTTP** @@ -14,13 +15,12 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po ## Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Target Host | The IPv4, IPv6, or domain name of the monitored peer. Note: without protocol header (e.g., https://, http://). | | Port | The port number of the Hbase master, default is 16010. That is, the value of the`hbase.master.info.port` parameter. | | Task Name | The name identifying this monitoring, which needs to be unique. | -| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | +| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | | Collection Interval | The periodic collection interval for monitoring data, in seconds, with the minimum allowable interval being 30 seconds. | | Probe | Whether to probe and check the availability of monitoring before adding new monitoring, and proceed with the addition or modification operation only if the probe is successful. | | Description | Additional notes and descriptions for this monitoring, users can add notes here. 
| @@ -29,9 +29,8 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po #### Metric Set: server - -| Metric Name | Unit | Metric Description | -| -------------------- | ---- | --------------------------------------- | +| Metric Name | Unit | Metric Description | +|----------------------|------|-----------------------------------------| | numRegionServers | none | Number of currently alive RegionServers | | numDeadRegionServers | none | Number of currently dead RegionServers | | averageLoad | none | Cluster average load | @@ -39,18 +38,16 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po #### Metric Set: Rit - -| Metric Name | Unit | Metric Description | -| -------------------- | ---- | -------------------------------- | +| Metric Name | Unit | Metric Description | +|----------------------|------|----------------------------------| | ritnone | none | Current number of RIT | | ritnoneOverThreshold | none | Number of RIT over the threshold | | ritOldestAge | ms | Duration of the oldest RIT | #### Metric Set: basic - -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | ------------------------------------------- | +| Metric Name | Unit | Metric Description | +|-------------------------|------|---------------------------------------------| | liveRegionServers | none | List of currently active RegionServers | | deadRegionServers | none | List of currently offline RegionServers | | zookeeperQuorum | none | Zookeeper list | @@ -60,3 +57,4 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po | receivedBytes | MB | Cluster received data volume | | sentBytes | MB | Cluster sent data volume (MB) | | clusterRequests | none | Total number of cluster requests | + diff --git a/home/docs/help/hbase_regionserver.md b/home/docs/help/hbase_regionserver.md index 4e676491022..0a77eb5441b 100644 --- a/home/docs/help/hbase_regionserver.md +++ 
b/home/docs/help/hbase_regionserver.md @@ -4,6 +4,7 @@ title: Monitoring HBase RegionServer Monitoring sidebar_label: HBase RegionServer Monitoring keywords: [Open-source monitoring system, Open-source database monitoring, RegionServer monitoring] --- + > Collect and monitor common performance metrics for HBase RegionServer. **Protocol:** HTTP @@ -14,13 +15,12 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. ## Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -| Target Host | The IPV4, IPV6, or domain name of the monitored entity. Note ⚠️ Do not include the protocol header (e.g., https://, http://). | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------------| +| Target Host | The IPV4, IPV6, or domain name of the monitored entity. Note ⚠️ Do not include the protocol header (e.g., https://, http://). | | Port | The port number of the HBase regionserver, default is 16030, i.e., the value of the`hbase.regionserver.info.port` parameter | | Task Name | A unique name to identify this monitoring task. | -| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | +| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | | Collection Interval | The interval time for periodic data collection in seconds, with a minimum interval of 30 seconds. | | Probe Before Adding | Whether to probe and check the availability of monitoring before adding new monitoring, only proceed with the addition if the probe is successful. | | Description Note | Additional notes to identify and describe this monitoring, users can add notes here. 
| @@ -31,9 +31,8 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. #### Metric Set: server - -| Metric Name | Unit | Metric Description | -| --------------------------------- | ----- | ------------------------------------------------------------------------- | +| Metric Name | Unit | Metric Description | +|-----------------------------------|-------|---------------------------------------------------------------------------| | regionCount | None | Number of Regions | | readRequestCount | None | Number of read requests since cluster restart | | writeRequestCount | None | Number of write requests since cluster restart | @@ -74,9 +73,8 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. #### Metric Set: IPC - -| Metric Name | Unit | Metric Description | -| ------------------------- | ---- | -------------------------------------- | +| Metric Name | Unit | Metric Description | +|---------------------------|------|----------------------------------------| | numActiveHandler | None | Current number of RITs | | NotServingRegionException | None | Number of RITs exceeding the threshold | | RegionMovedException | ms | Duration of the oldest RIT | @@ -84,9 +82,8 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. #### Metric Set: JVM - -| Metric Name | Unit | Metric Description | -| -------------------- | ---- | --------------------------------- | +| Metric Name | Unit | Metric Description | +|----------------------|------|-----------------------------------| | MemNonHeapUsedM | None | Current active RegionServer list | | MemNonHeapCommittedM | None | Current offline RegionServer list | | MemHeapUsedM | None | Zookeeper list | @@ -94,3 +91,4 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. 
| MemHeapMaxM | None | Cluster balance load times | | MemMaxM | None | RPC handle count | | GcCount | MB | Cluster data reception volume | + diff --git a/home/docs/help/hdfs_datanode.md b/home/docs/help/hdfs_datanode.md index 2e09fb9fba1..164adc7d6df 100644 --- a/home/docs/help/hdfs_datanode.md +++ b/home/docs/help/hdfs_datanode.md @@ -15,42 +15,43 @@ Retrieve the HTTP monitoring port for the Apache HDFS DataNode. Value: `dfs.data ## Configuration Parameters -| Parameter Name | Parameter Description | -| ----------------- |-------------------------------------------------------| -| Target Host | IP(v4 or v6) or domain name of the target to be monitored. Exclude protocol. | -| Port | Monitoring port number for Apache HDFS DataNode, default is 50075. | -| Query Timeout | Timeout for querying Apache HDFS DataNode, in milliseconds, default is 6000 milliseconds. | +| Parameter Name | Parameter Description | +|-----------------------------|-------------------------------------------------------------------------------------------| +| Target Host | IP(v4 or v6) or domain name of the target to be monitored. Exclude protocol. | +| Port | Monitoring port number for Apache HDFS DataNode, default is 50075. | +| Query Timeout | Timeout for querying Apache HDFS DataNode, in milliseconds, default is 6000 milliseconds. | | Metrics Collection Interval | Time interval for monitoring data collection, in seconds, minimum interval is 30 seconds. | -| Probe Before Monitoring | Whether to probe and check monitoring availability before adding. | -| Description/Remarks | Additional description and remarks for this monitoring. | +| Probe Before Monitoring | Whether to probe and check monitoring availability before adding. | +| Description/Remarks | Additional description and remarks for this monitoring. 
| ### Metrics Collected #### Metric Set: FSDatasetState -| Metric Name | Metric Unit | Metric Description | -| ------------ | ----------- | ------------------------------ | -| DfsUsed | GB | DataNode HDFS usage | -| Remaining | GB | Remaining space on DataNode HDFS | -| Capacity | GB | Total capacity of DataNode HDFS | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|----------------------------------| +| DfsUsed | GB | DataNode HDFS usage | +| Remaining | GB | Remaining space on DataNode HDFS | +| Capacity | GB | Total capacity of DataNode HDFS | #### Metric Set: JvmMetrics -| Metric Name | Metric Unit | Metric Description | -| ---------------------- | ----------- | ----------------------------------------- | -| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | -| MemNonHeapCommittedM | MB | Committed size of NonHeapMemory configured in JVM | -| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | -| MemHeapCommittedM | MB | Committed size of HeapMemory by JVM | -| MemHeapMaxM | MB | Maximum size of HeapMemory configured in JVM | -| MemMaxM | MB | Maximum memory available for JVM at runtime | -| ThreadsRunnable | Count | Number of threads in RUNNABLE state | -| ThreadsBlocked | Count | Number of threads in BLOCKED state | -| ThreadsWaiting | Count | Number of threads in WAITING state | -| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | +| Metric Name | Metric Unit | Metric Description | +|----------------------|-------------|---------------------------------------------------| +| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | +| MemNonHeapCommittedM | MB | Committed size of NonHeapMemory configured in JVM | +| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | +| MemHeapCommittedM | MB | Committed size of HeapMemory by JVM | +| MemHeapMaxM | MB | Maximum size of HeapMemory configured in JVM | +| MemMaxM | MB | Maximum memory available for JVM at runtime | +| 
ThreadsRunnable | Count | Number of threads in RUNNABLE state | +| ThreadsBlocked | Count | Number of threads in BLOCKED state | +| ThreadsWaiting | Count | Number of threads in WAITING state | +| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | #### Metric Set: runtime -| Metric Name | Metric Unit | Metric Description | -| ------------ | ----------- | ------------------ | -| StartTime | | Startup time | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|--------------------| +| StartTime | | Startup time | + diff --git a/home/docs/help/hdfs_namenode.md b/home/docs/help/hdfs_namenode.md index 1afd6d4b1ae..978daac3613 100644 --- a/home/docs/help/hdfs_namenode.md +++ b/home/docs/help/hdfs_namenode.md @@ -15,78 +15,79 @@ Ensure that you have obtained the JMX monitoring port for the HDFS NameNode. ## Configuration Parameters -| Parameter Name | Parameter Description | -| ------------------ |--------------------------------------------------------| -| Target Host | The IPv4, IPv6, or domain name of the target being monitored. Exclude protocol headers. | -| Port | The monitoring port number of the HDFS NameNode, default is 50070. | -| Query Timeout | Timeout for querying the HDFS NameNode, in milliseconds, default is 6000 milliseconds. | +| Parameter Name | Parameter Description | +|-----------------------------|-------------------------------------------------------------------------------------------| +| Target Host | The IPv4, IPv6, or domain name of the target being monitored. Exclude protocol headers. | +| Port | The monitoring port number of the HDFS NameNode, default is 50070. | +| Query Timeout | Timeout for querying the HDFS NameNode, in milliseconds, default is 6000 milliseconds. | | Metrics Collection Interval | Time interval for collecting monitoring data, in seconds, minimum interval is 30 seconds. | -| Probe Before Monitoring | Whether to probe and check the availability of monitoring before adding it. 
| -| Description/Remarks | Additional description and remarks for this monitoring. | +| Probe Before Monitoring | Whether to probe and check the availability of monitoring before adding it. | +| Description/Remarks | Additional description and remarks for this monitoring. | ### Collected Metrics #### Metric Set: FSNamesystem -| Metric Name | Metric Unit | Metric Description | -| --------------------------- | ----------- | ------------------------------------- | -| CapacityTotal | | Total cluster storage capacity | -| CapacityTotalGB | GB | Total cluster storage capacity | -| CapacityUsed | | Used cluster storage capacity | -| CapacityUsedGB | GB | Used cluster storage capacity | -| CapacityRemaining | | Remaining cluster storage capacity | -| CapacityRemainingGB | GB | Remaining cluster storage capacity | -| CapacityUsedNonDFS | | Non-HDFS usage of cluster capacity | -| TotalLoad | | Total client connections in the cluster | -| FilesTotal | | Total number of files in the cluster | -| BlocksTotal | | Total number of BLOCKs | -| PendingReplicationBlocks | | Number of blocks awaiting replication | -| UnderReplicatedBlocks | | Number of blocks with insufficient replicas | -| CorruptBlocks | | Number of corrupt blocks | -| ScheduledReplicationBlocks | | Number of blocks scheduled for replication | -| PendingDeletionBlocks | | Number of blocks awaiting deletion | -| ExcessBlocks | | Number of excess blocks | -| PostponedMisreplicatedBlocks| | Number of misreplicated blocks postponed for processing | -| NumLiveDataNodes | | Number of live data nodes in the cluster | -| NumDeadDataNodes | | Number of data nodes marked as dead | -| NumDecomLiveDataNodes | | Number of decommissioned live nodes | -| NumDecomDeadDataNodes | | Number of decommissioned dead nodes | -| NumDecommissioningDataNodes | | Number of nodes currently being decommissioned | -| TransactionsSinceLastCheckpoint | | Number of transactions since the last checkpoint | -| LastCheckpointTime | | Time of the last 
checkpoint | -| PendingDataNodeMessageCount| | Number of DATANODE requests queued in the standby namenode | +| Metric Name | Metric Unit | Metric Description | +|---------------------------------|-------------|------------------------------------------------------------| +| CapacityTotal | | Total cluster storage capacity | +| CapacityTotalGB | GB | Total cluster storage capacity | +| CapacityUsed | | Used cluster storage capacity | +| CapacityUsedGB | GB | Used cluster storage capacity | +| CapacityRemaining | | Remaining cluster storage capacity | +| CapacityRemainingGB | GB | Remaining cluster storage capacity | +| CapacityUsedNonDFS | | Non-HDFS usage of cluster capacity | +| TotalLoad | | Total client connections in the cluster | +| FilesTotal | | Total number of files in the cluster | +| BlocksTotal | | Total number of BLOCKs | +| PendingReplicationBlocks | | Number of blocks awaiting replication | +| UnderReplicatedBlocks | | Number of blocks with insufficient replicas | +| CorruptBlocks | | Number of corrupt blocks | +| ScheduledReplicationBlocks | | Number of blocks scheduled for replication | +| PendingDeletionBlocks | | Number of blocks awaiting deletion | +| ExcessBlocks | | Number of excess blocks | +| PostponedMisreplicatedBlocks | | Number of misreplicated blocks postponed for processing | +| NumLiveDataNodes | | Number of live data nodes in the cluster | +| NumDeadDataNodes | | Number of data nodes marked as dead | +| NumDecomLiveDataNodes | | Number of decommissioned live nodes | +| NumDecomDeadDataNodes | | Number of decommissioned dead nodes | +| NumDecommissioningDataNodes | | Number of nodes currently being decommissioned | +| TransactionsSinceLastCheckpoint | | Number of transactions since the last checkpoint | +| LastCheckpointTime | | Time of the last checkpoint | +| PendingDataNodeMessageCount | | Number of DATANODE requests queued in the standby namenode | #### Metric Set: RPC -| Metric Name | Metric Unit | Metric Description | -| 
------------------------- | ----------- | -------------------------- | -| ReceivedBytes | | Data receiving rate | -| SentBytes | | Data sending rate | -| RpcQueueTimeNumOps | | RPC call rate | +| Metric Name | Metric Unit | Metric Description | +|--------------------|-------------|---------------------| +| ReceivedBytes | | Data receiving rate | +| SentBytes | | Data sending rate | +| RpcQueueTimeNumOps | | RPC call rate | #### Metric Set: runtime -| Metric Name | Metric Unit | Metric Description | -| ------------------------- | ----------- | ------------------- | -| StartTime | | Start time | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|--------------------| +| StartTime | | Start time | #### Metric Set: JvmMetrics -| Metric Name | Metric Unit | Metric Description | -| ------------------------- | ----------- | ------------------- | -| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | -| MemNonHeapCommittedM | MB | Committed NonHeapMemory by JVM | -| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | -| MemHeapCommittedM | MB | Committed HeapMemory by JVM | -| MemHeapMaxM | MB | Maximum HeapMemory configured for JVM | -| MemMaxM | MB | Maximum memory that can be used by JVM | -| GcCountParNew | Count | Number of ParNew GC events | -| GcTimeMillisParNew | Milliseconds| Time spent in ParNew GC | -| GcCountConcurrentMarkSweep| Count | Number of ConcurrentMarkSweep GC events| -| GcTimeMillisConcurrentMarkSweep | Milliseconds | Time spent in ConcurrentMarkSweep GC | -| GcCount | Count | Total number of GC events | -| GcTimeMillis | Milliseconds| Total time spent in GC events | -| ThreadsRunnable | Count | Number of threads in RUNNABLE state | -| ThreadsBlocked | Count | Number of threads in BLOCKED state | -| ThreadsWaiting | Count | Number of threads in WAITING state | -| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state| +| Metric Name | Metric Unit | Metric Description | 
+|---------------------------------|--------------|------------------------------------------| +| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | +| MemNonHeapCommittedM | MB | Committed NonHeapMemory by JVM | +| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | +| MemHeapCommittedM | MB | Committed HeapMemory by JVM | +| MemHeapMaxM | MB | Maximum HeapMemory configured for JVM | +| MemMaxM | MB | Maximum memory that can be used by JVM | +| GcCountParNew | Count | Number of ParNew GC events | +| GcTimeMillisParNew | Milliseconds | Time spent in ParNew GC | +| GcCountConcurrentMarkSweep | Count | Number of ConcurrentMarkSweep GC events | +| GcTimeMillisConcurrentMarkSweep | Milliseconds | Time spent in ConcurrentMarkSweep GC | +| GcCount | Count | Total number of GC events | +| GcTimeMillis | Milliseconds | Total time spent in GC events | +| ThreadsRunnable | Count | Number of threads in RUNNABLE state | +| ThreadsBlocked | Count | Number of threads in BLOCKED state | +| ThreadsWaiting | Count | Number of threads in WAITING state | +| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | + diff --git a/home/docs/help/hive.md b/home/docs/help/hive.md index ec0d7dee398..806969c2e7c 100644 --- a/home/docs/help/hive.md +++ b/home/docs/help/hive.md @@ -16,6 +16,7 @@ If you want to monitor information in `Apache Hive` with this monitoring type, y ```shell hive --service metastore & ``` + **2. Enable hive server2:** ```shell @@ -24,55 +25,53 @@ hive --service hiveserver2 & ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |-------------------------------------------------------------------------------------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. 
| -| Port | The default port provided by the database is 10002. | -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 10002. | +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | | The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: basic -| Metric Name | metric unit | Metrics help describe | -|-------------| -------- |--------------------------------| -| vm_name | None | The name of the virtual machine (VM) 
running HiveServer2. | -| vm_vendor | None | The vendor or provider of the virtual machine. | -| vm_version | None | The version of the virtual machine. | -| up_time | None | The duration for which HiveServer2 has been running. | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------------------| +| vm_name | None | The name of the virtual machine (VM) running HiveServer2. | +| vm_vendor | None | The vendor or provider of the virtual machine. | +| vm_version | None | The version of the virtual machine. | +| up_time | None | The duration for which HiveServer2 has been running. | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|-----------------| -------- |-------------------------------------| -| https_proxyPort | None | The port number used for HTTPS proxy communication. | -| os_name | None | The name of the operating system on which HiveServer2 is running. | -| os_version | None | The version of the operating system.| -| os_arch | None | The architecture of the operating system.| -| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | -| java_runtime_version | None | The version of the Java runtime environment. | +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|-------------------------------------------------------------------| +| https_proxyPort | None | The port number used for HTTPS proxy communication. | +| os_name | None | The name of the operating system on which HiveServer2 is running. | +| os_version | None | The version of the operating system. | +| os_arch | None | The architecture of the operating system. | +| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | +| java_runtime_version | None | The version of the Java runtime environment. 
| #### metric Collection: thread -| Metric Name | metric unit | Metrics help describe | -| ---------------- |------|--------------------| -| thread_count | None | The current number of threads being used by HiveServer2. | -| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | -| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | -| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. | +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|----------------------------------------------------------------------| +| thread_count | None | The current number of threads being used by HiveServer2. | +| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | +| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | +| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. | #### metric Collection: code_cache -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------------| -| committed | MB | The amount of memory currently allocated for the memory pool. | -| init | MB | The initial amount of memory requested for the memory pool. | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-------------------------------------------------------------------------| +| committed | MB | The amount of memory currently allocated for the memory pool. | +| init | MB | The initial amount of memory requested for the memory pool. | | max | MB | The maximum amount of memory that can be allocated for the memory pool. | -| used | MB | The amount of memory currently being used by the memory pool. | - - +| used | MB | The amount of memory currently being used by the memory pool. 
| diff --git a/home/docs/help/http_sd.md b/home/docs/help/http_sd.md index 3278936d66d..6b8de487555 100644 --- a/home/docs/help/http_sd.md +++ b/home/docs/help/http_sd.md @@ -16,39 +16,37 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito > We currently support for `Consul` and `Nacos`. 2. Add http_sd monitor and enter necessary info about **Register center** on Hertzbeat, such as host, port and so on. - 3. Click **OK** # Configuration parameter -| Parameter name | Parameter help description | -| --------------------- | ------------------------------------------------------------ | -| Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Task name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Register center | -| Discovery Client Type | Select one Register center that you want to monitor | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Parameter name | Parameter help description | +|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Task name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Register center | +| Discovery Client Type | Select one Register center that you want to monitor | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | # Collection Metrics ## Metrics Set:server -| Metric name | Metric unit | Metric help description | -| ------------- | ----------- | ----------------------- | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|-------------------------| | Address | | | | Port | | | | Response Time | ms | | ## Metrics Set:service -| Metric name | Metric unit | Metric help description | -| ------------- | ----------- | -------------------------------- | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------| | Service Id | | | | Service Name | | | | Address | | | | Port | | | | Health Status | | Current health status of service | - diff --git a/home/docs/help/huawei_switch.md b/home/docs/help/huawei_switch.md index 8217ff554b8..902c0596965 100644 --- a/home/docs/help/huawei_switch.md +++ b/home/docs/help/huawei_switch.md @@ -1,53 +1,54 @@ ---- -id: huawei_switch -title: Monitoring:Huawei switch -sidebar_label: Huawei switch -keywords: [ open source monitoring tool, network monitoring, Huawei switch ] ---- - -> Collect and monitor the general indicators (availability, system information, port traffic, etc.) of Huawei switches. 
- -**Protocol Use: SNMP** - -### Configuration parameter - -| Parameter name | Parameter help description | -|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Task Name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port of SNMP. The default is 161 | -| SNMP Version | SNMP version to use | -| SNMP Community | Used in SNMP v1 and SNMP v2c, used to complete authentication in SNMP Agent, in string form. Group name includes "read" and "write", when performing SNMP query operation, "read" group name is used for authentication; when performing SNMP setting operation, "write" group name is used for authentication. When performing SNMP query operation, "read" group name is used for authentication; when performing SNMP setting operation, "write" group name is used for authentication. 
| -| SNMP username | For SNMP v3, MSG username | -| SNMP contextName | For SNMP v3, used to determine the MIB view of the Context EngineID to the managed device | -| SNMP authPassword | For SNMP v3, SNMP authentication passwords | -| authPassword Encryption | For SNMP v3, SNMP authentication algorithm | -| SNMP privPassphrase | For SNMP v3, SNMP encrypted passwords | -| privPassword Encryption | For SNMP v3, SNMP encrypted algorithm | -| Timeout | Set the timeout time when querying unresponsive data, in milliseconds, the default is 6000 milliseconds | -| Intervals | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Description | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -Since there are too many metrics that can be queried on Huawei switches, detailed metrics can be queried -on [Huawei MIB Query Platform](https://info.support.huawei.com/info-finder/tool/en/enterprise/mib). - -This document only introduces the monitoring indicators queried in the monitor template. 
- -#### Metric set: huawei_core - -| Metric Name | Metric Unit | Metric Help Description | -|---------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ifIndex | none | Interface index. This value is greater than zero and globally unique. | -| ifDescr | none | A textual string containing information about the interface. This string should include the name of the manufacturer, the product name and the version of the interface hardware/software. | -| ifMtu | octets | The size of the largest packet which can be sent/received on the interface. For interfaces that are used for transmitting network datagrams, this is the size of the largest network datagram that can be sent on the interface. | -| ifSpeed | bit/s | An estimate of the interface's current bandwidth. For interfaces which do not vary in bandwidth or for those where no accurate estimation can be made, this object should contain the nominal bandwidth. If the bandwidth of the interface is greater than the maximum value reportable by this object then this object should report its maximum value (4,294,967,295) and ifHighSpeed must be used to report the interace's speed. For a sub-layer which has no concept of bandwidth, this object should be zero. 
| -| ifInOctets | octets | The total number of octets received on the interface, including framing characters. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | -| ifInDiscards | none | The number of inbound packets which were chosen to be discarded even though no errors had been detected to prevent their being deliverable to a higher-layer protocol. One possible reason for discarding such a packet could be to free up buffer space. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | -| ifInErrors | none | For packet-oriented interfaces, the number of inbound packets that contained errors preventing them from being deliverable to a higher-layer protocol. For character-oriented or fixed-length interfaces, the number of inbound transmission units that contained errors preventing them from being deliverable to a higher-layer protocol. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | -| ifOutOctets | octets | The total number of octets transmitted out of the interface, including framing characters. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | -| ifOutDiscards | none | The number of outbound packets which were chosen to be discarded even though no errors had been detected to prevent their being transmitted. One possible reason for discarding such a packet could be to free up buffer space. 
Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | -| ifOutErrors | none | For packet-oriented interfaces, the number of outbound packets that could not be transmitted because of errors. For character-oriented or fixed-length interfaces, the number of outbound transmission units that could not be transmitted because of errors. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | -| ifAdminStatus | none | The desired state of the interface. The testing(3) state indicates that no operational packets can be passed. When a managed system initializes, all interfaces start with ifAdminStatus in the down(2) state. As a result of either explicit management action or per configuration information retained by the managed system, ifAdminStatus is then changed to either the up(1) or testing(3) states (or remains in the down(2) state). | -| ifOperStatus | none | The current operational state of the interface. The testing(3) state indicates that no operational packets can be passed. If ifAdminStatus is down(2) then ifOperStatus should be down(2). If ifAdminStatus is changed to up(1) then ifOperStatus should change to up(1) if the interface is ready to transmit and receive network traffic; it should change to dormant(5) if the interface is waiting for external actions (such as a serial line waiting for an incoming connection); it should remain in the down(2) state if and only if there is a fault that prevents it from going to the up(1) state; it should remain in the notPresent(6) state if the interface has missing (typically, hardware) components. 
| +--- +id: huawei_switch +title: Monitoring:Huawei switch +sidebar_label: Huawei switch +keywords: [ open source monitoring tool, network monitoring, Huawei switch ] +--- + +> Collect and monitor the general indicators (availability, system information, port traffic, etc.) of Huawei switches. + +**Protocol Use: SNMP** + +### Configuration parameter + +| Parameter name | Parameter help description | +|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Task Name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port of SNMP. The default is 161 | +| SNMP Version | SNMP version to use | +| SNMP Community | Used in SNMP v1 and SNMP v2c, used to complete authentication in SNMP Agent, in string form. Group name includes "read" and "write", when performing SNMP query operation, "read" group name is used for authentication; when performing SNMP setting operation, "write" group name is used for authentication. When performing SNMP query operation, "read" group name is used for authentication; when performing SNMP setting operation, "write" group name is used for authentication. 
| +| SNMP username | For SNMP v3, MSG username | +| SNMP contextName | For SNMP v3, used to determine the MIB view of the Context EngineID to the managed device | +| SNMP authPassword | For SNMP v3, SNMP authentication passwords | +| authPassword Encryption | For SNMP v3, SNMP authentication algorithm | +| SNMP privPassphrase | For SNMP v3, SNMP encrypted passwords | +| privPassword Encryption | For SNMP v3, SNMP encrypted algorithm | +| Timeout | Set the timeout time when querying unresponsive data, in milliseconds, the default is 6000 milliseconds | +| Intervals | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Description | For more information about identifying and describing this monitoring, users can note information here | + +### Collection Metric + +Since there are too many metrics that can be queried on Huawei switches, detailed metrics can be queried +on [Huawei MIB Query Platform](https://info.support.huawei.com/info-finder/tool/en/enterprise/mib). + +This document only introduces the monitoring indicators queried in the monitor template. 
+ +#### Metric set: huawei_core + +| Metric Name | Metric Unit | Metric Help Description | +|---------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ifIndex | none | Interface index. This value is greater than zero and globally unique. | +| ifDescr | none | A textual string containing information about the interface. This string should include the name of the manufacturer, the product name and the version of the interface hardware/software. | +| ifMtu | octets | The size of the largest packet which can be sent/received on the interface. For interfaces that are used for transmitting network datagrams, this is the size of the largest network datagram that can be sent on the interface. | +| ifSpeed | bit/s | An estimate of the interface's current bandwidth. For interfaces which do not vary in bandwidth or for those where no accurate estimation can be made, this object should contain the nominal bandwidth. If the bandwidth of the interface is greater than the maximum value reportable by this object then this object should report its maximum value (4,294,967,295) and ifHighSpeed must be used to report the interace's speed. For a sub-layer which has no concept of bandwidth, this object should be zero. 
| +| ifInOctets | octets | The total number of octets received on the interface, including framing characters. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | +| ifInDiscards | none | The number of inbound packets which were chosen to be discarded even though no errors had been detected to prevent their being deliverable to a higher-layer protocol. One possible reason for discarding such a packet could be to free up buffer space. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | +| ifInErrors | none | For packet-oriented interfaces, the number of inbound packets that contained errors preventing them from being deliverable to a higher-layer protocol. For character-oriented or fixed-length interfaces, the number of inbound transmission units that contained errors preventing them from being deliverable to a higher-layer protocol. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | +| ifOutOctets | octets | The total number of octets transmitted out of the interface, including framing characters. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | +| ifOutDiscards | none | The number of outbound packets which were chosen to be discarded even though no errors had been detected to prevent their being transmitted. One possible reason for discarding such a packet could be to free up buffer space. 
Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | +| ifOutErrors | none | For packet-oriented interfaces, the number of outbound packets that could not be transmitted because of errors. For character-oriented or fixed-length interfaces, the number of outbound transmission units that could not be transmitted because of errors. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | +| ifAdminStatus | none | The desired state of the interface. The testing(3) state indicates that no operational packets can be passed. When a managed system initializes, all interfaces start with ifAdminStatus in the down(2) state. As a result of either explicit management action or per configuration information retained by the managed system, ifAdminStatus is then changed to either the up(1) or testing(3) states (or remains in the down(2) state). | +| ifOperStatus | none | The current operational state of the interface. The testing(3) state indicates that no operational packets can be passed. If ifAdminStatus is down(2) then ifOperStatus should be down(2). If ifAdminStatus is changed to up(1) then ifOperStatus should change to up(1) if the interface is ready to transmit and receive network traffic; it should change to dormant(5) if the interface is waiting for external actions (such as a serial line waiting for an incoming connection); it should remain in the down(2) state if and only if there is a fault that prevents it from going to the up(1) state; it should remain in the notPresent(6) state if the interface has missing (typically, hardware) components. 
| + diff --git a/home/docs/help/hugegraph.md b/home/docs/help/hugegraph.md index 4fca13f4e00..66b0574aab7 100644 --- a/home/docs/help/hugegraph.md +++ b/home/docs/help/hugegraph.md @@ -4,6 +4,7 @@ title: Monitoring HugeGraph Monitoring sidebar_label: Apache HugeGraph keywords: [Open Source Monitoring System, Open Source Database Monitoring, HugeGraph Monitoring] --- + > Collect and monitor the general performance metrics of HugeGraph **Protocol used: HTTP** @@ -14,137 +15,127 @@ Check the `rest-server.properties` file to obtain the value of the `restserver_p ## Configuration Parameters - -| Parameter Name | Parameter Description | -|------------------|--------------------------------------------------------| -| Target Host | The IPv4, IPv6, or domain name of the monitored endpoint. Note ⚠️ Do not include protocol headers (eg: https://, http://). | -| Port | Port number of the HugeGraph restserver, default is 8080. i.e., the value of the `restserver_port` parameter | -| Enable SSL | Enable SSL usage | -| Base Path | Base path, default is: /metrics, usually does not need to be modified | -| Task Name | Identifies the name of this monitoring, ensuring uniqueness. | -| Collection Interval | Interval for periodically collecting data for monitoring, in seconds, with a minimum interval of 30 seconds | -| Probe Enabled | Whether to probe before adding new monitoring, only continue with add/modify operations if the probe is successful | -| Description | Additional identification and description of this monitoring, users can add information here | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------| +| Target Host | The IPv4, IPv6, or domain name of the monitored endpoint. Note ⚠️ Do not include protocol headers (eg: https://, http://). | +| Port | Port number of the HugeGraph restserver, default is 8080. 
i.e., the value of the `restserver_port` parameter | +| Enable SSL | Enable SSL usage | +| Base Path | Base path, default is: /metrics, usually does not need to be modified | +| Task Name | Identifies the name of this monitoring, ensuring uniqueness. | +| Collection Interval | Interval for periodically collecting data for monitoring, in seconds, with a minimum interval of 30 seconds | +| Probe Enabled | Whether to probe before adding new monitoring, only continue with add/modify operations if the probe is successful | +| Description | Additional identification and description of this monitoring, users can add information here | ### Metrics Collection #### Metric Set: gauges -| Metric Name | Metric Unit | Metric Description | -| ------------------------------------ | -------- | --------------------------------- | -| edge-hugegraph-capacity | NONE | Indicates the capacity limit of edges in the current graph | -| edge-hugegraph-expire | NONE | Indicates the expiration time of edge data | -| edge-hugegraph-hits | NONE | Indicates the number of hits in the edge data cache | -| edge-hugegraph-miss | NONE | Indicates the number of misses in the edge data cache | -| edge-hugegraph-size | NONE | Indicates the number of edges in the current graph | -| instances | NONE | Indicates the number of currently running HugeGraph instances| -| schema-id-hugegraph-capacity | NONE | Indicates the capacity limit of schema IDs in the graph | -| schema-id-hugegraph-expire | NONE | Indicates the expiration time of schema ID data | -| schema-id-hugegraph-hits | NONE | Indicates the number of hits in the schema ID data cache| -| schema-id-hugegraph-miss | NONE | Indicates the number of misses in the schema ID data cache| -| schema-id-hugegraph-size | NONE | Indicates the number of schema IDs in the current graph | -| schema-name-hugegraph-capacity | NONE | Indicates the capacity limit of schema names in the graph | -| schema-name-hugegraph-expire | NONE | Indicates the expiration time of 
schema name data | -| schema-name-hugegraph-hits | NONE | Indicates the number of hits in the schema name data cache| -| schema-name-hugegraph-miss | NONE | Indicates the number of misses in the schema name data cache| -| schema-name-hugegraph-size | NONE | Indicates the number of schema names in the current graph | -| token-hugegraph-capacity | NONE | Indicates the capacity limit of tokens in the graph | -| token-hugegraph-expire | NONE | Indicates the expiration time of token data | -| token-hugegraph-hits | NONE | Indicates the number of hits in the token data cache | -| token-hugegraph-miss | NONE | Indicates the number of misses in the token data cache | -| token-hugegraph-size | NONE | Indicates the number of tokens in the current graph | -| users-hugegraph-capacity | NONE | Indicates the capacity limit of users in the graph | -| users-hugegraph-expire | NONE | Indicates the expiration time of user data | -| users-hugegraph-hits | NONE | Indicates the number of hits in the user data cache | -| users-hugegraph-miss | NONE | Indicates the number of misses in the user data cache | -| users-hugegraph-size | NONE | Indicates the number of users in the current graph | -| users_pwd-hugegraph-capacity | NONE | Indicates the capacity limit of user passwords | -| users_pwd-hugegraph-expire | NONE | Indicates the expiration time of user password data | -| users_pwd-hugegraph-hits | NONE | Indicates the number of hits in the user password data cache| -| users_pwd-hugegraph-miss | NONE | Indicates the number of misses in the user password data cache| -| users_pwd-hugegraph-size | NONE | Indicates the number of user passwords in the current graph | -| vertex-hugegraph-capacity | NONE | Indicates the capacity limit of vertices in the graph | -| vertex-hugegraph-expire | NONE | Indicates the expiration time of vertex data | -| vertex-hugegraph-hits | NONE | Indicates the number of hits in the vertex data cache | -| vertex-hugegraph-miss | NONE | Indicates the number of 
misses in the vertex data cache | -| vertex-hugegraph-size | NONE | Indicates the number of vertices in the current graph | -| batch-write-threads | NONE | Indicates the number of threads for batch write operations | -| max-write-threads | NONE | Indicates the maximum number of threads for write operations | -| pending-tasks | NONE | Indicates the number of pending tasks | -| workers | NONE | Indicates the current number of worker threads | -| average-load-penalty | NONE | Indicates the average load penalty | -| estimated-size | NONE | Indicates the estimated data size | -| eviction-count | NONE | Indicates the number of evicted data entries | -| eviction-weight | NONE | Indicates the weight of evicted data | -| hit-count | NONE | Indicates the total cache hits | -| hit-rate | NONE | Indicates the cache hit rate | -| load-count | NONE | Indicates the number of data loads | -| load-failure-count | NONE | Indicates the number of data load failures | -| load-failure-rate | NONE | Indicates the data load failure rate | -| load-success-count | NONE | Indicates the number of successful data loads | -| long-run-compilation-count | NONE | Indicates the number of long-running compilations | -| miss-count | NONE | Indicates the total cache misses | -| miss-rate | NONE | Indicates the cache miss rate | -| request-count | NONE | Indicates the total request count | -| total-load-time | NONE | Indicates the total data load time | -| sessions | NONE | Indicates the current number of active sessions | - - - +| Metric Name | Metric Unit | Metric Description | +|--------------------------------|-------------|----------------------------------------------------------------| +| edge-hugegraph-capacity | NONE | Indicates the capacity limit of edges in the current graph | +| edge-hugegraph-expire | NONE | Indicates the expiration time of edge data | +| edge-hugegraph-hits | NONE | Indicates the number of hits in the edge data cache | +| edge-hugegraph-miss | NONE | Indicates the number 
of misses in the edge data cache | +| edge-hugegraph-size | NONE | Indicates the number of edges in the current graph | +| instances | NONE | Indicates the number of currently running HugeGraph instances | +| schema-id-hugegraph-capacity | NONE | Indicates the capacity limit of schema IDs in the graph | +| schema-id-hugegraph-expire | NONE | Indicates the expiration time of schema ID data | +| schema-id-hugegraph-hits | NONE | Indicates the number of hits in the schema ID data cache | +| schema-id-hugegraph-miss | NONE | Indicates the number of misses in the schema ID data cache | +| schema-id-hugegraph-size | NONE | Indicates the number of schema IDs in the current graph | +| schema-name-hugegraph-capacity | NONE | Indicates the capacity limit of schema names in the graph | +| schema-name-hugegraph-expire | NONE | Indicates the expiration time of schema name data | +| schema-name-hugegraph-hits | NONE | Indicates the number of hits in the schema name data cache | +| schema-name-hugegraph-miss | NONE | Indicates the number of misses in the schema name data cache | +| schema-name-hugegraph-size | NONE | Indicates the number of schema names in the current graph | +| token-hugegraph-capacity | NONE | Indicates the capacity limit of tokens in the graph | +| token-hugegraph-expire | NONE | Indicates the expiration time of token data | +| token-hugegraph-hits | NONE | Indicates the number of hits in the token data cache | +| token-hugegraph-miss | NONE | Indicates the number of misses in the token data cache | +| token-hugegraph-size | NONE | Indicates the number of tokens in the current graph | +| users-hugegraph-capacity | NONE | Indicates the capacity limit of users in the graph | +| users-hugegraph-expire | NONE | Indicates the expiration time of user data | +| users-hugegraph-hits | NONE | Indicates the number of hits in the user data cache | +| users-hugegraph-miss | NONE | Indicates the number of misses in the user data cache | +| users-hugegraph-size | NONE | 
Indicates the number of users in the current graph | +| users_pwd-hugegraph-capacity | NONE | Indicates the capacity limit of user passwords | +| users_pwd-hugegraph-expire | NONE | Indicates the expiration time of user password data | +| users_pwd-hugegraph-hits | NONE | Indicates the number of hits in the user password data cache | +| users_pwd-hugegraph-miss | NONE | Indicates the number of misses in the user password data cache | +| users_pwd-hugegraph-size | NONE | Indicates the number of user passwords in the current graph | +| vertex-hugegraph-capacity | NONE | Indicates the capacity limit of vertices in the graph | +| vertex-hugegraph-expire | NONE | Indicates the expiration time of vertex data | +| vertex-hugegraph-hits | NONE | Indicates the number of hits in the vertex data cache | +| vertex-hugegraph-miss | NONE | Indicates the number of misses in the vertex data cache | +| vertex-hugegraph-size | NONE | Indicates the number of vertices in the current graph | +| batch-write-threads | NONE | Indicates the number of threads for batch write operations | +| max-write-threads | NONE | Indicates the maximum number of threads for write operations | +| pending-tasks | NONE | Indicates the number of pending tasks | +| workers | NONE | Indicates the current number of worker threads | +| average-load-penalty | NONE | Indicates the average load penalty | +| estimated-size | NONE | Indicates the estimated data size | +| eviction-count | NONE | Indicates the number of evicted data entries | +| eviction-weight | NONE | Indicates the weight of evicted data | +| hit-count | NONE | Indicates the total cache hits | +| hit-rate | NONE | Indicates the cache hit rate | +| load-count | NONE | Indicates the number of data loads | +| load-failure-count | NONE | Indicates the number of data load failures | +| load-failure-rate | NONE | Indicates the data load failure rate | +| load-success-count | NONE | Indicates the number of successful data loads | +| 
long-run-compilation-count | NONE | Indicates the number of long-running compilations | +| miss-count | NONE | Indicates the total cache misses | +| miss-rate | NONE | Indicates the cache miss rate | +| request-count | NONE | Indicates the total request count | +| total-load-time | NONE | Indicates the total data load time | +| sessions | NONE | Indicates the current number of active sessions | #### Metric Set: counters - - -| Metric Name | Metric Unit | Metric Description | -| --------------------------------------------- | -------- | ---------------------------------------- | -| GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests | -| GET-TOTAL_COUNTER | NONE | Records the total number of GET requests | -| favicon-ico-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve favicon.ico | -| favicon-ico-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve favicon.ico | -| graphs-HEAD-FAILED_COUNTER | NONE | Records the number of failed HEAD requests for graphs resources | -| graphs-HEAD-SUCCESS_COUNTER | NONE | Records the number of successful HEAD requests for graphs resources | -| graphs-HEAD-TOTAL_COUNTER | NONE | Records the total number of HEAD requests for graphs resources | -| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests for vertices in HugeGraph graphs | -| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests for vertices in HugeGraph graphs | -| metrics-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve metrics | -| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | -| metrics-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics | -| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | -| 
metrics-gauges-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics gauges | -| metrics-gauges-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics gauges | - - - +| Metric Name | Metric Unit | Metric Description | +|-----------------------------------------------------|-------------|--------------------------------------------------------------------------------| +| GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests | +| GET-TOTAL_COUNTER | NONE | Records the total number of GET requests | +| favicon-ico-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve favicon.ico | +| favicon-ico-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve favicon.ico | +| graphs-HEAD-FAILED_COUNTER | NONE | Records the number of failed HEAD requests for graphs resources | +| graphs-HEAD-SUCCESS_COUNTER | NONE | Records the number of successful HEAD requests for graphs resources | +| graphs-HEAD-TOTAL_COUNTER | NONE | Records the total number of HEAD requests for graphs resources | +| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests for vertices in HugeGraph graphs | +| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests for vertices in HugeGraph graphs | +| metrics-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve metrics | +| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | +| metrics-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics | +| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | +| metrics-gauges-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics gauges | +| metrics-gauges-GET-TOTAL_COUNTER | NONE | 
Records the total number of GET requests to retrieve metrics gauges | #### Metric Set: system +| Metric Name | Metric Unit | Metric Description | +|---------------------------------------------|-------------|---------------------------------------------------------------------------------------------------------| +| mem | NONE | Indicates the total memory of the system | +| mem_total | NONE | Indicates the total memory of the system (same as mem) | +| mem_used | NONE | Indicates the currently used memory of the system | +| mem_free | NONE | Indicates the free memory of the system | +| mem_unit | NONE | Indicates the unit of memory (such as bytes, kilobytes, megabytes, etc.) | +| processors | NONE | Indicates the number of processors in the system | +| uptime | NONE | Indicates the system uptime, i.e., the time since booting | +| systemload_average | NONE | Indicates the average system load, reflecting the system's busyness | +| heap_committed | NONE | Indicates the committed size of JVM heap memory, i.e., the guaranteed heap memory size available to JVM | +| heap_init | NONE | Indicates the initial size of JVM heap memory | +| heap_used | NONE | Indicates the currently used JVM heap memory size | +| heap_max | NONE | Indicates the maximum available size of JVM heap memory | +| nonheap_committed | NONE | Indicates the committed size of JVM non-heap memory | +| nonheap_init | NONE | Indicates the initial size of JVM non-heap memory | +| nonheap_used | NONE | Indicates the currently used JVM non-heap memory size | +| nonheap_max | NONE | Indicates the maximum available size of JVM non-heap memory | +| thread_peak | NONE | Indicates the peak number of threads since JVM startup | +| thread_daemon | NONE | Indicates the current number of active daemon threads | +| thread_total_started | NONE | Indicates the total number of threads started since JVM startup | +| thread_count | NONE | Indicates the current number of active threads | +| 
garbage_collector_g1_young_generation_count | NONE | Indicates the number of young generation garbage collections by G1 garbage collector | +| garbage_collector_g1_young_generation_time | NONE | Indicates the total time spent in young generation garbage collections by G1 garbage collector | +| garbage_collector_g1_old_generation_count | NONE | Indicates the number of old generation garbage collections by G1 garbage collector | +| garbage_collector_g1_old_generation_time | NONE | Indicates the total time spent in old generation garbage collections by G1 garbage collector | +| garbage_collector_time_unit | NONE | Indicates the unit of garbage collection time (such as milliseconds, seconds, etc.) | - -| Metric Name | Metric Unit | Metric Description | -| --------------------------------------------- | -------- | -------------------------------------------------- | -| mem | NONE | Indicates the total memory of the system | -| mem_total | NONE | Indicates the total memory of the system (same as mem) | -| mem_used | NONE | Indicates the currently used memory of the system | -| mem_free | NONE | Indicates the free memory of the system | -| mem_unit | NONE | Indicates the unit of memory (such as bytes, kilobytes, megabytes, etc.) 
| -| processors | NONE | Indicates the number of processors in the system | -| uptime | NONE | Indicates the system uptime, i.e., the time since booting | -| systemload_average | NONE | Indicates the average system load, reflecting the system's busyness | -| heap_committed | NONE | Indicates the committed size of JVM heap memory, i.e., the guaranteed heap memory size available to JVM | -| heap_init | NONE | Indicates the initial size of JVM heap memory | -| heap_used | NONE | Indicates the currently used JVM heap memory size | -| heap_max | NONE | Indicates the maximum available size of JVM heap memory | -| nonheap_committed | NONE | Indicates the committed size of JVM non-heap memory | -| nonheap_init | NONE | Indicates the initial size of JVM non-heap memory | -| nonheap_used | NONE | Indicates the currently used JVM non-heap memory size | -| nonheap_max | NONE | Indicates the maximum available size of JVM non-heap memory | -| thread_peak | NONE | Indicates the peak number of threads since JVM startup | -| thread_daemon | NONE | Indicates the current number of active daemon threads | -| thread_total_started | NONE | Indicates the total number of threads started since JVM startup | -| thread_count | NONE | Indicates the current number of active threads | -| garbage_collector_g1_young_generation_count | NONE | Indicates the number of young generation garbage collections by G1 garbage collector | -| garbage_collector_g1_young_generation_time | NONE | Indicates the total time spent in young generation garbage collections by G1 garbage collector | -| garbage_collector_g1_old_generation_count | NONE | Indicates the number of old generation garbage collections by G1 garbage collector | -| garbage_collector_g1_old_generation_time | NONE | Indicates the total time spent in old generation garbage collections by G1 garbage collector | -| garbage_collector_time_unit | NONE | Indicates the unit of garbage collection time (such as milliseconds, seconds, etc.) 
| \ No newline at end of file diff --git a/home/docs/help/imap.md b/home/docs/help/imap.md index 9a4a9904897..a6cf82e577b 100644 --- a/home/docs/help/imap.md +++ b/home/docs/help/imap.md @@ -21,7 +21,7 @@ For example, in QQ Mail (other emails are similar): ### Configuration Parameters -| Parameter Name | Parameter Help Description | +| Parameter Name | Parameter Help Description | |:--------------------|------------------------------------------------------------------------------------------------------------| | Monitoring Host | IMAP mail server domain. Note ⚠️ do not include protocol headers (e.g., https://, http://). | | Task Name | The name that identifies this monitoring task, which needs to be unique. | @@ -40,8 +40,9 @@ Collect information on each folder in the email (custom folders can be configure #### Metrics Collection: (Folder Name in Email) -| Metric Name | Metric Unit | Metric Help Description | +| Metric Name | Metric Unit | Metric Help Description | |----------------------|-------------|-------------------------------------------------------| | Total message count | None | The total number of emails in this folder | | Recent message count | None | The number of recently received emails in this folder | | Unseen message count | None | The number of unread emails in this folder | + diff --git a/home/docs/help/influxdb.md b/home/docs/help/influxdb.md index cf3d838e796..92c5da380ef 100644 --- a/home/docs/help/influxdb.md +++ b/home/docs/help/influxdb.md @@ -7,61 +7,60 @@ keywords: [open source monitoring system, open source database monitoring, Influ ### Configuration Parameters -| Parameter Name | Parameter Description | -| -------------- | -------------------------------------------------------- | -| Monitor Host | The IPv4, IPv6, or domain name of the target being monitored. Note⚠️: Do not include the protocol header (e.g., https://, http://). | -| Task Name | A unique identifier for this monitoring task. 
| -| Port | The port on which the database is exposed. Default is 8086. | -| URL | The database connection URL, usually constructed from the host. No need to add it separately. | -| Collection Interval | The interval at which data is collected during monitoring, in seconds. The minimum interval that can be set is 30 seconds. | -| Probe Enabled | Whether to perform a probe check for monitoring availability before adding or modifying the monitoring task. | -| Description | Additional notes and remarks about this monitoring task. Users can provide information and descriptions here. | +| Parameter Name | Parameter Description | +|---------------------|-------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | The IPv4, IPv6, or domain name of the target being monitored. Note⚠️: Do not include the protocol header (e.g., https://, http://). | +| Task Name | A unique identifier for this monitoring task. | +| Port | The port on which the database is exposed. Default is 8086. | +| URL | The database connection URL, usually constructed from the host. No need to add it separately. | +| Collection Interval | The interval at which data is collected during monitoring, in seconds. The minimum interval that can be set is 30 seconds. | +| Probe Enabled | Whether to perform a probe check for monitoring availability before adding or modifying the monitoring task. | +| Description | Additional notes and remarks about this monitoring task. Users can provide information and descriptions here. 
| ### Collected Metrics #### Metric Set: influxdb_info | Metric Name | Metric Unit | Metric Description | -|------------| ----------- |--------| -| build_date | N/A | Creation date | -| os | N/A | Operating system | -| cpus | N/A | CPUs | -| version | N/A | Version number | +|-------------|-------------|--------------------| +| build_date | N/A | Creation date | +| os | N/A | Operating system | +| cpus | N/A | CPUs | +| version | N/A | Version number | #### Metric Set: http_api_request_duration_seconds -| Metric Name | Metric Unit | Metric Description | -|---------------|------|---------| -| handler | N/A | Handler | -| path | N/A | Path | -| response_code | N/A | Response code | -| method | N/A | Request method | -| user_agent | N/A | User agent | -| status | N/A | Status | +| Metric Name | Metric Unit | Metric Description | +|---------------|-------------|--------------------| +| handler | N/A | Handler | +| path | N/A | Path | +| response_code | N/A | Response code | +| method | N/A | Request method | +| user_agent | N/A | User agent | +| status | N/A | Status | #### Metric Set: storage_compactions_queued -| Metric Name | Metric Unit | Metric Description | -|---------------------------------|------|------------| -| bucket | N/A | Storage bucket | -| engine | N/A | Engine type | -| id | N/A | Identifier | -| level | N/A | Level | -| path | N/A | Data file path | - +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|--------------------| +| bucket | N/A | Storage bucket | +| engine | N/A | Engine type | +| id | N/A | Identifier | +| level | N/A | Level | +| path | N/A | Data file path | #### Metric Set: http_write_request_bytes -| Metric Name | Metric Unit | Metric Description | -| ----------- |------|--------| -| endpoint | N/A | Endpoint | -| org_id | N/A | Organization identifier | -| status | N/A | Status | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|-------------------------| +| endpoint | 
N/A | Endpoint | +| org_id | N/A | Organization identifier | +| status | N/A | Status | #### Metric Set: qc_requests_total -| Metric Name | Metric Unit | Metric Description | -| ----------- |------|--------| -| result | N/A | Result | -| org | N/A | Organization identifier | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|-------------------------| +| result | N/A | Result | +| org | N/A | Organization identifier | diff --git a/home/docs/help/influxdb_promql.md b/home/docs/help/influxdb_promql.md index fcef4b4acff..afed14cad7a 100644 --- a/home/docs/help/influxdb_promql.md +++ b/home/docs/help/influxdb_promql.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|----------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | IP, IPv6, or domain name of the target being monitored. Note ⚠️: Do not include protocol header (e.g., https://, http://). | | Monitoring name | Name to identify this monitoring, ensuring uniqueness of names. 
| @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: basic_influxdb_memstats_alloc -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: influxdb_database_numMeasurements -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | job | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -46,7 +46,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: influxdb_query_rate_seconds -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | @@ -54,8 +54,9 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: influxdb_queryExecutor_queriesFinished_10s -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | | value | None | Metric value | + diff --git a/home/docs/help/iotdb.md b/home/docs/help/iotdb.md index 0e4dcad9912..bec827feb73 100644 --- a/home/docs/help/iotdb.md +++ b/home/docs/help/iotdb.md @@ -61,33 +61,33 @@ predefinedMetrics: #### 
Metric collection: cluster_node_status -| Metric Name | Metric Unit | Metric Help Description | -| --------- |------|-------------------------| -| name | None | Node name IP | -| status | None | Node status, 1=online 2=offline | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------| +| name | None | Node name IP | +| status | None | Node status, 1=online 2=offline | #### Metric collection: jvm_memory_committed_bytes -| Metric Name | Metric Unit | Metric Help Description | -|-------|------|------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | The memory size currently requested by the JVM | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------------| +| area | none | heap memory or nonheap memory | +| id | none | memory block | +| value | MB | The memory size currently requested by the JVM | #### Metric collection: jvm_memory_used_bytes -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | JVM used memory size | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------| +| area | none | heap memory or nonheap memory | +| id | none | memory block | +| value | MB | JVM used memory size | #### Metric collection: jvm_threads_states_threads -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------| -| state | none | thread state | -| count | None | The number of threads corresponding to the thread state | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------------------------------| +| state | none | thread state | +| count | None | The number of threads corresponding to 
the thread state | #### Index collection: quantity business data @@ -114,7 +114,8 @@ predefinedMetrics: #### Metric collection: thrift_connections -| Metric Name | Metric Unit | Metric Help Description | -|-------|------|-------------| -| name | None | name | -| connection | none | thrift current connection number | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------| +| name | None | name | +| connection | none | thrift current connection number | + diff --git a/home/docs/help/issue.md b/home/docs/help/issue.md index c3dffd147be..a48e84cfd4e 100644 --- a/home/docs/help/issue.md +++ b/home/docs/help/issue.md @@ -1,49 +1,58 @@ --- id: issue title: Common issues -sidebar_label: Common issues +sidebar_label: Common issues --- -### Monitoring common issues +### Monitoring common issues -1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http +1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** -> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) +> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http + +2. 
**The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** + +> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) 3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. -The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. > When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - -### Docker Deployment common issues +### Docker Deployment common issues 1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** -The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. + The problems lies in Docker container failed to visit and connect localhost port. 
Because the docker default network mode is Bridge mode which can't access local machine through localhost. + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` +> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. **According to the process deploy,visit http://ip:1157/ no interface** -Please refer to the following points to troubleshoot issues: + Please refer to the following points to troubleshoot issues: + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> > three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. +> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. +> +>> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. + +3. **Log an error TDengine connection or insert SQL failed** -3. **Log an error TDengine connection or insert SQL failed** > one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 
+> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. ### Package Deployment common issues 1. **According to the process deploy,visit http://ip:1157/ no interface** Please refer to the following points to troubleshoot issues: + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. > two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** + > one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. +> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + diff --git a/home/docs/help/jetty.md b/home/docs/help/jetty.md index 3e5230aa9d0..6e069553dba 100644 --- a/home/docs/help/jetty.md +++ b/home/docs/help/jetty.md @@ -23,6 +23,7 @@ keywords: [open source monitoring tool, open source jetty web server monitoring java -jar $JETTY_HOME/start.jar --add-module=jmx java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ``` + Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file 2. Edit the `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file to modify the JMX IP port and other parameters. 
@@ -50,7 +51,7 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -73,19 +74,17 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` | max | kb | max size | | used | kb | used size | - #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | diff --git a/home/docs/help/jvm.md b/home/docs/help/jvm.md index 95b1545fffc..3b47e0e7a8a 100644 --- a/home/docs/help/jvm.md +++ b/home/docs/help/jvm.md @@ -24,7 +24,7 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | 
|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -58,16 +58,15 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -76,4 +75,3 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/kafka.md b/home/docs/help/kafka.md index 067cabef0e9..f86913733b1 100644 --- a/home/docs/help/kafka.md +++ b/home/docs/help/kafka.md @@ -27,70 +27,64 @@ exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by JMX | +| Username | JMX connection user name, optional | +| Password | JMX connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metrics #### Metrics Set:server_info -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Version | | Kafka Version | -| StartTimeMs | ms | Start Time | -| CommitId | | Version Commit ID | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Version | | Kafka Version | +| StartTimeMs | ms | Start Time | +| CommitId | | Version Commit ID | #### Metrics Set:memory_pool -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:active_controller_count -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Value | | server active controller count | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------| +| Value | | server active controller count | #### Metrics Set:broker_partition_count -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Value | | broker partition count | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Value | | broker partition count | #### Metrics Set:broker_leader_count -| Metric name | Metric unit | Metric help description | 
-| ----------- | ----------- | ----------- | -| Value | | broker leader count | - - -#### Metrics Set:broker_handler_avg_percent - -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| EventType | | event type | -| RateUnit | | rate unit | -| Count | | percent count | -| OneMinuteRate | % | One Minute Rate | -| FiveMinuteRate | % | Five Minute Rate | -| MeanRate | % | Mean Rate | -| FifteenMinuteRate | % | Fifteen Minute Rate | - - - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Value | | broker leader count | + +#### Metrics Set:broker_handler_avg_percent + +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|-------------------------| +| EventType | | event type | +| RateUnit | | rate unit | +| Count | | percent count | +| OneMinuteRate | % | One Minute Rate | +| FiveMinuteRate | % | Five Minute Rate | +| MeanRate | % | Mean Rate | +| FifteenMinuteRate | % | Fifteen Minute Rate | diff --git a/home/docs/help/kafka_promql.md b/home/docs/help/kafka_promql.md index e88f6eb0342..ea358d0de8d 100644 --- a/home/docs/help/kafka_promql.md +++ b/home/docs/help/kafka_promql.md @@ -16,7 +16,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka ### Configuration Parameters -| Parameter Name | Parameter Description | +| Parameter Name | Parameter Description | |---------------------|----------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | IP, IPv6, or domain name of the target being monitored. Note ⚠️: Do not include protocol header (e.g., https://, http://). | | Monitoring name | Name to identify this monitoring, ensuring uniqueness of names. 
| @@ -35,7 +35,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka #### Metric Set: kafka_brokers -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | \_\_name__ | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -44,7 +44,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka #### Metric Set: kafka_topic_partitions -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | \_\_name__ | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -53,7 +53,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka #### Metric Set: kafka_server_brokertopicmetrics_bytesinpersec -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | \_\_name__ | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -64,3 +64,4 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka 1. If Kafka is enabled with JMX monitoring, you can use [Kafka](kafka) Monitoring. 2. If Kafka cluster deploys kafka_exporter to expose monitoring metrics, you can refer to [Prometheus task](prometheus) to configure the Prometheus collection task to monitor kafka. 
+ diff --git a/home/docs/help/kubernetes.md b/home/docs/help/kubernetes.md index 8e10896c6d1..45adda576fc 100644 --- a/home/docs/help/kubernetes.md +++ b/home/docs/help/kubernetes.md @@ -28,6 +28,7 @@ kubectl describe secret {secret} -n kube-system ``` #### method two: + ```shell kubectl create serviceaccount cluster-admin kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-admin --serviceaccount=default:cluster-admin @@ -36,59 +37,60 @@ kubectl create token --duration=1000h cluster-admin ### Configure parameters -| Parameter name | Parameter Help describes the | -|-------------|------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| APiServer port | K8s APiServer port, default 6443 | -| token | Authorize the Access Token | -| URL | The database connection URL is optional, if configured, the database name, user name and password parameters in the URL will override the parameter | configured above -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN 
NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| APiServer port | K8s APiServer port, default 6443 | +| token | Authorize the Access Token | +| URL | The database connection URL is optional, if configured, the database name, user name and password parameters in the URL will override the parameter | configured above | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric collection: nodes -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------| -| node_name | None | Node name | -| is_ready | None | Node Status | -| capacity_cpu | None | CPU capacity | -| allocatable_cpu | None | CPU | allotted -| capacity_memory | None | Memory capacity | -| allocatable_memory | None | Memory | allocated -| creation_time | None | Node creation time | +| Metric Name | metric unit | Metrics help describe | +|--------------------|-------------|-----------------------|-----------| +| node_name | None | Node name | +| is_ready | None | Node Status | +| capacity_cpu | None | CPU capacity | +| allocatable_cpu | None | CPU | allotted | +| capacity_memory | None | Memory capacity | +| allocatable_memory | None | Memory | allocated | +| creation_time | None | Node creation time | #### metric Collection: namespaces -| Metric Name | metric unit | Metrics help describe | -| -------- | -------- |-------------| -| namespace | None | namespace name | -| status | None | 
Status | -| creation_time | None | Created | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-----------------------| +| namespace | None | namespace name | +| status | None | Status | +| creation_time | None | Created | #### metric collection: pods -| Metric Name | metric unit | Metrics help describe | -| ---------------- | -------- |----------------| -| pod | None | Pod name | -| namespace | None | The namespace | to which the pod belongs -| status | None | Pod status | -| restart | None | Number of restarts | -| host_ip | None | The IP address of the host is | -| pod_ip | None | pod ip | -| creation_time | None | Pod creation time | -| start_time | None | Pod startup time | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-------------------------------|--------------------------| +| pod | None | Pod name | +| namespace | None | The namespace | to which the pod belongs | +| status | None | Pod status | +| restart | None | Number of restarts | +| host_ip | None | The IP address of the host is | +| pod_ip | None | pod ip | +| creation_time | None | Pod creation time | +| start_time | None | Pod startup time | #### metric Collection: services -| Metric Name | metric unit | Metrics help describe | -| ---------------- |------|--------------------------------------------------------| -| service | None | Service Name | -| namespace | None | The namespace | to which the service belongs -| type | None | Service Type ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | None | cluster ip | -| selector | None | tag selector matches | -| creation_time | None | Created | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-----------------------------------------------------------|------------------------------| +| service | None | Service Name | +| namespace | None | The namespace | to which the service belongs | +| type | None | Service Type ClusterIP 
NodePort LoadBalancer ExternalName | +| cluster_ip | None | cluster ip | +| selector | None | tag selector matches | +| creation_time | None | Created | + diff --git a/home/docs/help/linux.md b/home/docs/help/linux.md index 05e3405ff6e..6c22028114c 100644 --- a/home/docs/help/linux.md +++ b/home/docs/help/linux.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source linux monitoring tool, monit ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. 
The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | 
-| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | 
byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % | usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/docs/help/mariadb.md b/home/docs/help/mariadb.md index 6738a2710db..f5ef40cbfbc 100644 --- a/home/docs/help/mariadb.md +++ b/home/docs/help/mariadb.md @@ -15,49 +15,46 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 3306 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 3306 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------------| +| version | none | Database version | +| port | none | Database exposure service port | +| datadir | none | Database storage data disk address | +| max_connections | none | Database maximum connections | #### Metric set:status -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| threads_created | none | MariaDB created total connections | -| threads_connected | none | MariaDB connected connections | -| threads_cached | none | MariaDB current cached connections | -| threads_running | none | MariaDB current active 
connections | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|------------------------------------| +| threads_created | none | MariaDB created total connections | +| threads_connected | none | MariaDB connected connections | +| threads_cached | none | MariaDB current cached connections | +| threads_running | none | MariaDB current active connections | #### Metric set:innodb -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| innodb_data_reads | none | innodb average number of reads from files per second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | - - +| Metric name | Metric unit | Metric help description | +|---------------------|-------------|------------------------------------------------------| +| innodb_data_reads | none | innodb average number of reads from files per second | +| innodb_data_writes | none | innodb average number of writes from file per second | +| innodb_data_read | KB | innodb average amount of data read per second | +| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/docs/help/memcached.md b/home/docs/help/memcached.md index 5d89ce0977b..920da021e6b 100644 --- a/home/docs/help/memcached.md +++ b/home/docs/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance with 1.4 You need to use the stats command to view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. 
@@ -36,7 +36,7 @@ STAT version 1.4.15 ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -49,7 +49,7 @@ STAT version 1.4.15 #### Metrics Set:server_info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|---------------------------------------------------| | pid | | Memcache server process ID | | uptime | s | The number of seconds the server has been running | @@ -66,4 +66,5 @@ STAT version 1.4.15 | cmd_set | | Set command request count | | cmd_flush | | Flush command request count | | get_misses | | Get command misses | -| delete_misses | | Delete command misses | \ No newline at end of file +| delete_misses | | Delete command misses | + diff --git a/home/docs/help/mongodb.md b/home/docs/help/mongodb.md index 4a2951ec23c..9c536e73d7a 100644 --- a/home/docs/help/mongodb.md +++ b/home/docs/help/mongodb.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |------------------------|-------------------------------------------------------------------------------------------------------------------------| | Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. 
| @@ -27,7 +27,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Build Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|-----------------------------------------------------------------------------------------| | version | none | The version number of the MongoDB server. | | gitVersion | none | The Git version of the MongoDB codebase. | @@ -39,7 +39,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Server Document -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-----------------------------------| | deleted | none | The number of documents deleted. | | inserted | none | The number of documents inserted. | @@ -48,21 +48,21 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Server Operation -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |----------------|-------------|------------------------------------------------------------------| | scanAndOrder | none | The number of times a query required both scanning and ordering. | | writeConflicts | none | The number of write conflicts that occurred. | #### Metric set: Max Connections -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|--------------------------------------------| | deletedDocuments | none | Number of deleted documents. | | passes | none | Total number of passes for TTL operations. 
| #### Metric set:System Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|------------------------------------------------------| | currentTime | none | Current system time. | | hostname | none | Hostname of the server. | @@ -75,7 +75,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:OS Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|----------------------------------| | type | none | Type of the operating system. | | name | none | Name of the operating system. | @@ -83,7 +83,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Extra Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------|-------------|--------------------------------------------------------| | versionString | none | String describing the version of the operating system. | | libcVersion | none | Version of the C standard library (libc). | diff --git a/home/docs/help/mongodb_atlas.md b/home/docs/help/mongodb_atlas.md index 05f1f517b7c..b295a517adb 100644 --- a/home/docs/help/mongodb_atlas.md +++ b/home/docs/help/mongodb_atlas.md @@ -4,32 +4,31 @@ title: Monitoring MongoDB Atlas Database sidebar_label: MongoDB Atlas Database keywords: [open-source monitoring system, open-source database monitoring, MongoDB Atlas database monitoring] --- + > Collect and monitor general performance metrics of MongoDB Atlas databases. 
### Configuration Parameters - -| Parameter Name | Parameter Description | -| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Parameter Description | +|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| | Target Host | The IP address, IPv4, IPv6, or domain name of the target to be monitored. Note: ⚠️ Do not include protocol headers (e.g., https://, http://). | -| Task Name | The name identifying this monitor, which must be unique. | -| Username | MongoDB username, optional. | -| Password | MongoDB password, optional. | -| Database | Name of the database. | -| Authentication Database | The name of the database storing user credentials. | -| Connection Timeout | Timeout for MongoDB connection when no response is received, in milliseconds (ms). Default is 6000 ms. | -| Cluster Mode | Value for MongoDB Atlas cluster: mongodb-atlas | -| Collection Interval | Interval for periodic data collection, in seconds. The minimum interval is 30 seconds. | -| Binding Tags | Used for categorizing and managing monitoring resources. | -| Description/Remarks | Additional labels and description for this monitor; users can add notes here. | +| Task Name | The name identifying this monitor, which must be unique. | +| Username | MongoDB username, optional. | +| Password | MongoDB password, optional. | +| Database | Name of the database. | +| Authentication Database | The name of the database storing user credentials. | +| Connection Timeout | Timeout for MongoDB connection when no response is received, in milliseconds (ms). Default is 6000 ms. | +| Cluster Mode | Value for MongoDB Atlas cluster: mongodb-atlas | +| Collection Interval | Interval for periodic data collection, in seconds. The minimum interval is 30 seconds. 
| +| Binding Tags | Used for categorizing and managing monitoring resources. | +| Description/Remarks | Additional labels and description for this monitor; users can add notes here. | ### Collection Metrics #### Metric Set: Build Information - -| Metric Name | Unit | Metric Description | -| ---------------- | ---- | --------------------------------- | +| Metric Name | Unit | Metric Description | +|------------------|------|-----------------------------------| | version | None | MongoDB version information | | gitVersion | None | Source code git version | | sysInfo | None | System information | @@ -38,9 +37,8 @@ keywords: [open-source monitoring system, open-source database monitoring, Mongo #### Metric Set: Server Document - -| Metric Name | Unit | Metric Description | -| ----------- | ---- | ------------------------------------------------- | +| Metric Name | Unit | Metric Description | +|-------------|------|---------------------------------------------------| | delete | None | Number of deletions | | insert | None | Number of insertions | | update | None | Number of updates | @@ -50,27 +48,24 @@ keywords: [open-source monitoring system, open-source database monitoring, Mongo #### Metric Set: Network Operations - -| Metric Name | Unit | Metric Description | -| ----------- | ---- | --------------------------------------------------- | +| Metric Name | Unit | Metric Description | +|-------------|------|-----------------------------------------------------| | Bytes In | None | Number of times a query needs to scan and sort data | | Bytes Out | None | Number of write conflicts | | Request Num | None | Number of requests | #### Metric Set: Connection Information - -| Metric Name | Unit | Metric Description | -| ------------------------- | ---- | ------------------------------------ | +| Metric Name | Unit | Metric Description | +|---------------------------|------|--------------------------------------| | Current Connections | None | Number of current active 
connections | | Available Connections | None | Number of available connections | | Total Created Connections | None | Total number of connections created | #### Metric Set: Database Statistics - -| Metric Name | Unit | Metric Description | -| ----------------- | ----- | ------------------------- | +| Metric Name | Unit | Metric Description | +|-------------------|-------|---------------------------| | Database Name | None | Name of the database | | Collections | None | Number of collections | | Views | None | Number of views | @@ -80,3 +75,4 @@ keywords: [open-source monitoring system, open-source database monitoring, Mongo | Storage Size | Bytes | Size of storage used | | Indexes | None | Number of indexes | | Index Size | Bytes | Total size of indexes | + diff --git a/home/docs/help/mysql.md b/home/docs/help/mysql.md index 4ec232ee8df..6d689f68765 100644 --- a/home/docs/help/mysql.md +++ b/home/docs/help/mysql.md @@ -13,9 +13,9 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo - Copy the jar package to the `hertzbeat/ext-lib` directory. - Restart the HertzBeat service. -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -33,31 +33,28 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------------| +| version | none | Database version | +| port | none | Database exposure service port | +| datadir | none | Database storage data disk address | +| max_connections | none | Database maximum connections | #### Metric set:status -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| threads_created | none | MySql created total connections | -| threads_connected | none | MySql connected connections | -| threads_cached | none | MySql current cached connections | -| threads_running | none | MySql current active connections | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|----------------------------------| +| threads_created | none | MySql created total connections | +| threads_connected | none | MySql connected connections | +| threads_cached | none | MySql current cached connections | +| threads_running | none | MySql current active connections | #### Metric set:innodb -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| innodb_data_reads | none | innodb average number of reads from files per second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | - - 
+| Metric name | Metric unit | Metric help description | +|---------------------|-------------|------------------------------------------------------| +| innodb_data_reads | none | innodb average number of reads from files per second | +| innodb_data_writes | none | innodb average number of writes from file per second | +| innodb_data_read | KB | innodb average amount of data read per second | +| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/docs/help/nacos.md b/home/docs/help/nacos.md index 1dbc4113c5d..4de3661c17b 100644 --- a/home/docs/help/nacos.md +++ b/home/docs/help/nacos.md @@ -1,93 +1,95 @@ ---- -id: nacos -title: Monitoring:Nacos Server monitoring -sidebar_label: Nacos Server -keywords: [open source monitoring tool, open source middleware monitoring tool, monitoring Nacos Server metrics] ---- - -> Hertzbeat monitors metrics of the Nacos Server by calling the Nacos Metrics Api. - -### PreRequisites - -#### Deploy Nacos cluster to expose metrics data - -1. Deploy the Nacos cluster according to [deployment document](https://nacos.io/en-us/docs/deployment.html). -2. Configure the application. properties file to expose metrics data. -``` -management.endpoints.web.exposure.include=* -``` -3. Access ```{ip}:8848/nacos/actuator/prometheus``` to see if metrics data can be accessed. - -More information see [Nacos monitor guide](https://nacos.io/en-us/docs/monitor-guide.html). - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Target name | Identify the name of this monitoring. The name needs to be unique | -| Nacos Port | Port provided by the Nacos Server. 
The default is 8848 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:jvm - -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| system_cpu_usage | none | cpu usage | -| system_load_average_1m | none | load | -| jvm_memory_used_bytes | byte | jvm memory used | -| jvm_memory_max_bytes | byte | jvm max memory | -| jvm_gc_pause_seconds_count | none | gc count | -| jvm_gc_pause_seconds_sum | second | gc time | -| jvm_threads_daemon | none | jvm threads count | - -#### Metric set:Nacos - -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| http_server_requests_seconds_count | second | http requests count | -| http_server_requests_seconds_sum | second | http requests time | -| nacos_timer_seconds_sum | second | Nacos config notify time | -| nacos_timer_seconds_count | none | Nacos config notify count | -| nacos_monitor{name='longPolling'} | none | Nacos config connection count | -| nacos_monitor{name='configCount'} | none | Nacos configuration file count | -| nacos_monitor{name='dumpTask'} | none | Nacos config dump task count | -| nacos_monitor{name='notifyTask'} | none | Nacos config notify task count | -| nacos_monitor{name='getConfig'} | none | Nacos config read configuration count | -| nacos_monitor{name='publish'} | none | Nacos config update configuration count | -| nacos_monitor{name='ipCount'} | none | Nacos naming ip count | -| nacos_monitor{name='domCount'} | none | Nacos naming domain count(1.x version) | -| nacos_monitor{name='serviceCount'} | none | Nacos naming domain count(2.x version) | -| nacos_monitor{name='failedPush'} | none | Nacos naming push fail count | -| 
nacos_monitor{name='avgPushCost'} | second | Nacos naming push cost time(average) | -| nacos_monitor{name='leaderStatus'} | none | Nacos naming if node is leader | -| nacos_monitor{name='maxPushCost'} | second | Nacos naming push cost time(max) | -| nacos_monitor{name='mysqlhealthCheck'} | none | Nacos naming mysql health check count | -| nacos_monitor{name='httpHealthCheck'} | none | Nacos naming http health check count | -| nacos_monitor{name='tcpHealthCheck'} | none | Nacos naming tcp health check count | - -#### Metric set:Nacos exception - -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| nacos_exception_total{name='db'} | none | database exception | -| nacos_exception_total{name='configNotify'} | none | Nacos config notify exception | -| nacos_exception_total{name='unhealth'} | none | Nacos config server health check exception | -| nacos_exception_total{name='disk'} | none | Nacos naming write disk exception | -| nacos_exception_total{name='leaderSendBeatFailed'} | none | Nacos naming leader send heart beat fail count | -| nacos_exception_total{name='illegalArgument'} | none | request argument illegal count | -| nacos_exception_total{name='nacos'} | none | Nacos inner exception | - -#### Metric set:client - -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| nacos_monitor{name='subServiceCount'} | none | subscribed services count | -| nacos_monitor{name='pubServiceCount'} | none | published services count | -| nacos_monitor{name='configListenSize'} | none | listened configuration file count | -| nacos_client_request_seconds_count | none | request count | -| nacos_client_request_seconds_sum | second | request time | - \ No newline at end of file +--- +id: nacos +title: Monitoring:Nacos Server monitoring +sidebar_label: Nacos Server +keywords: [open source monitoring tool, open source middleware monitoring tool, monitoring Nacos Server metrics] +--- + +> 
Hertzbeat monitors metrics of the Nacos Server by calling the Nacos Metrics Api. + +### PreRequisites + +#### Deploy Nacos cluster to expose metrics data + +1. Deploy the Nacos cluster according to [deployment document](https://nacos.io/en-us/docs/deployment.html). +2. Configure the application. properties file to expose metrics data. + +``` +management.endpoints.web.exposure.include=* +``` + +3. Access ```{ip}:8848/nacos/actuator/prometheus``` to see if metrics data can be accessed. + +More information see [Nacos monitor guide](https://nacos.io/en-us/docs/monitor-guide.html). + +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|-------------------------------------------------------------------------------------------------------------------------| +| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Target name | Identify the name of this monitoring. The name needs to be unique | +| Nacos Port | Port provided by the Nacos Server. 
The default is 8848 | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | + +### Collection Metric + +#### Metric set:jvm + +| Metric name | Metric unit | Metric help description | +|----------------------------|-------------|-------------------------| +| system_cpu_usage | none | cpu usage | +| system_load_average_1m | none | load | +| jvm_memory_used_bytes | byte | jvm memory used | +| jvm_memory_max_bytes | byte | jvm max memory | +| jvm_gc_pause_seconds_count | none | gc count | +| jvm_gc_pause_seconds_sum | second | gc time | +| jvm_threads_daemon | none | jvm threads count | + +#### Metric set:Nacos + +| Metric name | Metric unit | Metric help description | +|----------------------------------------|-------------|-----------------------------------------| +| http_server_requests_seconds_count | second | http requests count | +| http_server_requests_seconds_sum | second | http requests time | +| nacos_timer_seconds_sum | second | Nacos config notify time | +| nacos_timer_seconds_count | none | Nacos config notify count | +| nacos_monitor{name='longPolling'} | none | Nacos config connection count | +| nacos_monitor{name='configCount'} | none | Nacos configuration file count | +| nacos_monitor{name='dumpTask'} | none | Nacos config dump task count | +| nacos_monitor{name='notifyTask'} | none | Nacos config notify task count | +| nacos_monitor{name='getConfig'} | none | Nacos config read configuration count | +| nacos_monitor{name='publish'} | none | Nacos config update configuration count | +| nacos_monitor{name='ipCount'} | none | Nacos naming ip count | +| nacos_monitor{name='domCount'} | none | Nacos naming domain count(1.x version) | +| nacos_monitor{name='serviceCount'} | none | Nacos naming domain count(2.x version) | +| 
nacos_monitor{name='failedPush'} | none | Nacos naming push fail count | +| nacos_monitor{name='avgPushCost'} | second | Nacos naming push cost time(average) | +| nacos_monitor{name='leaderStatus'} | none | Nacos naming if node is leader | +| nacos_monitor{name='maxPushCost'} | second | Nacos naming push cost time(max) | +| nacos_monitor{name='mysqlhealthCheck'} | none | Nacos naming mysql health check count | +| nacos_monitor{name='httpHealthCheck'} | none | Nacos naming http health check count | +| nacos_monitor{name='tcpHealthCheck'} | none | Nacos naming tcp health check count | + +#### Metric set:Nacos exception + +| Metric name | Metric unit | Metric help description | +|----------------------------------------------------|-------------|------------------------------------------------| +| nacos_exception_total{name='db'} | none | database exception | +| nacos_exception_total{name='configNotify'} | none | Nacos config notify exception | +| nacos_exception_total{name='unhealth'} | none | Nacos config server health check exception | +| nacos_exception_total{name='disk'} | none | Nacos naming write disk exception | +| nacos_exception_total{name='leaderSendBeatFailed'} | none | Nacos naming leader send heart beat fail count | +| nacos_exception_total{name='illegalArgument'} | none | request argument illegal count | +| nacos_exception_total{name='nacos'} | none | Nacos inner exception | + +#### Metric set:client + +| Metric name | Metric unit | Metric help description | +|----------------------------------------|-------------|-----------------------------------| +| nacos_monitor{name='subServiceCount'} | none | subscribed services count | +| nacos_monitor{name='pubServiceCount'} | none | published services count | +| nacos_monitor{name='configListenSize'} | none | listened configuration file count | +| nacos_client_request_seconds_count | none | request count | +| nacos_client_request_seconds_sum | second | request time | + diff --git 
a/home/docs/help/nebulagraph.md b/home/docs/help/nebulagraph.md index ae2cfb4683f..c23e39c14fe 100644 --- a/home/docs/help/nebulagraph.md +++ b/home/docs/help/nebulagraph.md @@ -14,7 +14,7 @@ The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. ``` -### +### **1、Obtain available parameters through the stats and rocksdb stats interfaces.** @@ -36,7 +36,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -55,7 +55,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat Too many indicators, related links are as follows **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------------------------------|-------------|--------------------------------------------------------------| | num_queries_hit_memory_watermark_rate | | The rate of statements that reached the memory watermark. | | num_queries_hit_memory_watermark_sum | | The sum of statements that reached the memory watermark. 
| @@ -67,8 +67,9 @@ Too many indicators, related links are as follows Too many indicators, related links are as follows **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |----------------------------|-------------|-------------------------------------------------------------| | rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. | | rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | | ... | | ... | + diff --git a/home/docs/help/nebulagraph_cluster.md b/home/docs/help/nebulagraph_cluster.md index d0da21a7adb..c39195f427e 100644 --- a/home/docs/help/nebulagraph_cluster.md +++ b/home/docs/help/nebulagraph_cluster.md @@ -11,7 +11,7 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open ### Configuration parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------| | Target Host | The IPv4, IPv6, or domain name of the monitored peer. Note ⚠️ without the protocol header (eg: https://, http://). | | Task Name | Identifies the name of this monitor, ensuring uniqueness of the name. 
| @@ -35,21 +35,21 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open #### Metric Set: Session -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------------|-------------|----------------------------------| | session | None | Number of sessions | | running_query_count | None | Number of queries being executed | #### Metric Set: Jobs -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |--------------|-------------|-------------------------------------------| | queue_jobs | None | Number of pending background tasks | | running_jobs | None | Number of background tasks being executed | #### Metric Set: Cluster node info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------------|-------------|---------------------------------| | total_storage_node | None | Number of storage nodes | | offline_storage_node | None | Number of offline storage nodes | @@ -60,7 +60,7 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open #### Metric Set: Storage Nodes -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-----------------------|-------------|-------------------------------------------------------| | host | None | Node address | | port | None | Port | @@ -89,3 +89,4 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open | version | None | Version | > If you need to customize monitoring templates to collect data from NebulaGraph clusters, please refer to: [NGQL Custom Monitoring](../advanced/extend-ngql.md) + diff --git a/home/docs/help/nginx.md b/home/docs/help/nginx.md index 99bb389000c..f630e4d4d24 100644 --- a/home/docs/help/nginx.md +++ b/home/docs/help/nginx.md @@ -20,6 +20,7 @@ If you want to monitor 
information in 'Nginx' with this monitoring type, you nee ```shell nginx -V ``` + View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. 2. Compile and install Nginx, add `ngx_http_stub_status_module` module @@ -50,6 +51,7 @@ server { } } ``` + 4. Reload Nginx ```shell @@ -107,14 +109,13 @@ nginx -s reload 4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. - **Refer Doc: https://github.com/zls0424/ngx_req_status** **⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -128,28 +129,27 @@ nginx -s reload #### Metrics Set:nginx_status -| Metric name | Metric unit | Metric help description | -|-------------|-------------|------------------------------------------| -| accepts | | Accepted connections | -| handled | | Successfully processed connections | -| active | | Currently active connections | -| dropped | | Discarded connections | -| requests | | Client requests | -| reading | | Connections performing read operations | -| writing | | Connections performing write operations | -| waiting | | Waiting connections | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-----------------------------------------| +| accepts | | Accepted connections | +| handled | | Successfully processed connections | +| active | | Currently active connections | +| dropped | | Discarded connections | +| requests | | Client requests | +| reading | | Connections performing read operations | +| writing | | Connections performing write operations | +| waiting | | Waiting connections | #### Metrics Set:req_status -| Metric name | Metric unit | Metric help description | -|-------------|-------------|---------------------------------| -| zone_name | | Group category | -| key | | Group name | -| max_active | | Maximum concurrent connections | -| max_bw | kb | Maximum bandwidth | -| traffic | kb | Total traffic | -| requests | | Total requests | -| active | | Current concurrent connections | -| bandwidth | kb | Current bandwidth | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------| +| zone_name | | Group category | +| key | | Group name | +| max_active | | Maximum concurrent connections | +| max_bw | kb | Maximum bandwidth | +| traffic | kb | Total traffic | +| requests | | Total requests | +| active | | Current concurrent connections | +| bandwidth | kb | Current bandwidth | diff --git a/home/docs/help/ntp.md 
b/home/docs/help/ntp.md index 5eca6c58e80..666f2a6b39a 100644 --- a/home/docs/help/ntp.md +++ b/home/docs/help/ntp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -23,7 +23,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito #### Metrics Set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |--------------|-------------|------------------------------------------------------------------------------------------| | responseTime | ms | The time it takes for the NTP server to respond to a request). | | time | ms | The current time reported by the NTP server). | diff --git a/home/docs/help/openai.md b/home/docs/help/openai.md index 7fc70548645..7165925372f 100644 --- a/home/docs/help/openai.md +++ b/home/docs/help/openai.md @@ -8,6 +8,7 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI ### Preparation #### Obtain Session Key + > 1. 
Open Chrome browser's network request interface > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` @@ -22,7 +23,7 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI ### Configuration Parameters -| Parameter Name | Parameter Description | +| Parameter Name | Parameter Description | |:------------------|------------------------------------------------------------------------------------------------| | Monitoring Host | Fill in api.openai.com here. | | Task Name | Identify the name of this monitoring, ensuring uniqueness. | @@ -36,7 +37,7 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI #### Metric Set: Credit Grants -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |----------------------|-------------|--------------------------------------| | Total Granted | USD ($) | Total granted credit limit | | Total Used | USD ($) | Total used credit limit | @@ -45,14 +46,14 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI #### Metric Set: Model Cost -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |-------------|-------------|------------------------| | Model Name | None | Name of the model | | Cost | USD ($) | Expenses for the model | #### Metric Set: Billing Subscription -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------------|-------------|-----------------------------------------| | Has Payment Method | None | Whether payment method is available | | Canceled | None | Whether subscription is cancelled | @@ -80,3 +81,4 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI | Tax IDs | None | Tax IDs | | Billing Address | None | Billing address | | Business Address | None | Business address | + diff --git a/home/docs/help/opengauss.md 
b/home/docs/help/opengauss.md index 650882861e8..28171658951 100644 --- a/home/docs/help/opengauss.md +++ b/home/docs/help/opengauss.md @@ -5,54 +5,52 @@ sidebar_label: OpenGauss Database keywords: [open source monitoring tool, open source database monitoring tool, monitoring opengauss database metrics] --- -> Collect and monitor the general performance Metrics of OpenGauss database. +> Collect and monitor the general performance Metrics of OpenGauss database. ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | #### Metric set:state -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| 
stats_reset | none | The last time these statistics were reset | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | #### Metric set:activity -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| running | connections | Number of current client connections | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | diff --git a/home/docs/help/opensuse.md b/home/docs/help/opensuse.md index e4220262e67..acaf86632d4 100644 --- a/home/docs/help/opensuse.md +++ b/home/docs/help/opensuse.md @@ -4,103 +4,105 @@ title: Monitoring OpenSUSE Operating System Monitoring sidebar_label: OpenSUSE OS keywords: [open source monitoring system, open source operating system monitoring, OpenSUSE OS monitoring] --- + > Collect and monitor general performance metrics of the OpenSUSE operating system. 
### Configuration Parameters -| Parameter Name | Parameter Help Description | -| -------------- | ---------------------------------------------------------------------------- | -| Monitored Host | The IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ No protocol header (e.g., https://, http://). | -| Task Name | The name that identifies this monitoring, which must be unique. | -| Port | The port provided by Linux SSH, default is 22. | -| Timeout | Sets the connection timeout in milliseconds, default is 6000 ms. | -| Connection Reuse | Sets whether SSH connections are reused, default is :false. If false, a new connection is created each time information is retrieved. | -| Username | SSH connection username, optional. | -| Password | SSH connection password, optional. | -| Collector | Configures which collector is used to schedule data collection for this monitoring. | -| Monitoring Period | The interval time for periodic data collection in seconds, with a minimum interval of 30 seconds. | -| Binding Tags | Used for categorized management of monitoring resources. | -| Description | Additional notes and descriptions for this monitoring, where users can make notes. | -| Key | The key required to connect to the server. | +| Parameter Name | Parameter Help Description | +|-------------------|---------------------------------------------------------------------------------------------------------------------------------------| +| Monitored Host | The IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ No protocol header (e.g., https://, http://). | +| Task Name | The name that identifies this monitoring, which must be unique. | +| Port | The port provided by Linux SSH, default is 22. | +| Timeout | Sets the connection timeout in milliseconds, default is 6000 ms. | +| Connection Reuse | Sets whether SSH connections are reused, default is :false. If false, a new connection is created each time information is retrieved. 
| +| Username | SSH connection username, optional. | +| Password | SSH connection password, optional. | +| Collector | Configures which collector is used to schedule data collection for this monitoring. | +| Monitoring Period | The interval time for periodic data collection in seconds, with a minimum interval of 30 seconds. | +| Binding Tags | Used for categorized management of monitoring resources. | +| Description | Additional notes and descriptions for this monitoring, where users can make notes. | +| Key | The key required to connect to the server. | ### Collection Metrics #### Metric Set: System Basic Information -| Metric Name | Unit | Metric Help Description | -| --------------- | ------- | ------------------------ | -| Host Name | None | Host name | -| System Version | None | Operating system version| -| Uptime | None | Uptime | +| Metric Name | Unit | Metric Help Description | +|----------------|------|--------------------------| +| Host Name | None | Host name | +| System Version | None | Operating system version | +| Uptime | None | Uptime | #### Metric Set: CPU Information -| Metric Name | Unit | Metric Help Description | -| --------------- | ----- | ---------------------------------- | -| info | None | CPU model | -| cores | Cores | Number of CPU cores | -| interrupt | Count | Number of CPU interrupts | -| load | None | Average CPU load over the last 1/5/15 minutes | -| context_switch | Count | Number of context switches | -| usage | % | CPU usage rate | +| Metric Name | Unit | Metric Help Description | +|----------------|-------|-----------------------------------------------| +| info | None | CPU model | +| cores | Cores | Number of CPU cores | +| interrupt | Count | Number of CPU interrupts | +| load | None | Average CPU load over the last 1/5/15 minutes | +| context_switch | Count | Number of context switches | +| usage | % | CPU usage rate | #### Metric Set: Memory Information -| Metric Name | Unit | Metric Help Description | -| ----------- | ---- 
| ------------------------ | -| total | Mb | Total memory capacity | -| used | Mb | Memory used by user programs | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory used for cache | +| Metric Name | Unit | Metric Help Description | +|-------------|------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | Memory used by user programs | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory used for cache | | available | Mb | Remaining available memory capacity | -| usage | % | Memory usage rate | +| usage | % | Memory usage rate | #### Metric Set: Disk Information -| Metric Name | Unit | Metric Help Description | -| --------------- | ----- | ----------------------------- | -| disk_num | Count | Total number of disks | -| partition_num | Count | Total number of partitions | -| block_write | Count | Total number of blocks written to disk | -| block_read | Count | Total number of blocks read from disk | -| write_rate | iops | Disk block write rate per second | +| Metric Name | Unit | Metric Help Description | +|---------------|-------|----------------------------------------| +| disk_num | Count | Total number of disks | +| partition_num | Count | Total number of partitions | +| block_write | Count | Total number of blocks written to disk | +| block_read | Count | Total number of blocks read from disk | +| write_rate | iops | Disk block write rate per second | #### Metric Set: Network Card Information -| Metric Name | Unit | Metric Help Description | -| ------------------- | ---- | -------------------------- | -| interface_name | None | Network card name | -| receive_bytes | Mb | Inbound data traffic | -| transmit_bytes | Mb | Outbound data traffic | +| Metric Name | Unit | Metric Help Description | +|----------------|------|-------------------------| +| interface_name | None | Network card name | +| receive_bytes | Mb | Inbound data traffic | +| transmit_bytes | Mb | Outbound data traffic | #### 
Metric Set: File System | Metric Name | Unit | Metric Help Description | -| ---------- | ---- | ------------------------ | -| filesystem | None | Name of the file system | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % | Usage rate | -| mounted | None | Mount point directory | +|-------------|------|-------------------------| +| filesystem | None | Name of the file system | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | Usage rate | +| mounted | None | Mount point directory | #### Metric Set: Top 10 CPU Processes Statistics for the top 10 processes using the CPU. Statistics include: Process ID, CPU usage, memory usage, executed command. -| Metric Name | Unit | Metric Help Description | -| ------------ | ---- | ------------------------ | -| pid | None | Process ID | -| cpu_usage | % | CPU usage rate | -| mem_usage | % | Memory usage rate | -| command | None | Executed command | +| Metric Name | Unit | Metric Help Description | +|-------------|------|-------------------------| +| pid | None | Process ID | +| cpu_usage | % | CPU usage rate | +| mem_usage | % | Memory usage rate | +| command | None | Executed command | #### Metric Set: Top 10 Memory Processes Statistics for the top 10 processes using memory. Statistics include: Process ID, memory usage, CPU usage, executed command. 
-| Metric Name | Unit | Metric Help Description | -| ------------ | ---- | ------------------------ | -| pid | None | Process ID | -| mem_usage | % | Memory usage rate | -| cpu_usage | % | CPU usage rate | -| command | None | Executed command | \ No newline at end of file +| Metric Name | Unit | Metric Help Description | +|-------------|------|-------------------------| +| pid | None | Process ID | +| mem_usage | % | Memory usage rate | +| cpu_usage | % | CPU usage rate | +| command | None | Executed command | + diff --git a/home/docs/help/oracle.md b/home/docs/help/oracle.md index 08e59eb0668..e8d5ddab704 100644 --- a/home/docs/help/oracle.md +++ b/home/docs/help/oracle.md @@ -15,7 +15,7 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -33,37 +33,38 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| database_version | none | Database version | -| database_type | none | Database type | -| hostname | none | Host name | -| instance_name | none | Database instance name | -| startup_time | none | Database start time | -| status | none | Database status | +| Metric name | Metric unit | Metric help description | +|------------------|-------------|-------------------------| +| database_version | none | Database version | +| database_type | none | Database type | +| hostname | none | Host name | +| instance_name | none | Database instance name | +| startup_time | none | Database start time | +| status | none | Database status | #### Metric set:tablespace -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| file_id | none | File ID | -| file_name | none | File name | -| tablespace_name | none | Table space name | -| status | none | Status | -| bytes | MB | Size | -| blocks | none | Number of blocks | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------| +| file_id | none | File ID | +| file_name | none | File name | +| tablespace_name | none | Table space name | +| status | none | Status | +| bytes | MB | Size | +| blocks | none | Number of blocks | #### Metric set:user_connect -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| username | none | Username | -| counts | number | Current connection counts | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|---------------------------| +| username | none | Username | +| counts | number | Current connection counts | #### Metric set:performance -| Metric name | Metric 
unit | Metric help description | -| ----------- | ----------- | ----------- | -| qps | QPS | I/O Requests per second | -| tps | TPS | User transaction per second | -| mbps | MBPS | I/O Megabytes per second | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-----------------------------| +| qps | QPS | I/O Requests per second | +| tps | TPS | User transaction per second | +| mbps | MBPS | I/O Megabytes per second | + diff --git a/home/docs/help/ping.md b/home/docs/help/ping.md index c5603fdfbce..7c894f488ff 100644 --- a/home/docs/help/ping.md +++ b/home/docs/help/ping.md @@ -5,32 +5,33 @@ sidebar_label: PING connectivity keywords: [open source monitoring tool, open source network monitoring tool, monitoring ping metrics] --- -> Ping the opposite end HOST address and judge its connectivity. +> Ping the opposite end HOST address and judge its connectivity. ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Ping timeout | Set the timeout when Ping does not respond to data, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Ping timeout | Set the timeout when Ping does not respond to data, unit:ms, default: 3000ms | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | ### Common Problem 1. Ping connectivity monitoring exception when installing hertzbeat for package deployment. 
The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. > When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address + diff --git a/home/docs/help/plugin.md b/home/docs/help/plugin.md index 8aa95f595c2..ccca94fdc45 100644 --- a/home/docs/help/plugin.md +++ b/home/docs/help/plugin.md @@ -1,10 +1,11 @@ --- id: plugin title: Custom plugin -sidebar_label: Custom plugin +sidebar_label: Custom plugin --- ## Custom plugins + ### Introduction Currently, `Hertzbeat` relies on the `alert` module to notify the user, and then the user can take actions such as sending requests, executing `sql`, executing `shell` scripts, etc. However, this can only be automated manually or by `webhook` to receive the alert message. @@ -13,6 +14,7 @@ After adding the customized code, you only need to package the `plugin` module, Currently, `HertzBeat` only set up the trigger `alert` method after alarm, if you need to set up the trigger method at the time of acquisition, startup program, etc., please mention `Task` in `https://github.com/apache/hertzbeat/issues/new/choose`. ### Specific uses + 1. Pull the master branch code `git clone https://github.com/apache/hertzbeat.git` and locate the `plugin` module's `Plugin` interface. ![plugin-1.png](/img/docs/help/plugin-1.png) @@ -22,6 +24,9 @@ Currently, `HertzBeat` only set up the trigger `alert` method after alarm, if yo 4. Package the `hertzbeat-plugin` module. ![plugin-3.png](/img/docs/help/plugin-3.png) + 5. 
Copy the packaged `jar` package to the `ext-lib` directory under the installation directory (for `docker` installations, mount the `ext-lib` directory first, then copy it there). ![plugin-4.png](/img/docs/help/plugin-4.png) + 6. Then restart `HertzBeat` to enable the customized post-alert handling policy. + diff --git a/home/docs/help/pop3.md b/home/docs/help/pop3.md index 822192ad66d..fffff2a494f 100644 --- a/home/docs/help/pop3.md +++ b/home/docs/help/pop3.md @@ -24,10 +24,9 @@ If you want to monitor information in 'POP3' with this monitoring type, you just 5. 通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 ``` - ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -42,9 +41,8 @@ If you want to monitor information in 'POP3' with this monitoring type, you just #### Metrics Set:email_status -| Metric name | Metric unit | Metric help description | -|--------------|-------------|------------------------------------------| -| email_count | | Number of emails | -| mailbox_size | kb | The total size of emails in the mailbox | - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-----------------------------------------| +| email_count | | Number of emails | +| mailbox_size | kb | The total size of emails in the mailbox | diff --git a/home/docs/help/port.md b/home/docs/help/port.md index e3350a8776f..7f420fd1375 100644 --- a/home/docs/help/port.md +++ b/home/docs/help/port.md @@ -7,25 +7,23 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito > Judge whether the exposed port of the opposite end service is available, then judge whether the opposite end service is available, and collect Metrics such as response time for monitoring. -### Configuration parameter - -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Connection timeout | Waiting timeout for port connection, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Connection timeout | Waiting timeout for port connection, unit:ms, default: 3000ms | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | - - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | diff --git a/home/docs/help/postgresql.md b/home/docs/help/postgresql.md index de14f9d62eb..57834a713bd 100644 --- a/home/docs/help/postgresql.md +++ b/home/docs/help/postgresql.md @@ -9,50 +9,48 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | #### Metric set:state -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| 
stats_reset | none | The last time these statistics were reset | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | #### Metric set:activity -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| running | connections | Number of current client connections | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | diff --git a/home/docs/help/prestodb.md b/home/docs/help/prestodb.md index 8c19e178ed5..7c438e96cef 100644 --- a/home/docs/help/prestodb.md +++ b/home/docs/help/prestodb.md @@ -4,12 +4,12 @@ title: Monitoring PrestoDB Database sidebar_label: PrestoDB Database keywords: [ open source monitoring system, open source database monitoring, presto database monitoring] --- + > Collect and monitor general performance metrics of PrestoDB Atlas databases. 
### Configuration Parameters - -| Parameter Name | Parameter Description | +| Parameter Name | Parameter Description | |---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| | Target Host | The IP address, IPv4, IPv6, or domain name of the target to be monitored. Note: ⚠️ Do not include protocol headers (e.g., https://, http://). | | port | Port | @@ -23,58 +23,53 @@ keywords: [ open source monitoring system, open source database monitoring, pres #### Metric Set: Cluster Status - -| Metric Name | Unit | Metric Description | -| ---------------- | ---- | ------------------------------- | -| activeWorkers | None | Active Workers | -| runningQueries | None | Running Queries | -| queuedQueries | None | Queued Queries | -| blockedQueries | None | Blocked Queries | -| runningDrivers | None | Running Drivers| -| runningTasks | None | Running Tasks | +| Metric Name | Unit | Metric Description | +|----------------|------|--------------------| +| activeWorkers | None | Active Workers | +| runningQueries | None | Running Queries | +| queuedQueries | None | Queued Queries | +| blockedQueries | None | Blocked Queries | +| runningDrivers | None | Running Drivers | +| runningTasks | None | Running Tasks | ### Metrics Collection: Node Information -| Metric Name | Unit | Metric Description | -|-------------------|------|----------------------------------------------------| -| `uri` | None | Node link | -| `recentRequests` | None | Number of requests in the recent period | -| `recentFailures` | None | Number of failed requests in the recent period | -| `recentSuccesses` | None | Number of successful requests in the recent period | -| `lastRequestTime` | None | Time of the most recent request | -| `lastResponseTime`| None | Time of the most recent response | -| `age` | None | Duration of operation | +| Metric Name | Unit | Metric Description | 
+|----------------------|------|----------------------------------------------------| +| `uri` | None | Node link | +| `recentRequests` | None | Number of requests in the recent period | +| `recentFailures` | None | Number of failed requests in the recent period | +| `recentSuccesses` | None | Number of successful requests in the recent period | +| `lastRequestTime` | None | Time of the most recent request | +| `lastResponseTime` | None | Time of the most recent response | +| `age` | None | Duration of operation | | `recentFailureRatio` | None | Failure rate in the recent period | - #### Metric Set: Node Status - -| Metric Name | Unit | Metric Description | -| ----------- | ---- | ------------------------------------------------- | -| nodeId | None | Node ID | -| nodeVersion | None | Node Version | -| environment | None | Environment | -| coordinator | None | Is Coordinator | -| uptime | None |Uptime| -| externalAddress | None | External Address | -| internalAddress | None | Internal Address | -| processors | None |Processors | -| processCpuLoad | None | Process CPU Load | -| systemCpuLoad | None | System CPU Load | -| heapUsed | MB | Heap Memory Used | -| heapAvailable | MB | Heap Memory Available | -| nonHeapUsed | MB | Non-Heap Memory Used | - +| Metric Name | Unit | Metric Description | +|-----------------|------|-----------------------| +| nodeId | None | Node ID | +| nodeVersion | None | Node Version | +| environment | None | Environment | +| coordinator | None | Is Coordinator | +| uptime | None | Uptime | +| externalAddress | None | External Address | +| internalAddress | None | Internal Address | +| processors | None | Processors | +| processCpuLoad | None | Process CPU Load | +| systemCpuLoad | None | System CPU Load | +| heapUsed | MB | Heap Memory Used | +| heapAvailable | MB | Heap Memory Available | +| nonHeapUsed | MB | Non-Heap Memory Used | #### Metric Set: Task Query - -| Metric Name | Unit | Metric Description | -| ----------- | ---- | 
--------------------------------------------------- | -| taskId | None | Task ID | -| version | None | Version | -| state| None | State | -| self| None | Self | -| lastHeartbeat| None | Last Heartbeat | +| Metric Name | Unit | Metric Description | +|---------------|------|--------------------| +| taskId | None | Task ID | +| version | None | Version | +| state | None | State | +| self | None | Self | +| lastHeartbeat | None | Last Heartbeat | diff --git a/home/docs/help/process.md b/home/docs/help/process.md index 599c4f1ea7b..825a20ac43b 100644 --- a/home/docs/help/process.md +++ b/home/docs/help/process.md @@ -4,34 +4,33 @@ title: Monitoring Linux Process Monitoring sidebar_label: Process keywords: [Open Source Monitoring System, Operating System Process Monitoring, Process Monitoring] --- + > Collect and monitor basic information of processes on Linux systems, including CPU usage, memory usage, physical memory, IO, etc. ## Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Target Host | The IPv4, IPv6, or domain name of the monitored endpoint. Note ⚠️: Do not include the protocol header (e.g., https://, http://). | -| Task Name | Identifies the name of this monitoring, ensuring uniqueness. | -| Port | SSH port of the Linux system, default: 22 | -| Timeout | Sets the timeout for the connection in milliseconds, default is 6000 milliseconds. | -| Reuse Connection | Sets whether SSH connection is reused, default is false. When false, a new connection is created for each information retrieval. | -| Username | Username for the server. | -| Password | Password for the server. 
| -| Process Name | Name or part of the name of the process to be monitored. | -| Collector | Specifies which collector to use for scheduling this monitoring. | -| Monitoring Interval | Interval for periodic data collection, in seconds. Minimum interval that can be set is 30 seconds. | -| Tags | Used for categorizing and managing monitoring resources. | -| Description | Additional notes and descriptions for identifying this monitoring. Users can add remarks here. | -| Private Key | Private key required for connecting to the server. | +| Task Name | Identifies the name of this monitoring, ensuring uniqueness. | +| Port | SSH port of the Linux system, default: 22 | +| Timeout | Sets the timeout for the connection in milliseconds, default is 6000 milliseconds. | +| Reuse Connection | Sets whether SSH connection is reused, default is false. When false, a new connection is created for each information retrieval. | +| Username | Username for the server. | +| Password | Password for the server. | +| Process Name | Name or part of the name of the process to be monitored. | +| Collector | Specifies which collector to use for scheduling this monitoring. | +| Monitoring Interval | Interval for periodic data collection, in seconds. Minimum interval that can be set is 30 seconds. | +| Tags | Used for categorizing and managing monitoring resources. | +| Description | Additional notes and descriptions for identifying this monitoring. Users can add remarks here. | +| Private Key | Private key required for connecting to the server. 
| ### Metrics Collected #### Metric Set: Process Basic Information - | Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | ------------------ | +|-------------|-------------|--------------------| | PID | NONE | Process ID | | User | NONE | User | | CPU | NONE | CPU Usage | @@ -41,9 +40,8 @@ keywords: [Open Source Monitoring System, Operating System Process Monitoring, P #### Metric Set: Memory Usage Information - | Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | ------------------ | +|-------------|-------------|--------------------| | PID | NONE | Process ID | | detail | NONE | Detailed metrics | @@ -63,9 +61,8 @@ Includes metrics for: #### Metric Set: Other Monitoring Information - -| Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | --------------------------------- | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|-----------------------------------| | PID | NONE | Process ID | | path | NONE | Execution Path | | date | NONE | Start Time | @@ -73,9 +70,8 @@ Includes metrics for: #### Metric Set: IO - | Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | ------------------ | +|-------------|-------------|--------------------| | PID | NONE | Process ID | | metric | NONE | Metric Name | | value | NONE | Metric Value | @@ -89,3 +85,4 @@ Includes metrics for: - read_bytes (Actual number of bytes read by the process from disk) - write_bytes (Actual number of bytes written by the process to disk) - cancelled_write_bytes (Actual number of bytes cancelled by the process while writing to disk) + diff --git a/home/docs/help/prometheus.md b/home/docs/help/prometheus.md index 4de9f80f67d..571a2e9b51b 100755 --- a/home/docs/help/prometheus.md +++ b/home/docs/help/prometheus.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, Prometheus protocol monitoring ] ### Configuration parameter -| Parameter name | Parameter help 
description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Task Name | Identify the name of this monitoring. The name needs to be unique. | @@ -40,5 +40,3 @@ You can use the following configuration: Keep the rest of the settings default. - - diff --git a/home/docs/help/pulsar.md b/home/docs/help/pulsar.md index 2cc520d6189..1424bd3f58b 100644 --- a/home/docs/help/pulsar.md +++ b/home/docs/help/pulsar.md @@ -4,52 +4,48 @@ title: Monitoring Pulsar Monitoring sidebar_label: Apache Pulsar keywords: [open-source monitoring system, open-source database monitoring, HbaseMaster monitoring] --- + > Collecting and monitoring general performance metrics of Pulsar **Protocol Used: HTTP** ## Configuration Parameters - -| Parameter Name | Description | -| ------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------| | Target Host | The monitored endpoint's IPV4, IPV6, or domain name. Note⚠️: Do not include the protocol header (e.g., https://, http://). | -| Port | The webServicePort value of Pulsar, default is 8080. | -| Task Name | The name identifying this monitoring task, must be unique. | -| Query Timeout | Set the connection timeout in milliseconds, default is 3000 milliseconds. | -| Monitoring Interval | Interval time for periodic data collection, in seconds, minimum interval is 30 seconds. | -| Binding Tags | Used for categorizing monitoring resources. | -| Description/Remarks | Additional notes and descriptions for this monitoring task. 
Users can add more information here. | +| Port | The webServicePort value of Pulsar, default is 8080. | +| Task Name | The name identifying this monitoring task, must be unique. | +| Query Timeout | Set the connection timeout in milliseconds, default is 3000 milliseconds. | +| Monitoring Interval | Interval time for periodic data collection, in seconds, minimum interval is 30 seconds. | +| Binding Tags | Used for categorizing monitoring resources. | +| Description/Remarks | Additional notes and descriptions for this monitoring task. Users can add more information here. | ### Collected Metrics #### Metric Set: Version Information - -| Metric Name | Unit | Description | -| ------------ | ---- | ------------------- | -| Version Info | NONE | Version Information | +| Metric Name | Unit | Description | +|--------------|------|---------------------| +| Version Info | NONE | Version Information | #### Metric Set: process_start_time_seconds - -| Metric Name | Unit | Description | -| ------------------ | ---- | ------------------ | -| Process Start Time | NONE | Process Start Time | +| Metric Name | Unit | Description | +|--------------------|------|--------------------| +| Process Start Time | NONE | Process Start Time | #### Metric Set: process_open_fds - -| Metric Name | Unit | Description | -| --------------------- | ---- | ------------------------------- | -| Open File Descriptors | NONE | Number of Open File Descriptors | +| Metric Name | Unit | Description | +|-----------------------|------|---------------------------------| +| Open File Descriptors | NONE | Number of Open File Descriptors | #### Metric Set: process_max_fds - -| Metric Name | Unit | Description | -| -------------------- | ---- | ---------------------------------- | -| Max File Descriptors | NONE | Maximum Number of File Descriptors | +| Metric Name | Unit | Description | +|----------------------|------|------------------------------------| +| Max File Descriptors | NONE | Maximum Number of File 
Descriptors | #### Metric Set: jvm_memory_pool_allocated_bytes diff --git a/home/docs/help/rabbitmq.md b/home/docs/help/rabbitmq.md index 1bcd3ea5851..917ca63c3d3 100644 --- a/home/docs/help/rabbitmq.md +++ b/home/docs/help/rabbitmq.md @@ -7,7 +7,7 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo > Monitoring the running status of RabbitMQ message middleware, nodes, topics and other related metrics. -### Pre-monitoring Operations +### Pre-monitoring Operations > HertzBeat uses RabbitMQ Management's Rest Api to collect RabbitMQ metric data. > Therefore, you need to enable the Management plug-in in your RabbitMQ environment @@ -24,7 +24,7 @@ rabbitmq-plugins enable rabbitmq_management ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | | Monitoring name | The name that identifies this monitoring, and the name needs to be unique. 
| @@ -40,7 +40,7 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: overview -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------|-------------|------------------------------------| | product_version | None | Product Version | | product_name | None | Product name | @@ -52,7 +52,7 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: object_totals -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |-------------|-------------|-----------------------------| | channels | none | total number of channels | | connections | none | total number of connections | @@ -62,24 +62,24 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: nodes -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------|-------------|-----------------------------------------------------------| | name | None | The node name | | type | None | The node type | | running | None | Running state | | os_pid | None | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | +| mem_limit | MB | Memory usage high watermark | +| mem_used | MB | Total amount of memory used | | fd_total | None | File descriptors available | -| fd_used | None | File descriptors used | -| sockets_total | None | Sockets available | -| sockets_used | None | Sockets used | -| proc_total | None | Erlang process limit | -| proc_used | None | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | +| fd_used | None | File descriptors used | +| sockets_total | None | Sockets available | +| sockets_used | None | Sockets used | +| proc_total | None | Erlang process limit | +| proc_used | None | Erlang processes used | +| disk_free_limit | GB | Free disk space low watermark | | disk_free | GB | Free disk space | -| gc_num | None | GC runs | -| 
gc_bytes_reclaimed | MB | Bytes reclaimed by GC | +| gc_num | None | GC runs | +| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | | context_switches | None | Context_switches num | | io_read_count | None | Total number of read operations | | io_read_bytes | KB | Total data size read into disk | @@ -100,27 +100,27 @@ rabbitmq-plugins enable rabbitmq_management | queue_deleted | None | queue deleted num | | connection_closed | None | connection closed num | - #### metrics: queues -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |------------------------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | None | The name of the queue with non-ASCII characters escaped as in C. | +| name | None | The name of the queue with non-ASCII characters escaped as in C. | | node | None | The queue on the node name | -| state | None | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | +| state | None | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | | type | None | Queue type, one of: quorum, stream, classic. | -| vhost | None | vhost path | +| vhost | None | vhost path | | auto_delete | None | Whether the queue will be deleted automatically when no longer used | -| policy | None | Effective policy name for the queue. | +| policy | None | Effective policy name for the queue. | | consumers | None | Number of consumers. | | memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. 
| | messages_ready | None | Number of messages ready to be delivered to clients | -| messages_unacknowledged | None | Number of messages delivered to clients but not yet acknowledged | +| messages_unacknowledged | None | Number of messages delivered to clients but not yet acknowledged | | messages | None | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | None | Number of messages from messages_ready which are resident in ram | +| messages_ready_ram | None | Number of messages from messages_ready which are resident in ram | | messages_persistent | None | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | +| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | | message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | +| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | + diff --git a/home/docs/help/redhat.md b/home/docs/help/redhat.md index d877c46df36..2a8472e00d6 100644 --- a/home/docs/help/redhat.md +++ b/home/docs/help/redhat.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | 
|---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IP, IPV6, or domain name of the monitored endpoint. Note ⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitoring, ensuring uniqueness. | @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|---------------------------| | Host Name | None | Host name. | | System Version | None | Operating system version. | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|-------------------------------------------| | info | None | CPU model. | | cores | None | Number of CPU cores. | @@ -47,7 +47,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Memory Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|------------------------------------| | total | Mb | Total memory capacity. | | used | Mb | Used memory by user programs. | @@ -58,7 +58,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Disk Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------|-------------|------------------------------------| | disk_num | None | Total number of disks. | | partition_num | None | Total number of partitions. 
| @@ -68,7 +68,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Interface Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------------| | interface_name | None | Name of the network interface. | | receive_bytes | Mb | Inbound data traffic. | @@ -105,3 +105,4 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/docs/help/redis.md b/home/docs/help/redis.md index de0df0d52ca..bdb78ce3584 100644 --- a/home/docs/help/redis.md +++ b/home/docs/help/redis.md @@ -2,244 +2,239 @@ id: redis title: 监控:REDIS数据库监控 sidebar_label: REDIS -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] +keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] --- > 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | +| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | 
----------- | ----------- | -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制| -| atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本| -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间| -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理| -| executable | 无 | 服务器可执行文件的路径 | -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志| -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|-----------------------------------------------| +| redis_version | 无 | Redis 服务器版本 | +| redis_git_sha1 | 无 | Git SHA1 | +| redis_git_dirty | 无 | Git dirty flag | +| redis_build_id | 无 | redis 构建的id | +| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | +| os | 无 | Redis 服务器的宿主操作系统 | +| arch_bits | 无 | 架构(32 或 64 位) | +| multiplexing_api | 无 | Redis使用的事件循环机制 | +| atomicvar_api | 无 | Redis使用的原子 API | +| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本 | +| process_id | 无 | 服务器进程的PID | +| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | +| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | +| tcp_port | 无 | TCP/IP侦听端口 | +| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | +| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | +| uptime_in_days | 无 | 自Redis服务器启动后的天数 | +| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | +| configured_hz | 无 | 服务器配置的频率设置 | 
+| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | +| executable | 无 | 服务器可执行文件的路径 | +| config_file | 无 | 配置文件的路径 | +| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | +| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | #### 指标集合:clients -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值| -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。| -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING)| -| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|--------------------------------------------------------------------------------| +| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | +| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | +| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | +| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | +| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | +| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | +| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | +| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | #### 指标集合:memory -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字| -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值| -| used_memory_peak | byte | Redis消耗的峰值内存(字节)| -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | 
-| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和| -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节)| -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte| 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同| -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片| -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| number_of_cached_scripts | 无 |缓存的lua脚本数量 | -| maxmemory | byte | maxmemory配置指令的值| -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | -| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。| -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 
用于AOF和AOF重写缓冲区的临时大小 | -| mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL)| -| lazyfreed_objects | 无 | 已延迟释放的对象数。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|----------|-----------------------------------------------------------------------------------------------| +| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | +| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | +| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | +| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak | byte | Redis消耗的峰值内存(字节) | +| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | +| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和 | +| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | +| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | +| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | +| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | +| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | +| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | +| total_system_memory | byte | Redis主机的内存总量 | +| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_lua | byte | Lua引擎使用的字节数 | +| used_memory_lua_human | KB | 上一个值的人类可读值 | +| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | +| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | +| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | +| maxmemory | byte | maxmemory配置指令的值 | +| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | +| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | +| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | +| allocator_frag_bytes | byte | allocator_active 和 
allocator_allocated 之间的差值。 | +| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | +| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | +| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | +| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | +| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | +| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | +| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | +| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | +| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | +| mem_clients_normal | 无 | 普通客户端使用的内存 | +| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | +| mem_allocator | 无 | 内存分配器,在编译时选择。 | +| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | +| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | +| lazyfreed_objects | 无 | 已延迟释放的对象数。 | #### 指标集合:persistence -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是| -| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比| -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功| -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| 
rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite| -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------|--------|-----------------------------------------------------------------------------------------------------| +| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | +| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | +| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | +| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | +| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | +| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | +| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | +| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | +| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | +| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | +| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | +| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | +| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | +| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | +| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite 
| +| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | +| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | +| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | +| aof_last_write_status | 无 | 上次aof写入状态 | +| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | +| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | #### 指标集合:stats -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数| -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数| -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数| -| tracking_total_keys | 无 | key 查询的总数| -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 
意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数| -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|----------------------------------------------------| +| total_connections_received | 无 | 服务器接受的连接总数 | +| total_commands_processed | 无 | 服务器处理的命令总数 | +| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | +| total_net_input_bytes | byte | 从网络读取的字节总数 | +| total_net_output_bytes | byte | 写入网络的总字节数 | +| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | +| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | +| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | +| sync_full | 无 | 具有副本的完整重新同步数 | +| sync_partial_ok | 无 | 接受的部分重新同步请求数 | +| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | +| expired_keys | 无 | 过期的key总数 | +| expired_stale_perc | 无 | 可能过期key的百分比 | +| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | +| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | +| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | +| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | +| keyspace_misses | 无 | 在主dict 中未查到key的次数 | +| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | +| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | +| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | +| total_forks | 无 | 自服务器启动以来的fork操作总数 | +| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | +| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | +| active_defrag_hits | 无 | 主动碎片整理命中次数 | +| active_defrag_misses | 无 | 主动碎片整理未命中次数 | +| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | +| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | +| tracking_total_keys | 无 | key 查询的总数 | +| tracking_total_items | 无 | item查询的总数 | +| tracking_total_prefixes | 无 | 前缀查询的总数 | +| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | +| 
total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | +| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | +| total_reads_processed | 无 | 正在读取的请求数 | +| total_writes_processed | 无 | 正在写入的请求数 | +| io_threaded_reads_processed | 无 | 正在读取的线程数 | +| io_threaded_writes_processed | 无 | 正在写入的线程数 | #### 指标集合:replication -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串| -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID| -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量| -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-------------------------------------------------------------------------------------| +| role | 无 | 节点角色 master 主节点 slave 从节点 | +| connected_slaves | 无 | 连接的从节点数 | +| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | +| master_replid | 无 | 实例启动的随机字符串 | +| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | +| master_repl_offset | 无 | 主从同步偏移量 | +| second_repl_offset | 无 | 接受从服务ID的最大偏移量 | +| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | +| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | +| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | +| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和| -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和| -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和| -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| 
used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU| -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|------------------------| +| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | +| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | +| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | +| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | #### 指标集合:errorstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|-----------| +| errorstat_ERR | 无 | 错误累计出现的次数 | +| errorstat_MISCONF | 无 | | #### 指标集合:cluster -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|--------------------| +| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | #### 指标集合:commandstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数| -| cmdstat_get | 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|---------------------------------------------------------------------------------------------------------------------------| +| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | +| cmdstat_get | 无 | get命令的统计信息 | +| 
cmdstat_setnx | 无 | setnx命令的统计信息 | +| cmdstat_hset | 无 | hset命令的统计信息 | +| cmdstat_hget | 无 | hget命令的统计信息 | +| cmdstat_lpush | 无 | lpush命令的统计信息 | +| cmdstat_rpush | 无 | rpush命令的统计信息 | +| cmdstat_lpop | 无 | lpop命令的统计信息 | +| cmdstat_rpop | 无 | rpop命令的统计信息 | +| cmdstat_llen | 无 | llen命令的统计信息 | + diff --git a/home/docs/help/redis_cluster.md b/home/docs/help/redis_cluster.md index 7143f5e3fdd..3aa41136f88 100644 --- a/home/docs/help/redis_cluster.md +++ b/home/docs/help/redis_cluster.md @@ -19,10 +19,10 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to appendonly yes bind 0.0.0.0 protected-mode no - + ``` - *docker-compose.yml* + *docker-compose.yml* ```yml services: @@ -34,7 +34,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "1000:6379" - + redis-master-2: image: redis:latest container_name: redis-master-2 @@ -43,7 +43,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "2000:6379" - + redis-master-3: image: redis:latest container_name: redis-master-3 @@ -52,7 +52,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "3000:6379" - + redis-slave-1: image: redis:latest container_name: redis-slave-1 @@ -61,7 +61,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "1001:6379" - + redis-slave-2: image: redis:latest container_name: redis-slave-2 @@ -70,7 +70,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "2001:6379" - + redis-slave-3: image: redis:latest container_name: redis-slave-3 @@ -79,60 +79,59 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to - 
./redis.conf:/usr/local/etc/redis/redis.conf ports: - "3001:6379" - + networks: default: external: name: hertzbeat-redis-cluster ``` - 2. View the IP addresses of all containers from the network, which is required when building a cluster. - ```bash - docker-compose up -d - docker network inspect hertzbeat-redis-cluste - ``` - - ``` - "Containers": { - "187b879f73c473b3cbb82ff95f668e65af46115ddaa27f3ff1a712332b981531": { - ... - "Name": "redis-slave-2", - "IPv4Address": "192.168.117.6/24", - ... - }, - "45e22b64c82e51857fc104436cdd6cc0c5776ad10a2e4b9d8e52e36cfb87217e": { - ... - "Name": "redis-master-3", - "IPv4Address": "192.168.117.3/24 - ... - }, - "57838ae37956f8af181f9a131eb011efec332b9ed3d49480f59d8962ececf288": { - ... - "Name": "redis-master-2", - "IPv4Address": "192.168.117.7/24", - ... - }, - "94478d14bd950bcde533134870beb89b392515843027a0595af56dd1e3305a76": { - ... - "Name": "redis-master-1", - "IPv4Address": "192.168.117.4/24", - ... - }, - "ad055720747e7fc430ba794d5321723740eeb345c280073e4292ed4302ff657c": { - ... - "Name": "redis-slave-3", - "IPv4Address": "192.168.117.2/24", - ... - }, - "eddded1ac4c7528640ba0c6befbdaa48faa7cb13905b934ca1f5c69ab364c725": { - ... - "Name": "redis-slave-1", - "IPv4Address": "192.168.117.5/24", - ... - } - }, - ``` + ```bash + docker-compose up -d + docker network inspect hertzbeat-redis-cluste + ``` + + ``` + "Containers": { + "187b879f73c473b3cbb82ff95f668e65af46115ddaa27f3ff1a712332b981531": { + ... + "Name": "redis-slave-2", + "IPv4Address": "192.168.117.6/24", + ... + }, + "45e22b64c82e51857fc104436cdd6cc0c5776ad10a2e4b9d8e52e36cfb87217e": { + ... + "Name": "redis-master-3", + "IPv4Address": "192.168.117.3/24 + ... + }, + "57838ae37956f8af181f9a131eb011efec332b9ed3d49480f59d8962ececf288": { + ... + "Name": "redis-master-2", + "IPv4Address": "192.168.117.7/24", + ... + }, + "94478d14bd950bcde533134870beb89b392515843027a0595af56dd1e3305a76": { + ... 
+ "Name": "redis-master-1", + "IPv4Address": "192.168.117.4/24", + ... + }, + "ad055720747e7fc430ba794d5321723740eeb345c280073e4292ed4302ff657c": { + ... + "Name": "redis-slave-3", + "IPv4Address": "192.168.117.2/24", + ... + }, + "eddded1ac4c7528640ba0c6befbdaa48faa7cb13905b934ca1f5c69ab364c725": { + ... + "Name": "redis-slave-1", + "IPv4Address": "192.168.117.5/24", + ... + } + }, + ``` 3. Go inside the container to build a Redis cluster. ```bash @@ -162,3 +161,4 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to ### Configuration Parameters Please see [REDIS](https://hertzbeat.apache.org/docs/help/redis) doc. + diff --git a/home/docs/help/rocketmq.md b/home/docs/help/rocketmq.md index f56bdfc2f14..f31dea47d9b 100644 --- a/home/docs/help/rocketmq.md +++ b/home/docs/help/rocketmq.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Namesrv Host | IPV4,IPV6 of RocketMQ name server(eg: https://, http://)。 | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. 
| @@ -24,7 +24,7 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] #### Metric set:cluster -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|---------------------------------------| | BrokerId | none | Broker id | | Address | none | Broker address | @@ -38,7 +38,7 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] #### Metric set:Consumer -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------|-------------|-------------------------| | Consumer_group | none | Consumer group | | Client_quantity | none | Number of clients | @@ -46,3 +46,4 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] | Consume_type | none | Consume type | | Consume_tps | none | Consume tps | | Delay | none | Delay | + diff --git a/home/docs/help/rockylinux.md b/home/docs/help/rockylinux.md index f83eb606b0a..b1e093bc210 100644 --- a/home/docs/help/rockylinux.md +++ b/home/docs/help/rockylinux.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IP, IPV6, or domain name of the monitored endpoint. Note ⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitoring, ensuring uniqueness. 
| @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|---------------------------| | Host Name | None | Host name. | | System Version | None | Operating system version. | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|-------------------------------------------| | info | None | CPU model. | | cores | None | Number of CPU cores. | @@ -47,7 +47,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Memory Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|------------------------------------| | total | Mb | Total memory capacity. | | used | Mb | Used memory by user programs. | @@ -58,7 +58,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Disk Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------|-------------|------------------------------------| | disk_num | None | Total number of disks. | | partition_num | None | Total number of partitions. | @@ -68,7 +68,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Interface Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------------| | interface_name | None | Name of the network interface. | | receive_bytes | Mb | Inbound data traffic. 
| @@ -105,3 +105,4 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/docs/help/shenyu.md b/home/docs/help/shenyu.md index 01523769c78..c7f12bbfaf0 100644 --- a/home/docs/help/shenyu.md +++ b/home/docs/help/shenyu.md @@ -5,27 +5,27 @@ sidebar_label: Apache ShenYu keywords: [open source monitoring tool, open source apache shenyu monitoring tool, monitoring apache shenyu metrics] --- -> monitor ShenYu running status(JVM-related), include request response and other related metrics. +> monitor ShenYu running status(JVM-related), include request response and other related metrics. -## Pre-monitoring operations +## Pre-monitoring operations -Enable `metrics` plugin in ShenYu, expose it's prometheus metrics endpoint。 +Enable `metrics` plugin in ShenYu, expose it's prometheus metrics endpoint。 -Refer [ShenYu Document](https://shenyu.apache.org/docs/plugin-center/observability/metrics-plugin) +Refer [ShenYu Document](https://shenyu.apache.org/docs/plugin-center/observability/metrics-plugin) -Two Steps Mainly: +Two Steps Mainly: -1. add metrics plugin dependency in gateway's pom.xml. +1. add metrics plugin dependency in gateway's pom.xml. ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` -2. modify this config in shenyu gateway yaml. +2. modify this config in shenyu gateway yaml. 
```yaml shenyu: @@ -57,75 +57,74 @@ Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond #### Index collection: shenyu_request_total -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------------| -| value | None | Collect all requests from ShenYu gateway | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------| +| value | None | Collect all requests from ShenYu gateway | #### Metric collection: shenyu_request_throw_created -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------------| -| value | None | Collect the number of abnormal requests from ShenYu Gateway | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------------------| +| value | None | Collect the number of abnormal requests from ShenYu Gateway | #### Metric collection: process_cpu_seconds_total -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| value | none | total user and system CPU elapsed seconds | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------| +| value | none | total user and system CPU elapsed seconds | #### Metric collection: process_open_fds -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------| -| value | none | number of open file descriptors | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------| +| value | none | number of open file descriptors | #### Metric collection: process_max_fds -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|----------------| -| value | none | maximum number of open file descriptors | +| Metric Name | Metric Unit | Metric Help Description 
| +|-------------|-------------|-----------------------------------------| +| value | none | maximum number of open file descriptors | #### Metric collection: jvm_info | Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-----------| -| runtime | none | JVM version information | -| vendor | none | JVM version information | -| version | None | JVM version information | +|-------------|-------------|-------------------------| +| runtime | none | JVM version information | +| vendor | none | JVM version information | +| version | None | JVM version information | #### Metric collection: jvm_memory_bytes_used -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| area | None | JVM memory area | -| value | MB | used size of the given JVM memory region | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------| +| area | None | JVM memory area | +| value | MB | used size of the given JVM memory region | #### Metric collection: jvm_memory_pool_bytes_used -| Metric Name | Metric Unit | Metric Help Description | -|--------|------|-----------------| -| pool | None | JVM memory pool | -| value | MB | used size of the given JVM memory pool | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------------| +| pool | None | JVM memory pool | +| value | MB | used size of the given JVM memory pool | #### Metric collection: jvm_memory_pool_bytes_committed -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| pool | None | JVM memory pool | -| value | MB | The committed size of the given JVM memory pool | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------| +| pool | None | JVM memory pool | +| value | MB | The committed size of the given JVM 
memory pool | #### Metric collection: jvm_memory_pool_bytes_max -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------| ----------- | -| pool | None | JVM memory pool | -| value | MB | The maximum size of the memory pool for the given JVM | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------------| +| pool | None | JVM memory pool | +| value | MB | The maximum size of the memory pool for the given JVM | #### Metric collection: jvm_threads_state -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------| -| state | none | thread state | -| value | None | The number of threads corresponding to the thread state | - +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------------------------------| +| state | none | thread state | +| value | None | The number of threads corresponding to the thread state | diff --git a/home/docs/help/smtp.md b/home/docs/help/smtp.md index 971de82c3e0..fedb17e0040 100644 --- a/home/docs/help/smtp.md +++ b/home/docs/help/smtp.md @@ -13,12 +13,11 @@ Determine whether the server is available through the hello command in SMTP > see https://datatracker.ietf.org/doc/html/rfc821#page-13 - **Protocol Use:SMTP** ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -33,7 +32,7 @@ Determine whether the server is available through the hello command in SMTP #### Metrics Set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |--------------|-------------|----------------------------------------------------------------| | responseTime | ms | The time it takes for the SMTP server to respond to a request. | | response | | Response Status. | diff --git a/home/docs/help/spark.md b/home/docs/help/spark.md index 3d4b44828ea..41865300024 100644 --- a/home/docs/help/spark.md +++ b/home/docs/help/spark.md @@ -15,12 +15,9 @@ keywords: [open source monitoring tool, open source java spark monitoring tool, Refer: https://spark.apache.org/docs/latest/spark-standalone.html - **监控配置spark的监控主要分为Master、Worker、driver、executor监控。Master和Worker的监控在spark集群运行时即可监控,Driver和Excutor的监控需要针对某一个app来进行监控。** **如果都要监控,需要根据以下步骤来配置** - - ## 第一步 **修改$SPARK_HOME/conf/spark-env.sh,添加以下语句:** @@ -36,8 +33,6 @@ export SPARK_DAEMON_JAVA_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.manageme 语句中有$JMX_PORT,这个的值可以自定义,也可以获取一个随机数作为端口号。 如果端口自定义为一个具体的值,而 spark 的 Master 和其中之一的 Worker 在同一台机器上,会出现端口冲突的情况。 - - ## 第二步 **vim $SPARK_HOME/conf/metrics.properties 添加如下内容** @@ -50,10 +45,6 @@ driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource ``` - - - - ## 第三步 **vim $SPARK_HOME/conf/spark-defaults.conf,添加以下项为driver和executor设置监控端口,在有程序运行的情况下,此端口会被打开。** @@ -69,11 +60,9 @@ gement.jmxremote.port=8711 在spark的Master和Worker正常运行以及spark-submit提交了一个程序的情况下,可以从linux中查询出端口号码。 - - ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | 
Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -107,16 +96,15 @@ gement.jmxremote.port=8711 #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -125,4 +113,3 @@ gement.jmxremote.port=8711 | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/spring_gateway.md b/home/docs/help/spring_gateway.md index ae24228c222..66c5f0b4f29 100644 --- a/home/docs/help/spring_gateway.md +++ b/home/docs/help/spring_gateway.md @@ -19,6 +19,7 @@ If you want to monitor information in 'Spring Gateway' with this monitoring type spring-boot-starter-actuator ``` + **2. Modify the YML configuration exposure metric interface:** ```yaml @@ -35,56 +36,55 @@ management: ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 8080. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: Health -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------------------------------| -| status | None | Service health: UP, Down | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------| +| status | None | Service health: UP, Down | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|---------| -------- |----------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------| +| profile | None | The application runs profile: prod, dev, test | +| port | None | Apply the exposed port | +| os | None | Run the operating system | +| os_arch | None | Run the operating system architecture | +| jdk_vendor | None | jdk vendor | +| jvm_version | None | jvm version | #### metric Collection: threads -| Metric Name | metric unit | Metrics help describe | -| ---------------- 
|------|--------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|----------------------------------|-------------------| +| state | None | Thread status | +| number | None | This thread state corresponds to | number of threads | #### metric Collection: memory_used -| Metric Name | metric unit | Metrics help describe | -|---------|------|------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------------------| +| space | None | Memory space name | +| mem_used | MB | This space occupies a memory size of | #### metric Collection: route_info -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------| -| route_id | None | Route id | -| predicate | None | This is a routing matching rule | -| uri | None | This is a service resource identifier| -| order | None | The priority of this route | - +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|---------------------------------------| +| route_id | None | Route id | +| predicate | None | This is a routing matching rule | +| uri | None | This is a service resource identifier | +| order | None | The priority of this route | diff --git a/home/docs/help/springboot2.md b/home/docs/help/springboot2.md index ca46530f77b..6452aff270e 100644 --- a/home/docs/help/springboot2.md +++ b/home/docs/help/springboot2.md @@ -19,6 +19,7 @@ If you want to monitor information in 'SpringBoot' with this monitoring type, yo spring-boot-starter-actuator ``` + **2. 
Modify the YML configuration exposure metric interface:** ```yaml @@ -29,7 +30,9 @@ management: include: '*' enabled-by-default: on ``` + *Note: If your project also introduces authentication related dependencies, such as springboot security, the interfaces exposed by SpringBoot Actor may be intercepted. In this case, you need to manually release these interfaces. Taking springboot security as an example, you should add the following code to the Security Configuration class:* + ```java public class SecurityConfig extends WebSecurityConfigurerAdapter{ @Override @@ -45,47 +48,49 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ } } ``` + ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 8080. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: Health -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------------------------------| -| status | None | Service health: UP, Down | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------| +| status | None | Service health: UP, Down | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|---------| -------- |----------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------| +| profile | None | The application runs profile: prod, dev, test | +| port | None | Apply the exposed port | +| os | None | Run the operating system | +| os_arch | None | Run the operating system architecture | +| jdk_vendor | None | jdk vendor | +| jvm_version | None | jvm version | #### metric Collection: threads -| Metric Name | metric unit | Metrics help describe | -| ---------------- 
|------|--------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|----------------------------------|-------------------| +| state | None | Thread status | +| number | None | This thread state corresponds to | number of threads | #### metric Collection: memory_used -| Metric Name | metric unit | Metrics help describe | -|---------|------|------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------------------| +| space | None | Memory space name | +| mem_used | MB | This space occupies a memory size of | + diff --git a/home/docs/help/springboot3.md b/home/docs/help/springboot3.md index 0dbc32fc834..47b3db10b5c 100644 --- a/home/docs/help/springboot3.md +++ b/home/docs/help/springboot3.md @@ -51,7 +51,7 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### Configuration Parameters -| Parameter Name | Parameter Description | +| Parameter Name | Parameter Description | |-------------------|----------------------------------------------------------------------------------------------------------------------| | Monitor Host | The monitored peer's IPV4, IPV6, or domain name. Note⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitor, ensuring uniqueness is necessary. 
| @@ -65,23 +65,28 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### Collection Metrics #### Metric Set: Availability + | Metric Name | Unit | Metric Description | |--------------|------|--------------------| | responseTime | ms | Response time | #### Metric Set: Threads -| Metric Name | Unit | Metric Description | + +| Metric Name | Unit | Metric Description | |-------------|------|----------------------------------| | state | None | Thread state | | size | None | Number of threads for this state | #### Metric Set: Memory Usage -| Metric Name | Unit | Metric Description | + +| Metric Name | Unit | Metric Description | |-------------|------|-----------------------------| | space | None | Memory space name | | mem_used | MB | Memory usage for this space | #### Metric Set: Health Status -| Metric Name | Unit | Metric Description | + +| Metric Name | Unit | Metric Description | |-------------|------|---------------------------------| | status | None | Service health status: UP, Down | + diff --git a/home/docs/help/sqlserver.md b/home/docs/help/sqlserver.md index cc12abf0d7e..71bd8ebdc83 100644 --- a/home/docs/help/sqlserver.md +++ b/home/docs/help/sqlserver.md @@ -9,51 +9,49 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 1433 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 1433 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| machine_name | none | Windows computer name running the server instance | -| server_name | none | Server and instance information SQL Server associated with Windows instance | -| version | none | Version of the instance,SQL Server,format is "major.minor.build.revision" | -| edition | none | The product SQL server version of the installed instance | -| start_time | none | Database start time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-----------------------------------------------------------------------------| +| machine_name | none | Windows computer name running the server instance | +| server_name | none | Server and instance information SQL Server associated with Windows instance | +| version | none | Version of the instance,SQL Server,format is "major.minor.build.revision" | +| edition | none | The product SQL server version of the installed 
instance | +| start_time | none | Database start time | #### Metric set:performance_counters -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| database_pages | none | Database pages, Number of pages obtained (buffer pool) | -| target_pages | none | Target pages, The desired number of pages that the buffer pool must have | -| page_life_expectancy | s | Page life expectancy. The time that data pages stay in the buffer pool. This time is generally greater than 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, Database buffer pool cache hit rate. The probability that the requested data is found in the buffer pool is generally greater than 80%, otherwise the buffer pool capacity may be too small | -| checkpoint_pages_sec | none | Checkpoint pages/sec, The number of dirty pages written to the disk by the checkpoint per second. If the data is too high, it indicates that there is a lack of memory capacity | -| page_reads_sec | none | Page reads/sec, Number of pages read per second in the cache pool | -| page_writes_sec | none | Page writes/sec, Number of pages written per second in the cache pool | - +| Metric name | Metric unit | Metric help description | +|------------------------|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| database_pages | none | Database pages, Number of pages obtained (buffer pool) | +| target_pages | none | Target pages, The desired number of pages that the buffer pool must have | +| page_life_expectancy | s | Page life expectancy. The time that data pages stay in the buffer pool. This time is generally greater than 300 | +| buffer_cache_hit_ratio | % | Buffer cache hit ratio, Database buffer pool cache hit rate. 
The probability that the requested data is found in the buffer pool is generally greater than 80%, otherwise the buffer pool capacity may be too small | +| checkpoint_pages_sec | none | Checkpoint pages/sec, The number of dirty pages written to the disk by the checkpoint per second. If the data is too high, it indicates that there is a lack of memory capacity | +| page_reads_sec | none | Page reads/sec, Number of pages read per second in the cache pool | +| page_writes_sec | none | Page writes/sec, Number of pages written per second in the cache pool | #### Metric set:connection -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| user_connection | none | Number of connected sessions | - +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------| +| user_connection | none | Number of connected sessions | ### Common Problem @@ -61,10 +59,12 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo jdk version: jdk11 Description of the problem: SQL Server 2019 uses the SA user connection to report an error -Error message: +Error message: + ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". 
ClientConnectionId:xxxxxxxxxxxxxxxxxxxx ``` + Screenshot of the problem: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) diff --git a/home/docs/help/ssl_cert.md b/home/docs/help/ssl_cert.md index a65245d9057..e7b60fc8a89 100644 --- a/home/docs/help/ssl_cert.md +++ b/home/docs/help/ssl_cert.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, open source ssl cert monitoring tool, mo ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | | Monitoring name | The name that identifies this monitoring, and the name needs to be unique. 
| @@ -23,11 +23,12 @@ keywords: [open source monitoring tool, open source ssl cert monitoring tool, mo #### Metric collection: certificate -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|----------| -| subject | none | certificate name | -| expired | no | expired or not | -| start_time | None | Validity start time | -| start_timestamp | ms millisecond | Validity start timestamp | -| end_time | None | Expiration time | -| end_timestamp | ms milliseconds | expiration timestamp | +| Metric Name | Metric Unit | Metric Help Description | +|-----------------|-----------------|--------------------------| +| subject | none | certificate name | +| expired | no | expired or not | +| start_time | None | Validity start time | +| start_timestamp | ms millisecond | Validity start timestamp | +| end_time | None | Expiration time | +| end_timestamp | ms milliseconds | expiration timestamp | + diff --git a/home/docs/help/status.md b/home/docs/help/status.md index 1e1b8251b8d..0d9ce6ff28b 100644 --- a/home/docs/help/status.md +++ b/home/docs/help/status.md @@ -1,7 +1,6 @@ Here is the English translation of the provided text: --- - id: status title: Status Page sidebar_label: Status Page @@ -16,14 +15,14 @@ It supports the linkage synchronization between component status and monitoring The fields that need to be filled in are as follows: -| Field Name | Field Description | Example | -|--------------|--------------------------------------------------|-----------------------------------------------------------------------------------------------------| -| Organization Name | Name of the organization | HertzBeat | -| Organization Description | Detailed description of the organization | Apache HertzBeat (incubating) is an easy-to-use and user-friendly open-source real-time monitoring and alerting system, no agent required, high-performance cluster, compatible with Prometheus, providing powerful custom monitoring and status page building capabilities. 
| -| Website Link | URL of the organization's website for more information | https://hertzbeat.apache.org/ | -| Logo Image | Path or URL of the organization's official logo image, preferably in .svg format | https://hertzbeat.apache.org/zh-cn/img/hertzbeat-logo.svg | -| Feedback Address | Address to receive feedback | https://github.com/apache/hertzbeat/issues | -| Theme Color | Main color tone of the status page | Click to select on the page | +| Field Name | Field Description | Example | +|--------------------------|----------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Organization Name | Name of the organization | HertzBeat | +| Organization Description | Detailed description of the organization | Apache HertzBeat (incubating) is an easy-to-use and user-friendly open-source real-time monitoring and alerting system, no agent required, high-performance cluster, compatible with Prometheus, providing powerful custom monitoring and status page building capabilities. | +| Website Link | URL of the organization's website for more information | https://hertzbeat.apache.org/ | +| Logo Image | Path or URL of the organization's official logo image, preferably in .svg format | https://hertzbeat.apache.org/zh-cn/img/hertzbeat-logo.svg | +| Feedback Address | Address to receive feedback | https://github.com/apache/hertzbeat/issues | +| Theme Color | Main color tone of the status page | Click to select on the page | After filling in the organization information, click `Confirm`. 
@@ -35,12 +34,12 @@ After adding a component, the status page will display the status information of Click `Add Component` to add the component to be monitored and fill in the following fields: -| Field Name | Field Description | Example | -|-------------------|-----------------------------------------------------------------------------------------------------------|------------------------------------------| -| Service Component | Name of the component service | Development Environment ElasticSearch | -| Component Description | Detailed description of the component service | Development environment, ElasticSearch (ip:192.168.1.1) | -| Service Status Calculation Method | Method of calculating the service status of the component.
Automatic Calculation: Automatically calculate the status based on the monitored status of the component.
Manual Setting: Manually configure the component status. | Automatic Calculation / Manual Setting (Choose one) | -| Matching Tag | Status calculation associated tag, use the availability status data of all monitors associated with the tag to calculate the service status of the component. | Select the component tag on the page | +| Field Name | Field Description | Example | +|-----------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| Service Component | Name of the component service | Development Environment ElasticSearch | +| Component Description | Detailed description of the component service | Development environment, ElasticSearch (ip:192.168.1.1) | +| Service Status Calculation Method | Method of calculating the service status of the component.
Automatic Calculation: Automatically calculate the status based on the monitored status of the component.
Manual Setting: Manually configure the component status. | Automatic Calculation / Manual Setting (Choose one) | +| Matching Tag | Status calculation associated tag, use the availability status data of all monitors associated with the tag to calculate the service status of the component. | Select the component tag on the page | ### Set Status Calculation Time @@ -64,13 +63,13 @@ Click `Publish Event` ![](/img/docs/help/status-1.png) -| Field Name | Field Description | Example | -|--------------|-------------------------------------------------|------------------------------------------------------------| -| Event Name | Title of the event, should clearly reflect the core content of the event | "Server Downtime Event - April 5, 2023" | -| Affected Component | Select the components affected by this event | Select on the page | -| Process Status | Set the current status of the event for tracking the progress. Optional values: Investigating / Confirmed / Monitoring / Resolved | Confirmed | -| Publish Message | Official notification to convey to relevant parties, including event details, impact assessment, and countermeasures | Dear All, there is an issue with the development environment, the developers are urgently handling it, it is expected to be fixed within two hours. Please be patient, if you have urgent matters, please contact Tom: 130xxxx0000! 
| +| Field Name | Field Description | Example | +|--------------------|-----------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Event Name | Title of the event, should clearly reflect the core content of the event | "Server Downtime Event - April 5, 2023" | +| Affected Component | Select the components affected by this event | Select on the page | +| Process Status | Set the current status of the event for tracking the progress. Optional values: Investigating / Confirmed / Monitoring / Resolved | Confirmed | +| Publish Message | Official notification to convey to relevant parties, including event details, impact assessment, and countermeasures | Dear All, there is an issue with the development environment, the developers are urgently handling it, it is expected to be fixed within two hours. Please be patient, if you have urgent matters, please contact Tom: 130xxxx0000! | > Note: You can publish messages multiple times to keep the team updated on the current status. -![](/img/docs/help/status-2.png) \ No newline at end of file +![](/img/docs/help/status-2.png) diff --git a/home/docs/help/tidb.md b/home/docs/help/tidb.md index 2a7bc5b65b6..83128c527c8 100644 --- a/home/docs/help/tidb.md +++ b/home/docs/help/tidb.md @@ -1,55 +1,55 @@ ---- -id: tidb -title: Monitoring:TiDB database monitoring -sidebar_label: TiDB database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring tidb database metrics] ---- - -> HertzBeat monitors general performance metrics of TiDB through HTTP and JDBC protocol. 
- -[Metrics Schema](https://docs.pingcap.com/tidb/stable/metrics-schema) - -[METRICS_SUMMARY](https://docs.pingcap.com/tidb/stable/information-schema-metrics-summary) - -[METRICS_TABLES](https://docs.pingcap.com/tidb/stable/information-schema-metrics-tables) - -**Protocol Use: HTTP and JDBC** - -### Configuration parameter - -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Task name | Identify the name of this monitoring. The name needs to be unique | -| Service Port | The port that the TiDB database provides externally for status reporting is 10080 by default | -| PD Port | The PD port for the TiDB database, which defaults to 2379 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 6000ms | -| JDBC Port | The TiDB database externally provides the port used for client requests, which defaults to 4000 | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| JDBC URL | Database using [JDBC](https://docs.pingcap.com/tidb/stable/dev-guide-connect-to-tidb#jdbc) connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - - -### Collection Metric - -The monitoring template will retrieve the monitoring metrics from the TiDB System Variables table, and the user can retrieve the [TiDB System Variables Table](https://docs.pingcap.com/tidb/stable/system-variables) by himself to query the required information or other system variables. - -Besides, TiDB also provides default monitoring metrics table, see [Metrics Schema](https://docs.pingcap.com/tidb/stable/metrics-schema) and [METRICS_SUMMARY](https://docs.pingcap.com/tidb/stable/information-schema-metrics-summary), and users can add their own sql codes according to their needs. - -Due to the large number of metrics that can be monitored, only the metrics queried in the monitoring template are described below. - -#### Metric set: global variables - -| Metric Name | Metric Unit | Metric Help Description | -|---------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| version | none | The MySQL version, followed by the TiDB version. For example '8.0.11-TiDB-v7.5.1'. | -| version_comment | none | The TiDB version. 
For example, 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible'. | -| version_compile_machine | none | The name of the CPU architecture on which TiDB is running. | -| version_compile_os | none | The name of the OS on which TiDB is running. | -| max_connections | none | The maximum number of concurrent connections permitted for a single TiDB instance. This variable can be used for resources control. The default value 0 means no limit. When the value of this variable is larger than 0, and the number of connections reaches the value, the TiDB server rejects new connections from clients. | -| datadir | none | The location where data is stored. This location can be a local path /tmp/tidb, or point to a PD server if the data is stored on TiKV. A value in the format of ${pd-ip}:${pd-port} indicates the PD server that TiDB connects to on startup. | -| port | none | The port that the tidb-server is listening on when speaking the MySQL protocol. | +--- +id: tidb +title: Monitoring:TiDB database monitoring +sidebar_label: TiDB database +keywords: [open source monitoring tool, open source database monitoring tool, monitoring tidb database metrics] +--- + +> HertzBeat monitors general performance metrics of TiDB through HTTP and JDBC protocol. + +[Metrics Schema](https://docs.pingcap.com/tidb/stable/metrics-schema) + +[METRICS_SUMMARY](https://docs.pingcap.com/tidb/stable/information-schema-metrics-summary) + +[METRICS_TABLES](https://docs.pingcap.com/tidb/stable/information-schema-metrics-tables) + +**Protocol Use: HTTP and JDBC** + +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Target Host | Monitored IPV4, IPV6 or domain name. 
Note⚠️Without protocol header (eg: https://, http://) | +| Task name | Identify the name of this monitoring. The name needs to be unique | +| Service Port | The port that the TiDB database provides externally for status reporting is 10080 by default | +| PD Port | The PD port for the TiDB database, which defaults to 2379 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 6000ms | +| JDBC Port | The TiDB database externally provides the port used for client requests, which defaults to 4000 | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| JDBC URL | Database using [JDBC](https://docs.pingcap.com/tidb/stable/dev-guide-connect-to-tidb#jdbc) connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | + +### Collection Metric + +The monitoring template will retrieve the monitoring metrics from the TiDB System Variables table, and the user can retrieve the [TiDB System Variables Table](https://docs.pingcap.com/tidb/stable/system-variables) by himself to query the required information or other system variables. 
+ +Besides, TiDB also provides default monitoring metrics table, see [Metrics Schema](https://docs.pingcap.com/tidb/stable/metrics-schema) and [METRICS_SUMMARY](https://docs.pingcap.com/tidb/stable/information-schema-metrics-summary), and users can add their own sql codes according to their needs. + +Due to the large number of metrics that can be monitored, only the metrics queried in the monitoring template are described below. + +#### Metric set: global variables + +| Metric Name | Metric Unit | Metric Help Description | +|-------------------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| version | none | The MySQL version, followed by the TiDB version. For example '8.0.11-TiDB-v7.5.1'. | +| version_comment | none | The TiDB version. For example, 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible'. | +| version_compile_machine | none | The name of the CPU architecture on which TiDB is running. | +| version_compile_os | none | The name of the OS on which TiDB is running. | +| max_connections | none | The maximum number of concurrent connections permitted for a single TiDB instance. This variable can be used for resources control. The default value 0 means no limit. When the value of this variable is larger than 0, and the number of connections reaches the value, the TiDB server rejects new connections from clients. | +| datadir | none | The location where data is stored. This location can be a local path /tmp/tidb, or point to a PD server if the data is stored on TiKV. A value in the format of ${pd-ip}:${pd-port} indicates the PD server that TiDB connects to on startup. 
| +| port | none | The port that the tidb-server is listening on when speaking the MySQL protocol. | + diff --git a/home/docs/help/time_expression.md b/home/docs/help/time_expression.md index e2cb0928b05..482fc04cc05 100644 --- a/home/docs/help/time_expression.md +++ b/home/docs/help/time_expression.md @@ -23,28 +23,28 @@ ${FORMATTER [{ + | - } ]} > Example outputs are based on the current time being `2022-04-24 02:40:00.123` -| Name | Description | Example | -|---------------|------------------------------------|------------------------| -| @now | Formats as `yyyy-MM-dd HH:mm:ss` | 2022-04-24 02:40:00 | -| @date | Formats as `yyyy-MM-dd` | 2022-04-24 | -| @timestamp10 | Returns 10-digit timestamp | 1650768000 | -| @timestamp | Returns 13-digit timestamp | 1650768000000 | -| @time | Formats as `HH:mm:ss` | 02:40:00 | -| @year | Formats as `yyyy` | 2022 | -| @month | Formats as `MM` | 04 | -| @day | Formats as `dd` | 24 | -| @hour | Formats as `HH` | 02 | -| @minute | Formats as `mm` | 40 | -| @millisecond | Formats as `SSS` | 123 | -| @second | Formats as `ss` | 00 | +| Name | Description | Example | +|--------------|----------------------------------|---------------------| +| @now | Formats as `yyyy-MM-dd HH:mm:ss` | 2022-04-24 02:40:00 | +| @date | Formats as `yyyy-MM-dd` | 2022-04-24 | +| @timestamp10 | Returns 10-digit timestamp | 1650768000 | +| @timestamp | Returns 13-digit timestamp | 1650768000000 | +| @time | Formats as `HH:mm:ss` | 02:40:00 | +| @year | Formats as `yyyy` | 2022 | +| @month | Formats as `MM` | 04 | +| @day | Formats as `dd` | 24 | +| @hour | Formats as `HH` | 02 | +| @minute | Formats as `mm` | 40 | +| @millisecond | Formats as `SSS` | 123 | +| @second | Formats as `ss` | 00 | ### Supported Time Units | Name | Description | |------|-------------| -| y | Year | +| y | Year | | M | Month | -| d | Day | +| d | Day | | H | Hour | | m | Minute | | s | Second | @@ -57,8 +57,9 @@ ${FORMATTER [{ + | - } ]} #### Usage Examples 1. 
Simple expression - - `${now}` gets the current time and formats it as `yyyy-MM-dd HH:mm:ss` - - `${time+1h}` calculates the time one hour from now and formats it as `HH:mm:ss` - - `${time+1h+15s+30s}` calculates the time one hour, 15 minutes, and 30 seconds from now and formats it as `HH:mm:ss` + - `${now}` gets the current time and formats it as `yyyy-MM-dd HH:mm:ss` + - `${time+1h}` calculates the time one hour from now and formats it as `HH:mm:ss` + - `${time+1h+15s+30s}` calculates the time one hour, 15 minutes, and 30 seconds from now and formats it as `HH:mm:ss` 2. Complex expression template (if the built-in formatter does not meet your needs, you can combine multiple expressions) - - `${@year}年${@month}月${@day}日` returns the current date formatted as yyyy年MM月dd日 + - `${@year}年${@month}月${@day}日` returns the current date formatted as yyyy年MM月dd日 + diff --git a/home/docs/help/tomcat.md b/home/docs/help/tomcat.md index 8b35808ffc8..60591f85579 100644 --- a/home/docs/help/tomcat.md +++ b/home/docs/help/tomcat.md @@ -11,61 +11,60 @@ keywords: [open source monitoring tool, open source tomcat monitoring tool, moni ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by JMX | +| Username | JMX connection user name, optional | +| Password | JMX connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metrics #### Metrics Set:memory_pool -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:code_cache -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - +| Metric name | Metric unit | Metric help description | +|-----------------------|-------------|--------------------------| +| LoadedClassCount | | Loaded Class Count | +| TotalLoadedClassCount | | Total Loaded Class Count | +| UnloadedClassCount | | Unloaded Class Count | #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | 
Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | +| Metric name | Metric unit | Metric help description | +|-------------------------|-------------|----------------------------| +| TotalStartedThreadCount | | Total Started Thread Count | +| ThreadCount | | Thread Count | +| PeakThreadCount | | Peak Thread Count | +| DaemonThreadCount | | Daemon Thread Count | +| CurrentThreadUserTime | ms | Current Thread User Time | +| CurrentThreadCpuTime | ms | Current Thread Cpu Time | ### Tomcat Enable JMX Protocol -1. After building tomcat, enter the bin directory under tomcat and modify the catalina.sh file +1. After building tomcat, enter the bin directory under tomcat and modify the catalina.sh file 2. vim catalina.sh Attention⚠️ Replace Hostname And Port diff --git a/home/docs/help/ubuntu.md b/home/docs/help/ubuntu.md index e7d368c9ea3..8d3b65ce195 100644 --- a/home/docs/help/ubuntu.md +++ b/home/docs/help/ubuntu.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source linux ubuntu monitoring tool ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | 
+|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % 
| usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/docs/help/udp_port.md b/home/docs/help/udp_port.md index 7fdcce3cf77..51c3098dc9a 100644 --- a/home/docs/help/udp_port.md +++ b/home/docs/help/udp_port.md @@ -10,7 +10,7 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️ Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. | @@ -26,9 +26,7 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito #### Metric set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------|-------------------|-------------------------| | Response Time | Milliseconds (ms) | Website response time | - - diff --git a/home/docs/help/website.md b/home/docs/help/website.md index 5648f6a1d9e..afe86397c9e 100644 --- a/home/docs/help/website.md +++ b/home/docs/help/website.md @@ -5,11 +5,11 @@ sidebar_label: Website Monitor keywords: [open source monitoring tool, open source website monitoring tool, monitoring website metrics] --- -> Monitor whether the website is available, response time and other Metrics. +> Monitor whether the website is available, response time and other Metrics. 
-### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -20,10 +20,11 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | | Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary + +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | diff --git a/home/docs/help/websocket.md b/home/docs/help/websocket.md index 8e3d29a204a..13d1f6eed31 100644 --- a/home/docs/help/websocket.md +++ b/home/docs/help/websocket.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, Websocket监控 ] ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------------|--------------------------------------------------------------------------------------------------------------------------| | Host of WebSocket service | Monitored IPV4, IPV6 or domain name. 
Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. | @@ -23,7 +23,7 @@ keywords: [ open source monitoring tool, Websocket监控 ] #### Metric set:Summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------|-------------|-------------------------| | responseTime | ms | Response time | | httpVersion | none | HTTP version | @@ -31,3 +31,4 @@ keywords: [ open source monitoring tool, Websocket监控 ] | statusMessage | none | Status messages | | connection | none | Connect type | | upgrade | none | Upgraded protocols | + diff --git a/home/docs/help/windows.md b/home/docs/help/windows.md index 82e36d23470..e4be2bd6d96 100644 --- a/home/docs/help/windows.md +++ b/home/docs/help/windows.md @@ -6,38 +6,39 @@ keywords: [open source monitoring tool, open source windows monitoring tool, mon --- > Collect and monitor the general performance Metrics of Windows operating system through SNMP protocol. -> Note⚠️ You need to start SNMP service for Windows server. +> Note⚠️ You need to start SNMP service for Windows server. References: [What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) [What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) [Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) +[Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. 
Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Windows SNMP service. The default is 161 | -| SNMP version | SNMP protocol version V1 V2c V3 | +| Parameter name | Parameter help description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Windows SNMP service. The default is 161 | +| SNMP version | SNMP protocol version V1 V2c V3 | | SNMP community Word | SNMP agreement community name(Community Name). It is used to realize the authentication of SNMP network administrator when accessing SNMP management agent. Similar to password, the default value is public | -| Timeout | Protocol connection timeout | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Timeout | Protocol connection timeout | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:system -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | none | Host name | -| descr | none | Operating system description | -| uptime | none | System running time | -| numUsers | number | Current number of users | -| services | number | Current number of services | -| processes | number | Current number of processes | -| responseTime | ms | Collection response time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|------------------------------| +| name | none | Host name | +| descr | none | Operating system description | +| uptime | none | System running time | +| numUsers | number | Current number of users | +| services | number | Current number of services | +| processes | number | Current number of processes | +| responseTime | ms | Collection response time | + diff --git a/home/docs/help/yarn.md b/home/docs/help/yarn.md index 176a3209fee..d7f304ff910 100644 --- a/home/docs/help/yarn.md +++ b/home/docs/help/yarn.md @@ -15,69 +15,70 @@ Retrieve the HTTP monitoring port of Apache Yarn. Value: `yarn.resourcemanager.w ## Configuration Parameters -| Parameter Name | Parameter Description | -| ---------------- |----------------------------------------------------| -| Target Host | IP address, IPV6, or domain name of the monitored endpoint. Without protocol header. | -| Port | Monitoring port number of Apache Yarn, default is 8088. | -| Query Timeout | Timeout for querying Apache Yarn, in milliseconds, default is 6000 milliseconds. 
| +| Parameter Name | Parameter Description | +|------------------|-------------------------------------------------------------------------------------------| +| Target Host | IP address, IPV6, or domain name of the monitored endpoint. Without protocol header. | +| Port | Monitoring port number of Apache Yarn, default is 8088. | +| Query Timeout | Timeout for querying Apache Yarn, in milliseconds, default is 6000 milliseconds. | | Metrics Interval | Time interval for monitoring data collection, in seconds, minimum interval is 30 seconds. | ### Collected Metrics #### Metric Set: ClusterMetrics -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | -----------------------------------------| -| NumActiveNMs | | Number of currently active NodeManagers | -| NumDecommissionedNMs | | Number of currently decommissioned NodeManagers | -| NumDecommissioningNMs | | Number of nodes currently decommissioning | -| NumLostNMs | | Number of lost nodes in the cluster | -| NumUnhealthyNMs | | Number of unhealthy nodes in the cluster | +| Metric Name | Unit | Metric Description | +|-----------------------|------|-------------------------------------------------| +| NumActiveNMs | | Number of currently active NodeManagers | +| NumDecommissionedNMs | | Number of currently decommissioned NodeManagers | +| NumDecommissioningNMs | | Number of nodes currently decommissioning | +| NumLostNMs | | Number of lost nodes in the cluster | +| NumUnhealthyNMs | | Number of unhealthy nodes in the cluster | #### Metric Set: JvmMetrics -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | -------------------------------------------- | -| MemNonHeapCommittedM | MB | Current committed size of non-heap memory in JVM | -| MemNonHeapMaxM | MB | Maximum available non-heap memory in JVM | -| MemNonHeapUsedM | MB | Current used size of non-heap memory in JVM | -| MemHeapCommittedM | MB | Current committed size of heap memory in JVM | -| MemHeapMaxM | MB | 
Maximum available heap memory in JVM | -| MemHeapUsedM | MB | Current used size of heap memory in JVM | -| GcTimeMillis | | JVM GC time | -| GcCount | | Number of JVM GC occurrences | +| Metric Name | Unit | Metric Description | +|----------------------|------|--------------------------------------------------| +| MemNonHeapCommittedM | MB | Current committed size of non-heap memory in JVM | +| MemNonHeapMaxM | MB | Maximum available non-heap memory in JVM | +| MemNonHeapUsedM | MB | Current used size of non-heap memory in JVM | +| MemHeapCommittedM | MB | Current committed size of heap memory in JVM | +| MemHeapMaxM | MB | Maximum available heap memory in JVM | +| MemHeapUsedM | MB | Current used size of heap memory in JVM | +| GcTimeMillis | | JVM GC time | +| GcCount | | Number of JVM GC occurrences | #### Metric Set: QueueMetrics -| Metric Name | Unit | Metric Description | -| --------------------------- | ---- | -------------------------------------------- | -| queue | | Queue name | -| AllocatedVCores | | Allocated virtual cores (allocated) | -| ReservedVCores | | Reserved cores | -| AvailableVCores | | Available cores (unallocated) | -| PendingVCores | | Blocked scheduling cores | -| AllocatedMB | MB | Allocated (used) memory size | -| AvailableMB | MB | Available memory (unallocated) | -| PendingMB | MB | Blocked scheduling memory | -| ReservedMB | MB | Reserved memory | -| AllocatedContainers | | Number of allocated (used) containers | -| PendingContainers | | Number of blocked scheduling containers | -| ReservedContainers | | Number of reserved containers | -| AggregateContainersAllocated| | Total aggregated containers allocated | -| AggregateContainersReleased| | Total aggregated containers released | -| AppsCompleted | | Number of completed applications | -| AppsKilled | | Number of killed applications | -| AppsFailed | | Number of failed applications | -| AppsPending | | Number of pending applications | -| AppsRunning | | Number of currently running 
applications | -| AppsSubmitted | | Number of submitted applications | -| running_0 | | Number of jobs running for less than 60 minutes | -| running_60 | | Number of jobs running between 60 and 300 minutes | -| running_300 | | Number of jobs running between 300 and 1440 minutes | -| running_1440 | | Number of jobs running for more than 1440 minutes | +| Metric Name | Unit | Metric Description | +|------------------------------|------|-----------------------------------------------------| +| queue | | Queue name | +| AllocatedVCores | | Allocated virtual cores (allocated) | +| ReservedVCores | | Reserved cores | +| AvailableVCores | | Available cores (unallocated) | +| PendingVCores | | Blocked scheduling cores | +| AllocatedMB | MB | Allocated (used) memory size | +| AvailableMB | MB | Available memory (unallocated) | +| PendingMB | MB | Blocked scheduling memory | +| ReservedMB | MB | Reserved memory | +| AllocatedContainers | | Number of allocated (used) containers | +| PendingContainers | | Number of blocked scheduling containers | +| ReservedContainers | | Number of reserved containers | +| AggregateContainersAllocated | | Total aggregated containers allocated | +| AggregateContainersReleased | | Total aggregated containers released | +| AppsCompleted | | Number of completed applications | +| AppsKilled | | Number of killed applications | +| AppsFailed | | Number of failed applications | +| AppsPending | | Number of pending applications | +| AppsRunning | | Number of currently running applications | +| AppsSubmitted | | Number of submitted applications | +| running_0 | | Number of jobs running for less than 60 minutes | +| running_60 | | Number of jobs running between 60 and 300 minutes | +| running_300 | | Number of jobs running between 300 and 1440 minutes | +| running_1440 | | Number of jobs running for more than 1440 minutes | #### Metric Set: runtime -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | 
--------------------------| -| StartTime | | Startup timestamp | \ No newline at end of file +| Metric Name | Unit | Metric Description | +|-------------|------|--------------------| +| StartTime | | Startup timestamp | + diff --git a/home/docs/help/zookeeper.md b/home/docs/help/zookeeper.md index dadbbc70bcd..ca7e026a4c4 100644 --- a/home/docs/help/zookeeper.md +++ b/home/docs/help/zookeeper.md @@ -10,10 +10,12 @@ keywords: [open source monitoring tool, open source zookeeper monitoring tool, m ### PreRequisites #### Zookeeper four word command ->The current implementation scheme uses the four word command provided by zookeeper to collect Metrics. -Users need to add the four word command of zookeeper to the white list by themselves. + +> The current implementation scheme uses the four word command provided by zookeeper to collect Metrics. +> Users need to add the four word command of zookeeper to the white list by themselves. Steps + > 1.Find our zookeeper configuration file, which is usually zoo.cfg. > > 2.Add the following commands to the configuration file @@ -28,92 +30,94 @@ Steps > 3.Restart service -```shell +```shell zkServer.sh restart ``` #### netcat protocol + The current implementation scheme requires us to deploy the Linux server of zookeeper Command environment for installing netcat > netcat installation steps -```shell -yum install -y nc -``` +> +> ```shell +> yum install -y nc +> ``` If the terminal displays the following information, the installation is successful + ```shell Complete! ``` ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Zookeeper. 
The default is 2181 | -| Query timeout | Set the timeout of Zookeeper connection, unit: ms, default: 3000ms | -| Username | User name of the Linux connection where Zookeeper is located | -| Password | Password of the Linux connection where Zookeeper is located | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Zookeeper. The default is 2181 | +| Query timeout | Set the timeout of Zookeeper connection, unit: ms, default: 3000ms | +| Username | User name of the Linux connection where Zookeeper is located | +| Password | Password of the Linux connection where Zookeeper is located | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:conf -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| clientPort | none | Port | -| dataDir | none | Data snapshot file directory. By default, 100000 operations generate a snapshot | -| dataDirSize | kb | Data snapshot file size | -| dataLogDir | none | Transaction log file directory, production environment on a separate disk | -| dataLogSize | kb | Transaction log file size | -| tickTime | ms | Time interval between servers or between clients and servers to maintain heartbeat | -| minSessionTimeout | ms | Minimum session timeout. Heartbeat timex2. The specified time is less than this time, which is used by default | -| maxSessionTimeout | ms | Maximum session timeout. Heartbeat timex20. The specified time is greater than this time, which is used by default | -| serverId | none | Server id | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|--------------------------------------------------------------------------------------------------------------------| +| clientPort | none | Port | +| dataDir | none | Data snapshot file directory. By default, 100000 operations generate a snapshot | +| dataDirSize | kb | Data snapshot file size | +| dataLogDir | none | Transaction log file directory, production environment on a separate disk | +| dataLogSize | kb | Transaction log file size | +| tickTime | ms | Time interval between servers or between clients and servers to maintain heartbeat | +| minSessionTimeout | ms | Minimum session timeout. Heartbeat timex2. The specified time is less than this time, which is used by default | +| maxSessionTimeout | ms | Maximum session timeout. Heartbeat timex20. 
The specified time is greater than this time, which is used by default | +| serverId | none | Server id | #### Metric set:stats -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| zk_version | none | Server version | -| zk_server_state | none | Server role | -| zk_num_alive_connections | number | Number of connections | -| zk_avg_latency | ms | Average latency | -| zk_outstanding_requests | number | Number of outstanding requests | -| zk_znode_count | number | Number of znode | -| zk_packets_sent | number | Number of packets sent | -| zk_packets_received | number | Number of packets received | -| zk_watch_count | number | Number of watch | -| zk_max_file_descriptor_count | number | Maximum number of file descriptors | -| zk_approximate_data_size | kb | data size | -| zk_open_file_descriptor_count | number | Number of open file descriptors | -| zk_max_latency | ms | Max latency | -| zk_ephemerals_count | number | Number of ephemeral nodes | -| zk_min_latency | ms | Min latency | - +| Metric name | Metric unit | Metric help description | +|-------------------------------|-------------|------------------------------------| +| zk_version | none | Server version | +| zk_server_state | none | Server role | +| zk_num_alive_connections | number | Number of connections | +| zk_avg_latency | ms | Average latency | +| zk_outstanding_requests | number | Number of outstanding requests | +| zk_znode_count | number | Number of znode | +| zk_packets_sent | number | Number of packets sent | +| zk_packets_received | number | Number of packets received | +| zk_watch_count | number | Number of watch | +| zk_max_file_descriptor_count | number | Maximum number of file descriptors | +| zk_approximate_data_size | kb | data size | +| zk_open_file_descriptor_count | number | Number of open file descriptors | +| zk_max_latency | ms | Max latency | +| zk_ephemerals_count | number | Number of ephemeral nodes | +| zk_min_latency | ms | Min 
latency | #### Metric set:envi -| Metric Name | Metric Unit | Metric help description | -| ------------------- |-------------|-------------------------------| -| zk_version | none | ZooKeeper version | -| hostname | none | Hostname | -| java_version | none | Java version | -| java_vendor | none | Java vendor | -| java_home | none | Java home directory | -| java_class_path | none | Java class path | -| java_library_path | none | Java library path | -| java_io_tmpdir | none | Java temporary directory | -| java_compiler | none | Java compiler | -| os_name | none | Operating system name | -| os_arch | none | Operating system architecture | -| os_version | none | Operating system version | -| user_name | none | Username | -| user_home | none | User home directory | -| user_dir | none | User current directory | \ No newline at end of file +| Metric Name | Metric Unit | Metric help description | +|-------------------|-------------|-------------------------------| +| zk_version | none | ZooKeeper version | +| hostname | none | Hostname | +| java_version | none | Java version | +| java_vendor | none | Java vendor | +| java_home | none | Java home directory | +| java_class_path | none | Java class path | +| java_library_path | none | Java library path | +| java_io_tmpdir | none | Java temporary directory | +| java_compiler | none | Java compiler | +| os_name | none | Operating system name | +| os_arch | none | Operating system architecture | +| os_version | none | Operating system version | +| user_name | none | Username | +| user_home | none | User home directory | +| user_dir | none | User current directory | + diff --git a/home/docs/introduce.md b/home/docs/introduce.md index 0163650390c..95b493c2cc5 100644 --- a/home/docs/introduce.md +++ b/home/docs/introduce.md @@ -5,7 +5,7 @@ sidebar_label: Introduce slug: / --- -> A real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. 
+> A real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. [![Discord](https://img.shields.io/badge/Chat-Discord-7289DA?logo=discord)](https://discord.gg/Fb6M73htGr) [![Reddit](https://img.shields.io/badge/Reddit-Community-7289DA?logo=reddit)](https://www.reddit.com/r/hertzbeat/) @@ -32,11 +32,9 @@ slug: / * Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. * Provides powerful status page building capabilities, easily communicate the real-time status of your service to users. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. - ----- - +--- ### Powerful Monitoring Templates > Before we discuss the customizable monitoring capabilities of HertzBeat, which we mentioned at the beginning, let's introduce the different monitoring templates of HertzBeat. And it is because of this monitoring template design that the advanced features come later. @@ -49,7 +47,6 @@ Do you believe that users can just write a monitoring template on the UI page, c ![hertzbeat](/img/home/9.png) - ### Built-in Monitoring Types **There are a lot of built-in monitoring templates for users to add directly on the page, one monitoring type corresponds to one YML monitoring template**. @@ -97,7 +94,7 @@ Do you believe that users can just write a monitoring template on the UI page, c > From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. 
> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. -> The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. +> The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. ![hertzbeat](/img/docs/custom-arch.png) @@ -105,12 +102,12 @@ Do you believe that users can just write a monitoring template on the UI page, c > For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. > You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. -> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. +> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. -The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. 
+The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. - For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. -- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. +- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. **Password and other sensitive information is encrypted on all links**. @@ -129,7 +126,7 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > Two locations, three centers, multi-cloud environments, multi-isolated networks, you may have heard of these scenarios. When there is a need for a unified monitoring system to monitor the IT resources of different isolated networks, this is where our Cloud Edge Collaboration comes in. In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. -`HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. +`HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. 
![hertzbeat](/img/docs/cluster-arch.png) @@ -148,12 +145,11 @@ In an isolated network where multiple networks are not connected, we need to dep - Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . - Open source is not the same as free, dev based on HertzBeat must retain copyright, etc. - **HertzBeat has been included in the [CNCF Observability And Analysis - Monitoring Landscape](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category)** ![cncf](/img/home/cncf-landscape-left-logo.svg) ------ +--- **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** @@ -271,7 +267,6 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/13.png) - ### Alarm Silence - When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. @@ -303,7 +298,6 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/9.png) - ----- **There's so much more to discover. 
Have Fun!** diff --git a/home/docs/others/design.md b/home/docs/others/design.md index e7f80d164d7..da5ec8fffee 100644 --- a/home/docs/others/design.md +++ b/home/docs/others/design.md @@ -1,9 +1,9 @@ --- id: design title: 设计文档 -sidebar_label: 设计文档 +sidebar_label: 设计文档 --- -### HertzBeat Arch +### HertzBeat Arch -![architecture](/img/docs/hertzbeat-arch.svg) +![architecture](/img/docs/hertzbeat-arch.svg) diff --git a/home/docs/others/resource.md b/home/docs/others/resource.md index 79c16b4ab56..83188bd1c8e 100644 --- a/home/docs/others/resource.md +++ b/home/docs/others/resource.md @@ -1,19 +1,19 @@ --- id: resource title: Related resources -sidebar_label: Related resources +sidebar_label: Related resources --- -## Icon Resources +## Icon Resources -### HertzBeat Logo +### HertzBeat Logo -![logo](/img/hertzbeat-logo.svg) +![logo](/img/hertzbeat-logo.svg) -Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) +Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) -### HertzBeat Brand Logo +### HertzBeat Brand Logo -![logo](/img/hertzbeat-brand.svg) +![logo](/img/hertzbeat-brand.svg) -Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) +Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) diff --git a/home/docs/start/account-modify.md b/home/docs/start/account-modify.md index 81a1f573573..52ddf334e43 100644 --- a/home/docs/start/account-modify.md +++ b/home/docs/start/account-modify.md @@ -1,7 +1,7 @@ --- id: account-modify title: Modify Account Username Password And Secret -sidebar_label: Update Account Secret +sidebar_label: Update Account Secret --- ## Update Account @@ -141,12 +141,11 @@ account: role: [user] ``` -## Update Security Secret +## Update Security Secret -> This secret is the key for account security encryption management and needs to be updated to your custom key string of the same length. 
+> This secret is the key for account security encryption management and needs to be updated to your custom key string of the same length. - -Update the `application.yml` file in the `config` directory, modify the `sureness.jwt.secret` parameter to your custom key string of the same length. +Update the `application.yml` file in the `config` directory, modify the `sureness.jwt.secret` parameter to your custom key string of the same length. ```yaml sureness: @@ -157,4 +156,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**Restart HertzBeat, access http://ip:1157/ to explore** +**Restart HertzBeat, access http://ip:1157/ to explore** diff --git a/home/docs/start/custom-config.md b/home/docs/start/custom-config.md index 5a60d6b02b9..7554498bc6e 100644 --- a/home/docs/start/custom-config.md +++ b/home/docs/start/custom-config.md @@ -1,7 +1,7 @@ --- id: custom-config title: Advanced Params Config -sidebar_label: Advanced Params Config +sidebar_label: Advanced Params Config --- This describes how to configure the SMS server, the number of built-in availability alarm triggers, etc. @@ -12,13 +12,14 @@ This describes how to configure the SMS server, the number of built-in availabil Modify the configuration file located at `hertzbeat/config/application.yml` Note ⚠️The docker container method needs to mount the application.yml file to the local host -The installation package can be decompressed and modified in `hertzbeat/config/application.yml` +The installation package can be decompressed and modified in `hertzbeat/config/application.yml` 1. Configure the SMS sending server > Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. 
-Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) +Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) + ```yaml common: sms: @@ -32,7 +33,6 @@ common: 2. Configure alarm custom parameters - ```yaml alerter: # Custom console address @@ -44,6 +44,7 @@ alerter: > By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. Note ⚠️ `memory.enabled: false, redis.enabled: true` + ```yaml warehouse: store: @@ -56,3 +57,4 @@ warehouse: port: 6379 password: 123456 ``` + diff --git a/home/docs/start/docker-compose-deploy.md b/home/docs/start/docker-compose-deploy.md index 0dd6fe054eb..b63498c7916 100644 --- a/home/docs/start/docker-compose-deploy.md +++ b/home/docs/start/docker-compose-deploy.md @@ -1,7 +1,7 @@ --- id: docker-compose-deploy title: Install HertzBeat via Docker Compose -sidebar_label: Install via Docker Compose +sidebar_label: Install via Docker Compose --- :::tip @@ -26,7 +26,7 @@ Download the installation script package `apache-hertzbeat-xxx-incubating-docker - Unzip the script package -``` +``` $ tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz ``` @@ -49,7 +49,7 @@ docker-compose up -d > View the running status of each container, up is the normal running status -``` +``` docker-compose ps ``` @@ -58,7 +58,6 @@ docker-compose ps **HAVE FUN** - ---- ### FAQ diff --git a/home/docs/start/docker-deploy.md b/home/docs/start/docker-deploy.md index 8222c6d0c92..5cac80f4868 100644 --- a/home/docs/start/docker-deploy.md +++ b/home/docs/start/docker-deploy.md @@ -1,7 +1,7 @@ --- id: docker-deploy title: Install HertzBeat via Docker -sidebar_label: Install via Docker +sidebar_label: Install via Docker --- :::tip @@ -17,7 +17,7 @@ It is necessary to have Docker environment in 
your environment. If not installed 1. Execute the following command -```shell +```shell $ docker run -d -p 1157:1157 -p 1158:1158 \ -v $(pwd)/data:/opt/hertzbeat/data \ -v $(pwd)/logs:/opt/hertzbeat/logs \ @@ -46,7 +46,7 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ - This maps the 1157,1158 ports of the container to the 1157,1158 ports of the host. If the port on the host is already occupied, you need to modify the host mapping port. - When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. - You can execute `docker update --restart=always hertzbeat` to configure the container to restart automatically. -- If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` +- If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` ::: 2. Start to explore HertzBeat @@ -63,7 +63,7 @@ By deploying multiple HertzBeat Collectors, high availability, load balancing, a 1. Execute the following command -```shell +```shell $ docker run -d \ -e IDENTITY=custom-collector-name \ -e MODE=public \ @@ -89,7 +89,7 @@ $ docker run -d \ - The `127.0.0.1` in `MANAGER_HOST` needs to be replaced with the external IP address of the HertzBeat Server. - When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. - You can execute `docker update --restart=always hertzbeat-collector` to configure the container to restart automatically. -- If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` +- If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` ::: 2. Access `http://localhost:1157` and you will see the registered new collector in dashboard. 
@@ -98,30 +98,35 @@ $ docker run -d \ ---- -### FAQ +### FAQ **The most common problem is network problems, please check in advance** 1. MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail -The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. + The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` +> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. According to the process deploy,visit http://ip:1157/ no interface -Please refer to the following points to troubleshoot issues: + Please refer to the following points to troubleshoot issues: + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. > 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. -3. Historical monitoring charts have been missing data for a long time +3. Historical monitoring charts have been missing data for a long time + > 1:Check whether you configure victoria-metrics or Tdengine or IoTDB. No configuration means no historical chart data. 
> 2: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. 4. If the history chart on the monitoring page is not displayed,popup [please configure time series database] + > As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service database. > Installation and initialization this database, please refer to [Using victoria-metrics to store metrics data](victoria-metrics-init) 5. The time series database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure dependent time series database] + > Please check if the configuration parameters are correct > Is time-series database enable set to true > Note⚠️If both hertzbeat and time-series database are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed @@ -132,10 +137,10 @@ Please refer to the following points to troubleshoot issues: > This file is the configuration file of HertzBeat, used to configure various parameters of HertzBeat, such as database connection information, time series database configuration, etc. > Download `application.yml` file to the host directory, for example: $(pwd)/application.yml > Download source [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) -You can modify the configuration yml file according to your needs. 
-- If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` -- **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) -- **Recommended** If you need to use the time series database victoria-metrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using victoria-metrics to store metrics data](victoria-metrics-init) +> You can modify the configuration yml file according to your needs. +> - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` +> - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) +> - **Recommended** If you need to use the time series database victoria-metrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using victoria-metrics to store metrics data](victoria-metrics-init) 7. What is the purpose of sureness.yml @@ -145,3 +150,4 @@ You can modify the configuration yml file according to your needs. 
> Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` > Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) > For detail steps, please refer to [Configure Account Password](account-modify) + diff --git a/home/docs/start/greptime-init.md b/home/docs/start/greptime-init.md index 92dec8f6d3d..3f347bd9ef7 100644 --- a/home/docs/start/greptime-init.md +++ b/home/docs/start/greptime-init.md @@ -12,18 +12,21 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** + +### Install GreptimeDB via Docker -### Install GreptimeDB via Docker > Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install GreptimeDB with Docker +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Install GreptimeDB with Docker ```shell $ docker run -p 127.0.0.1:4000-4003:4000-4003 \ @@ -36,10 +39,10 @@ $ docker run -p 127.0.0.1:4000-4003:4000-4003 \ --postgres-addr 0.0.0.0:4003 ``` - `-v "$(pwd)/greptimedb:/tmp/greptimedb"` is local persistent mount of greptimedb data directory. `$(pwd)/greptimedb` should be replaced with the actual local directory, default is the `greptimedb` directory under the current directory. - use```$ docker ps``` to check if the database started successfully +`-v "$(pwd)/greptimedb:/tmp/greptimedb"` is local persistent mount of greptimedb data directory. `$(pwd)/greptimedb` should be replaced with the actual local directory, default is the `greptimedb` directory under the current directory. +use```$ docker ps``` to check if the database started successfully -### Configure the database connection in hertzbeat `application.yml` configuration file +### Configure the database connection in hertzbeat `application.yml` configuration file 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) diff --git a/home/docs/start/influxdb-init.md b/home/docs/start/influxdb-init.md index 6e5aabcd91d..d4991067a8f 100644 --- a/home/docs/start/influxdb-init.md +++ b/home/docs/start/influxdb-init.md @@ -1,51 +1,54 @@ --- id: influxdb-init title: Use Time Series Database InfluxDB to Store Metrics Data (Optional) -sidebar_label: Metrics Store InfluxDB +sidebar_label: Metrics Store InfluxDB --- Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) > It is recommended to use VictoriaMetrics as metrics storage. 
- **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** -Note⚠️ Need InfluxDB 1.x Version. +Note⚠️ Need InfluxDB 1.x Version. ### 1. Use HuaweiCloud GaussDB For Influx > Use [HuaweiCloud GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) - -> Get the `GaussDB For Influx` service url, username and password config. +> +> Get the `GaussDB For Influx` service url, username and password config. ⚠️Note `GaussDB For Influx` enable SSL default, the service url should use `https:` -### 2. Install TDengine via Docker +### 2. Install TDengine via Docker + > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install InfluxDB with Docker - ``` - $ docker run -p 8086:8086 \ - -v /opt/influxdb:/var/lib/influxdb \ - influxdb:1.8 - ``` - `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. - use```$ docker ps``` to check if the database started successfully - - -### Configure the database connection in hertzbeat `application.yml` configuration file +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Install InfluxDB with Docker +> +> ``` +> $ docker run -p 8086:8086 \ +> -v /opt/influxdb:/var/lib/influxdb \ +> influxdb:1.8 +> ``` +> +> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. +> use```$ docker ps``` to check if the database started successfully + +### Configure the database connection in hertzbeat `application.yml` configuration file 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.influxdb` data source parameters, URL account and password. + Replace `warehouse.store.influxdb` data source parameters, URL account and password. ```yaml warehouse: @@ -70,3 +73,4 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + diff --git a/home/docs/start/iotdb-init.md b/home/docs/start/iotdb-init.md index 33b82a79183..b740a690529 100644 --- a/home/docs/start/iotdb-init.md +++ b/home/docs/start/iotdb-init.md @@ -3,6 +3,7 @@ id: iotdb-init title: Use Time Series Database IoTDB to Store Metrics Data (Optional) sidebar_label: Metrics Store IoTDB --- + Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) > It is recommended to use VictoriaMetrics as metrics storage. 
@@ -87,9 +88,8 @@ warehouse: Configuration parameters: - -| Parameter Name | Description | -| ------------------- |-------------------------------------------------------------------------------------------| +| Parameter Name | Description | +|---------------------|-------------------------------------------------------------------------------------------| | enabled | Whether to enable | | host | IoTDB database address | | rpc-port | IoTDB database port | @@ -120,3 +120,4 @@ Configuration parameters: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory + diff --git a/home/docs/start/mysql-change.md b/home/docs/start/mysql-change.md index 34884f60775..a83e8fcc82c 100644 --- a/home/docs/start/mysql-change.md +++ b/home/docs/start/mysql-change.md @@ -1,21 +1,25 @@ --- id: mysql-change title: Use MYSQL Replace H2 Database to Store Metadata(Optional) -sidebar_label: Meta Store MYSQL +sidebar_label: Meta Store MYSQL --- -MYSQL is a reliable relational database. In addition to default built-in H2 database, Apache HertzBeat (incubating) allow you to use MYSQL to store structured relational data such as monitoring information, alarm information and configuration information. -> If you have the MYSQL environment, can be directly to database creation step. +MYSQL is a reliable relational database. In addition to default built-in H2 database, Apache HertzBeat (incubating) allow you to use MYSQL to store structured relational data such as monitoring information, alarm information and configuration information. + +> If you have the MYSQL environment, can be directly to database creation step. + +### Install MYSQL via Docker -### Install MYSQL via Docker 1. 
Download and install the Docker environment For Docker installation, please refer to the [Docker official documentation](https://docs.docker.com/get-docker/). After the installation, please verify in the terminal that the Docker version can be printed normally. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. Install MYSQl with Docker +2. Install MYSQl with Docker + ``` $ docker run -d --name mysql \ -p 3306:3306 \ @@ -24,12 +28,14 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data --restart=always \ mysql:5.7 ``` + `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. use ```$ docker ps``` to check if the database started successfully -### Database creation +### Database creation + 1. Enter MYSQL or use the client to connect MYSQL service - `mysql -uroot -p123456` + `mysql -uroot -p123456` 2. Create database named hertzbeat `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. Check if hertzbeat database has been successfully created @@ -40,8 +46,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data - Download the MYSQL jdbc driver jar package, such as mysql-connector-java-8.0.25.jar. https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip - Copy the jar package to the `hertzbeat/ext-lib` directory. - -### Modify hertzbeat's configuration file application.yml and switch data source +### Modify hertzbeat's configuration file application.yml and switch data source - Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file @@ -49,6 +54,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data Replace `spring.database` data source parameters, URL account and password. 
For example: + ```yaml spring: datasource: @@ -68,7 +74,9 @@ spring: logging: level: SEVERE ``` - Specific replacement parameters are as follows and you need to configure account according to the mysql environment: + +Specific replacement parameters are as follows and you need to configure account according to the mysql environment: + ```yaml spring: datasource: @@ -88,6 +96,6 @@ spring: level: SEVERE ``` -- It is recommended to set the host field in the MySQL URL to the public IP address when using Hertzbeat in docker. +- It is recommended to set the host field in the MySQL URL to the public IP address when using Hertzbeat in docker. -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/docs/start/package-deploy.md b/home/docs/start/package-deploy.md index 2d1c383e229..f37f978cbb3 100644 --- a/home/docs/start/package-deploy.md +++ b/home/docs/start/package-deploy.md @@ -18,16 +18,17 @@ Deployment via package relies on Java runtime environment, ensure you have Java1 2. Configure HertzBeat's configuration file(optional) - Unzip the installation package to the host eg: /opt/hertzbeat - ``` - $ tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz - ``` + Unzip the installation package to the host eg: /opt/hertzbeat + + ``` + $ tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz + ``` :::tip The configuration file is located in `config/application.yml`, you can modify the configuration file according to your needs to configure external dependent services, such as databases, time series databases, etc. HertzBeat defaults to using internal services when started, but it is recommended to switch to external database services in production environments. 
::: - + It is recommended to use [PostgreSQL](postgresql-change) for metadata storage and [VictoriaMetrics](victoria-metrics-init) for metric data storage. Specific steps are as follows - [Switch built-in H2 database to PostgreSQL](postgresql-change) @@ -42,10 +43,10 @@ It is recommended to use [PostgreSQL](postgresql-change) for metadata storage an 4. Start the service Execute the startup script in the installation directory `bin/`, or `startup.bat` in windows. - ``` + + ``` $ ./startup.sh ``` - 5. Begin to explore HertzBeat Access http://ip:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! @@ -67,6 +68,7 @@ Deploying multiple HertzBeat Collectors can achieve high availability, load bala 2. Configure the collector configuration file Unzip the installation package to the host eg: /opt/hertzbeat-collector + ``` $ tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz ``` @@ -74,15 +76,15 @@ $ tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz Configure the collector configuration yml file `config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` ```yaml - collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} +collector: + dispatch: + entrance: + netty: + enabled: true + identity: ${IDENTITY:} + mode: ${MODE:public} + manager-host: ${MANAGER_HOST:127.0.0.1} + manager-port: ${MANAGER_PORT:1158} ``` > Parameters detailed explanation @@ -96,7 +98,7 @@ Configure the collector configuration yml file `config/application.yml`: unique Run command `$ ./bin/startup.sh ` or `bin/startup.bat` -4. Begin to explore HertzBeat Collector +4. 
Begin to explore HertzBeat Collector Access `http://ip:1157` and you will see the registered new collector in dashboard @@ -104,7 +106,7 @@ Configure the collector configuration yml file `config/application.yml`: unique ---- -### FAQ +### FAQ 1. you need to prepare the JAVA environment in advance @@ -112,6 +114,7 @@ Configure the collector configuration yml file `config/application.yml`: unique requirement:JDK17 ENV download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully. + ``` $ java -version java version "17.0.9" @@ -119,9 +122,10 @@ Configure the collector configuration yml file `config/application.yml`: unique Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) ``` - 2. According to the process deploy,visit http://ip:1157/ no interface Please refer to the following points to troubleshoot issues: + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. + diff --git a/home/docs/start/postgresql-change.md b/home/docs/start/postgresql-change.md index c827db2c2e3..3ca07d095d1 100644 --- a/home/docs/start/postgresql-change.md +++ b/home/docs/start/postgresql-change.md @@ -3,35 +3,44 @@ id: postgresql-change title: Use PostgreSQL Replace H2 Database to Store Metadata(Recommended) sidebar_label: Meta Store PostgreSQL (Recommended) --- + PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. 
In addition to default built-in H2 database, Apache HertzBeat (incubating) allow you to use PostgreSQL to store structured relational data such as monitoring information, alarm information and configuration information. > If you have the PostgreSQL environment, can be directly to database creation step. ### Install PostgreSQL via Docker + 1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. Install PostgreSQL with Docker + ```shell $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` + use```$ docker ps```to check if the database started successfully ### Database creation -1. Enter postgreSQL or use the client to connect postgreSQL service - ```shell + +1. Enter postgreSQL or use the client to connect postgreSQL service + + ```shell su - postgres psql ``` -2. Create database named hertzbeat +2. Create database named hertzbeat + ```sql CREATE DATABASE hertzbeat; ``` -3. Check if hertzbeat database has been successfully created +3. Check if hertzbeat database has been successfully created + ```sql SELECT * FROM pg_database where datname='hertzbeat'; ``` @@ -42,6 +51,7 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition Modify `hertzbeat/config/application.yml` configuration file Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `spring.database` data source parameters, URL account and password. 
+ ```yaml spring: datasource: @@ -61,7 +71,9 @@ spring: logging: level: SEVERE ``` + Specific replacement parameters are as follows and you need to configure account, ip, port according to the postgresql environment: + ```yaml spring: datasource: @@ -81,4 +93,4 @@ spring: level: SEVERE ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/docs/start/quickstart.md b/home/docs/start/quickstart.md index 9caaa93ff05..393a236a1d1 100644 --- a/home/docs/start/quickstart.md +++ b/home/docs/start/quickstart.md @@ -1,7 +1,7 @@ --- id: quickstart title: Quick Start -sidebar_label: Quick Start +sidebar_label: Quick Start --- ### 🐕 Quick Start @@ -25,6 +25,7 @@ sidebar_label: Quick Start ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. @@ -41,6 +42,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache 5. Deploy collector clusters(Optional) - Download the release package `hertzbeat-collector-xx.tar.gz` to new machine [Download Page](https://hertzbeat.apache.org/docs/download) - Configure the collector configuration yml file `hertzbeat-collector/config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` + ```yaml collector: dispatch: @@ -64,9 +66,9 @@ Detailed config refer to [Install HertzBeat via Package](package-deploy) 3. 
Web:need `nodejs npm angular-cli` environment, Run `ng serve --open` in `web-app` directory after backend startup. 4. Access `http://localhost:4200` to start, default account: `admin/hertzbeat` -Detailed steps refer to [CONTRIBUTING](../community/contribution) +Detailed steps refer to [CONTRIBUTING](../community/contribution) -##### 4:Install All(hertzbeat+postgresql+tsdb) via Docker-compose +##### 4:Install All(hertzbeat+postgresql+tsdb) via Docker-compose Install and deploy the postgresql/mysql database, victoria-metrics/iotdb/tdengine database and hertzbeat at one time through [docker-compose deployment script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose). @@ -78,4 +80,4 @@ Install HertzBeat cluster in a Kubernetes cluster by Helm chart. Detailed steps refer to [Artifact Hub](https://artifacthub.io/packages/helm/hertzbeat/hertzbeat) -**HAVE FUN** +**HAVE FUN** diff --git a/home/docs/start/rainbond-deploy.md b/home/docs/start/rainbond-deploy.md index 57f537aa4ac..ef2c581d57d 100644 --- a/home/docs/start/rainbond-deploy.md +++ b/home/docs/start/rainbond-deploy.md @@ -1,7 +1,7 @@ --- id: rainbond-deploy title: Use Rainbond Deploy HertzBeat -sidebar_label: Install via Rainbond +sidebar_label: Install via Rainbond --- If you are unfamiliar with Kubernetes, and want to install Apache HertzBeat (incubating) in Kubernetes, you can use Rainbond to deploy. Rainbond is a cloud-native application management platform built on Kubernetes and simplifies the application deployment to Kubernetes. diff --git a/home/docs/start/sslcert-practice.md b/home/docs/start/sslcert-practice.md index f7287f2f36e..89d48ec642e 100644 --- a/home/docs/start/sslcert-practice.md +++ b/home/docs/start/sslcert-practice.md @@ -1,7 +1,7 @@ --- id: ssl-cert-practice title: SSL Certificate Monitor Practice -sidebar_label: SSL Certificate Monitor Practice +sidebar_label: SSL Certificate Monitor Practice --- Most websites now support HTTPS by default. 
The certificate we apply for is usually 3 months or 1 year. It is easy to expire the SSL certificate over time, but we did not find it the first time, or did not update the certificate in time before it expired. @@ -12,7 +12,6 @@ This article introduces how to use the hertzbeat monitoring tool to detect the v Apache HertzBeat (incubating) is a real-time monitoring tool with powerful custom monitoring capabilities without Agent. Website monitoring, PING connectivity, port availability, database, operating system, middleware, API monitoring, threshold alarms, alarm notification (email, WeChat, Ding Ding Feishu). - github: https://github.com/apache/hertzbeat #### Install HertzBeat @@ -29,7 +28,6 @@ github: https://github.com/apache/hertzbeat > System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate - ![](/img/docs/start/ssl_1.png) 2. Configure the monitoring website @@ -43,48 +41,38 @@ github: https://github.com/apache/hertzbeat > In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. - ![](/img/docs/start/ssl_3.png) - ![](/img/docs/start/ssl_11.png) 4. Set the threshold (triggered when the certificate expires) > System Page -> Alarms -> Alarm Thresholds -> New Thresholds - ![](/img/docs/start/ssl_4.png) > Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. - ![](/img/docs/start/ssl_5.png) > Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. - ![](/img/docs/start/ssl_6.png) - 5. 
Set the threshold (triggered one week before the certificate expires) > In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. - ![](/img/docs/start/ssl_7.png) > Finally, you can see the triggered alarm in the alarm center. - ![](/img/docs/start/ssl_8.png) - 6. Alarm notification (in time notification via Dingding WeChat Feishu, etc.) > Monitoring Tool -> Alarm Notification -> New Receiver - ![](/img/docs/start/ssl_10.png) For token configuration such as Dingding WeChat Feishu, please refer to the help document @@ -93,7 +81,6 @@ https://hertzbeat.apache.org/docs/help/alert_dingtalk > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured - ![](/img/docs/start/ssl_11.png) 7. OK When the threshold is triggered, we can receive the corresponding alarm message. If there is no notification, you can also view the alarm information in the alarm center. diff --git a/home/docs/start/tdengine-init.md b/home/docs/start/tdengine-init.md index a56bc2cc71b..4edc7c610dd 100644 --- a/home/docs/start/tdengine-init.md +++ b/home/docs/start/tdengine-init.md @@ -1,85 +1,90 @@ --- id: tdengine-init title: Use Time Series Database TDengine to Store Metrics Data (Optional) -sidebar_label: Metrics Store TDengine +sidebar_label: Metrics Store TDengine --- Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) > It is recommended to use VictoriaMetrics as metrics storage. 
-TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. +TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** -Note⚠️ Need TDengine 3.x Version. +Note⚠️ Need TDengine 3.x Version. -> If you have TDengine environment, can directly skip to create a database instance. +> If you have TDengine environment, can directly skip to create a database instance. +### Install TDengine via Docker -### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install TDengine with Docker - ```shell - $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ - -v /opt/taosdata:/var/lib/taos \ - --name tdengine -e TZ=Asia/Shanghai \ - tdengine/tdengine:3.0.4.0 - ``` - `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. - `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. - use```$ docker ps``` to check if the database started successfully - -### Create database instance +> 1. 
Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install TDengine with Docker +> +> ```shell +> $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ +> -v /opt/taosdata:/var/lib/taos \ +> --name tdengine -e TZ=Asia/Shanghai \ +> tdengine/tdengine:3.0.4.0 +> ``` +> +> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. +> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. +> use```$ docker ps``` to check if the database started successfully + +### Create database instance + +1. Enter database Docker container -1. Enter database Docker container ``` $ docker exec -it tdengine /bin/bash ``` 2. Create database named hertzbeat - After entering the container,execute `taos` command as follows: - + After entering the container,execute `taos` command as follows: + ``` root@tdengine-server:~/TDengine-server# taos Welcome to the TDengine shell from Linux, Client Version Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> ``` - - execute commands to create database - + + execute commands to create database + ``` taos> show databases; taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` - + The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). A data file every 10 days, memory blocks buffer is 16MB. -3. Check if hertzbeat database has been created success - +3. 
Check if hertzbeat database has been created success + ``` taos> show databases; taos> use hertzbeat; ``` -**Note⚠️If you install TDengine using package** +**Note⚠️If you install TDengine using package** > In addition to start the server,you must execute `systemctl start taosadapter` to start adapter -### Configure the database connection in hertzbeat `application.yml` configuration file +### Configure the database connection in hertzbeat `application.yml` configuration file 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.td-engine` data source parameters, URL account and password. + Replace `warehouse.store.td-engine` data source parameters, URL account and password. ```yaml warehouse: @@ -101,16 +106,21 @@ warehouse: ### FAQ 1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database -3. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. +3. 
The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed + +> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. 4. The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] + > Please check if the configuration parameters are correct > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory +> You can check the startup logs according to the logs directory + diff --git a/home/docs/start/update-1.6.0.md b/home/docs/start/update-1.6.0.md index 6fd4ee77763..ee05fe83cd5 100644 --- a/home/docs/start/update-1.6.0.md +++ b/home/docs/start/update-1.6.0.md @@ -30,6 +30,7 @@ Due to significant changes in `application.yml` and `sureness.yml`, it is recomm #### `application.yml` generally needs to modify the following parts: Default is: + ```yaml datasource: driver-class-name: org.h2.Driver @@ -48,7 +49,9 @@ Default is: logging: level: SEVERE ``` + If you change to a MySQL database, here is an example: + ```yaml datasource: driver-class-name: com.mysql.cj.jdbc.Driver @@ -100,49 +103,46 @@ Due to the Apache Foundation's requirements for license compliance, HertzBeat's - MySQL: [https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip) - Oracle (If you want to monitor Oracle, these two drivers are required): - - [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) - - 
[https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) + - [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + - [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) Next, run the start-up script as before to experience the latest HertzBeat 1.6.0! ## Docker Upgrade - Mysql Database - Stop the HertzBeat container: + ``` docker stop hertzbeat ``` - - Upgrade the database script: - - Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), choose the directory of your database and execute the corresponding `V160__update_column.sql` file in MySQL. - + - Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), choose the directory of your database and execute the corresponding `V160__update_column.sql` file in MySQL. - Upgrade the configuration files: - - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - + - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. 
- Add the corresponding database drivers: - - Due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. + - Due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1.6.0! ## Docker Installation Upgrade - H2 Built-in Database (Not recommended for production use) - Stop the HertzBeat container: + ``` docker stop hertzbeat ``` - - Edit the H2 database files: - - Assuming you have mounted the H2 database files in the `data` directory to the local system, or copied the `/opt/hertzbeat/data` directory from the old container manually. - - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). - - Start the database locally using the H2 driver jar: - ``` - java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 - ``` + - Assuming you have mounted the H2 database files in the `data` directory to the local system, or copied the `/opt/hertzbeat/data` directory from the old container manually. + - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). 
+ - Start the database locally using the H2 driver jar: + ``` + java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 + ``` - Upgrade the configuration files: - - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - + - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - Add the corresponding database drivers: - - As mentioned, due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. + - As mentioned, due to the Apache Foundation's license compliance requirements, HertzBeat's installation package cannot include MySQL, Oracle, and other GPL-licensed dependencies. Users need to add them themselves by downloading the driver jars from the above links and placing them in the local `ext-lib` directory, then mounting `ext-lib` to the container's `/opt/hertzbeat/ext-lib` directory when starting. Next, run the Docker to start HertzBeat as before to experience the latest HertzBeat 1.6.0! @@ -151,4 +151,5 @@ Next, run the Docker to start HertzBeat as before to experience the latest Hertz If you do not want to go through the tedious script upgrade method mentioned above, you can directly export and import the monitoring tasks and threshold information from the old environment. - Deploy a new environment with the latest version. 
-- Export the monitoring tasks and threshold information from the old environment on the page \ No newline at end of file +- Export the monitoring tasks and threshold information from the old environment on the page + diff --git a/home/docs/start/upgrade.md b/home/docs/start/upgrade.md index 8ad14d3b673..ebd4af61e70 100644 --- a/home/docs/start/upgrade.md +++ b/home/docs/start/upgrade.md @@ -1,7 +1,7 @@ --- id: upgrade title: HertzBeat New Version Upgrade -sidebar_label: Version Upgrade Guide +sidebar_label: Version Upgrade Guide --- **HertzBeat Release Version List** @@ -14,7 +14,6 @@ Apache HertzBeat (incubating)'s metadata information is stored in H2 or Mysql, P **You need to save and back up the data files of the database and monitoring templates yml files before upgrading** - ### Upgrade For Docker Deploy 1. If using custom monitoring templates @@ -22,30 +21,26 @@ Apache HertzBeat (incubating)'s metadata information is stored in H2 or Mysql, P - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - And mount the template define directory when docker start `-v $(pwd)/define:/opt/hertzbeat/define` - `-v $(pwd)/define:/opt/hertzbeat/define` - -2. If using the built-in default H2 database +2. If using the built-in default H2 database - Need to mount or back up `-v $(pwd)/data:/opt/hertzbeat/data` database file directory in the container `/opt/hertzbeat/data` - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - Refer to [Docker installation of HertzBeat](docker-deploy) to create a new container using a new image. Note that the database file directory needs to be mounted `-v $(pwd)/data:/opt/hertzbeat/data` - -3. If using external relational database Mysql, PostgreSQL +3. 
If using external relational database Mysql, PostgreSQL - No need to mount the database file directory in the backup container - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - Refer to [Docker installation HertzBeat](docker-deploy) to create a new container using the new image, and configure the database connection in `application.yml` - ### Upgrade For Package Deploy -1. If using the built-in default H2 database +1. If using the built-in default H2 database - Back up the database file directory under the installation package `/opt/hertzbeat/data` - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - Refer to [Installation package to install HertzBeat](package-deploy) to start using the new installation package - -2. If using external relational database Mysql, PostgreSQL +2. If using external relational database Mysql, PostgreSQL - No need to back up the database file directory under the installation package - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - Refer to [Installation package to install HertzBeat](package-deploy) to start with the new installation package and configure the database connection in `application.yml` -**HAVE FUN** +**HAVE FUN** diff --git a/home/docs/start/victoria-metrics-init.md b/home/docs/start/victoria-metrics-init.md index 455e91e86cf..4d0e48b4a70 100644 --- a/home/docs/start/victoria-metrics-init.md +++ b/home/docs/start/victoria-metrics-init.md @@ -1,7 +1,7 @@ --- id: victoria-metrics-init title: Use Time Series Database VictoriaMetrics to Store Metrics Data (Recommended) -sidebar_label: Metrics Store VictoriaMetrics (Recommended) +sidebar_label: Metrics Store VictoriaMetrics 
(Recommended) --- Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) @@ -15,17 +15,19 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t > If you already have an VictoriaMetrics environment, you can skip directly to the YML configuration step. -### Install VictoriaMetrics via Docker +### Install VictoriaMetrics via Docker + > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. Install VictoriaMetrics via Docker +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` + +2. Install VictoriaMetrics via Docker ```shell $ docker run -d -p 8428:8428 \ @@ -34,8 +36,8 @@ $ docker run -d -p 8428:8428 \ victoriametrics/victoria-metrics:v1.95.1 ``` - `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory - use```$ docker ps``` to check if the database started successfully +`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory +use```$ docker ps``` to check if the database started successfully 3. 
Configure the database connection in hertzbeat `application.yml`configuration file @@ -61,5 +63,7 @@ warehouse: ### FAQ -1. Do both the time series databases need to be configured? Can they both be used? +1. Do both the time series databases need to be configured? Can they both be used? + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. + diff --git a/home/docs/template.md b/home/docs/template.md index 4bdf785e588..cee7aa05055 100644 --- a/home/docs/template.md +++ b/home/docs/template.md @@ -4,27 +4,27 @@ title: Monitoring Template Here sidebar_label: Monitoring Template --- -> Apache HertzBeat (incubating) is an open source, real-time monitoring tool with custom-monitor and agentLess. - +> Apache HertzBeat (incubating) is an open source, real-time monitoring tool with custom-monitor and agentLess. +> > We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? -Here is the architecture. +Here is the architecture. ![hertzBeat](/img/docs/hertzbeat-arch.png) -**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** +**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** ![](/img/docs/advanced/extend-point-1.png) **Welcome everyone to contribute your customized general monitoring type YML template during use. 
The available templates are as follows:** -### Application service monitoring +### Application service monitoring  👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
+ 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
+ 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
+ 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
@@ -32,7 +32,7 @@ Here is the architecture.  👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
-### Database monitoring +### Database monitoring  👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
@@ -49,7 +49,7 @@ Here is the architecture.  👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
-### Operating system monitoring +### Operating system monitoring  👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
@@ -64,7 +64,6 @@ Here is the architecture.  👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- ### Middleware monitoring  👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
@@ -78,13 +77,12 @@ Here is the architecture.  👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- ### CloudNative monitoring  👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
-### Network monitoring +### Network monitoring  👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md index 255046201cc..02e2cbdd0c8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md @@ -4,16 +4,16 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- -[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 +[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 -**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** +**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** -从v1.0-beta.1到v1.0-beat.8,经过多个版本的迭代完善,我们很高兴宣布hertzbeat v1.0正式发布。 +从v1.0-beta.1到v1.0-beat.8,经过多个版本的迭代完善,我们很高兴宣布hertzbeat v1.0正式发布。 -感谢从beat.1版本以来 HertzBeat Contributors 的贡献,社区同学和用户们的支持。 此版本更新支持了Redis的监控( @gcdd1993 贡献),覆盖Redis的内存CPU等各个性能指标,全方面监控Redis。修复了多个bug进一步增强稳定性。 +感谢从beat.1版本以来 HertzBeat Contributors 的贡献,社区同学和用户们的支持。 此版本更新支持了Redis的监控( @gcdd1993 贡献),覆盖Redis的内存CPU等各个性能指标,全方面监控Redis。修复了多个bug进一步增强稳定性。 @@ -64,7 +64,6 @@ Redis监控来啦: 2022-05-29 20 24 21 - > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn)开源的一个支持网站,API,PING,端口,数据库,操作系统等监控类型,拥有易用友好的可视化操作界面的开源监控告警项目。 > 当然,我们也提供了对应的[SAAS云监控版本](https://console.tancloud.cn),中小团队和个人无需再为了监控自己的网站资源,而去部署一套繁琐的监控系统,[登录即可免费开始](https://console.tancloud.cn)监控之旅。 > HertzBeat 
支持自定义监控,只用通过配置YML文件我们就可以自定义需要的监控类型和指标,来满足常见的个性化需求。 @@ -72,10 +71,9 @@ Redis监控来啦: > HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 > 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 - +> > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 - **仓库地址** [Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md index 82c59c3b8ec..15d260bd215 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md @@ -4,12 +4,12 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- -[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 +[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 -**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** +**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** 大家好,HertzBeat v1.1.0 发布啦!这个版本我们支持了SNMP协议,并使用SNMP协议监控支持了windwos操作系统的应用监控。 另一个重大变更是我们默认使用了H2数据库来替换MYSQL数据库作为存储,来方便使用者们的安装部署,现在只需要一条docker命令即可安装体验hertzbeat : `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` @@ -35,12 +35,11 @@ Bugfix. Online https://console.tancloud.cn. 
------------------------ +--- Windows Monitor coming: 2022-06-19 11 30 57 - ⚠️ ⚠️⚠️⚠️请注意其它版本升级到v1.1.0需要先执行下面的SQL脚本. 现在我们的表名称有个统一前缀 hzb_ prefix. ``` @@ -55,13 +54,14 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` +``` Have Fun! ---- ## V1.1.0 + Home: hertzbeat.com | tancloud.cn Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. @@ -76,7 +76,7 @@ Feature: 2. [[monitor]change default database mysql to h2 #191](https://github.com/apache/hertzbeat/pull/191) 3. [[manager]support monitor params name i18n #184](https://github.com/apache/hertzbeat/pull/184). 4. [[script]build multi cpu arch hertzbeat docker version #189](https://github.com/apache/hertzbeat/pull/189). -5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time +5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time 6. [[monitor]database tables append prefix hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy Bugfix. @@ -88,12 +88,12 @@ Bugfix. Online https://console.tancloud.cn. ------------------------ +--- + Windows Monitor coming: 2022-06-19 11 30 57 - ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. ``` @@ -108,10 +108,9 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` - -Have Fun! +``` +Have Fun! ---- @@ -122,11 +121,9 @@ Have Fun! 
> HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 > 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 - +> > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 - - **仓库地址** [Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md index a949092e375..15d260bd215 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md @@ -4,12 +4,12 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- -[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 +[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 -**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** +**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** 大家好,HertzBeat v1.1.0 发布啦!这个版本我们支持了SNMP协议,并使用SNMP协议监控支持了windwos操作系统的应用监控。 另一个重大变更是我们默认使用了H2数据库来替换MYSQL数据库作为存储,来方便使用者们的安装部署,现在只需要一条docker命令即可安装体验hertzbeat : `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` @@ -35,12 +35,11 @@ Bugfix. Online https://console.tancloud.cn. ------------------------ +--- Windows Monitor coming: 2022-06-19 11 30 57 - ⚠️ ⚠️⚠️⚠️请注意其它版本升级到v1.1.0需要先执行下面的SQL脚本. 现在我们的表名称有个统一前缀 hzb_ prefix. 
``` @@ -55,13 +54,14 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` +``` Have Fun! ---- ## V1.1.0 + Home: hertzbeat.com | tancloud.cn Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. @@ -76,7 +76,7 @@ Feature: 2. [[monitor]change default database mysql to h2 #191](https://github.com/apache/hertzbeat/pull/191) 3. [[manager]support monitor params name i18n #184](https://github.com/apache/hertzbeat/pull/184). 4. [[script]build multi cpu arch hertzbeat docker version #189](https://github.com/apache/hertzbeat/pull/189). -5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time +5. [[monitor]feature: support oracle multi tablespaces #163](https://github.com/apache/hertzbeat/pull/163) contribute by @brave4Time 6. [[monitor]database tables append prefix hzb_ #193](https://github.com/apache/hertzbeat/pull/193) issue from @shimingxy Bugfix. @@ -88,12 +88,12 @@ Bugfix. Online https://console.tancloud.cn. ------------------------ +--- + Windows Monitor coming: 2022-06-19 11 30 57 - ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. ``` @@ -108,10 +108,9 @@ ALTER TABLE param_define RENAME TO hzb_param_define; ALTER TABLE tag RENAME TO hzb_tag; ALTER TABLE tag_monitor_bind RENAME TO hzb_tag_monitor_bind; commit; -``` - -Have Fun! +``` +Have Fun! ---- @@ -122,10 +121,9 @@ Have Fun! 
> HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 > 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 - +> > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 - **仓库地址** [Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md index b3de59cfbb2..589b8113d31 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md @@ -4,12 +4,12 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- -[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 +[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn) 开源的一个支持网站,API,PING,端口,数据库,全站,操作系统,中间件等监控类型,支持阈值告警,告警通知 (邮箱,webhook,钉钉,企业微信,飞书机器人),拥有易用友好的可视化操作界面的开源监控告警项目。 -**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** +**官网: [hertzbeat.com](https://hertzbeat.com) | [tancloud.cn](https://tancloud.cn)** 大家好,HertzBeat v1.1.1 发布啦!这个版本带来了自定义监控增强,采集指标数据可以作为变量赋值给下一个采集。修复了若干bug,提升整体稳定性。 @@ -24,7 +24,7 @@ Feature: 2. [[monitor] 支持前置采集指标数据作为变量赋值给下一采集流程 #206](https://github.com/apache/hertzbeat/pull/206). 3. [[collector] 使用基本的http headers头实现basic auth替换前置模式 #212](https://github.com/apache/hertzbeat/pull/212) 4. [[manager,alerter] 支持告警通知设置钉钉机器人微信飞书自定义 webhook url #213](https://github.com/apache/hertzbeat/pull/213) -5. 
[[monitor] feature 更新数值指标数据不带末尾为0的小数点 #217](https://github.com/apache/hertzbeat/pull/217) +5. [[monitor] feature 更新数值指标数据不带末尾为0的小数点 #217](https://github.com/apache/hertzbeat/pull/217) 6. [[web-app]feature:toggle [enable and cancel] button #218](https://github.com/apache/hertzbeat/pull/218) 7. [[manager] 更新监控define yml文件前缀名称 "app" or "param",便于自定义监控区别 #221](https://github.com/apache/hertzbeat/pull/221) @@ -42,7 +42,6 @@ Bugfix. Online https://console.tancloud.cn. - Have Fun! ---- @@ -54,10 +53,9 @@ Have Fun! > HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 > 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 - +> > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 - **仓库地址** [Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md index affc90b5c34..e4c3064b1fd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md @@ -4,7 +4,7 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource] +tags: [opensource] --- Home: hertzbeat.com | tancloud.cn @@ -22,9 +22,9 @@ Feature: 2. [[monitor] feature: support apache kafka monitor #263](https://github.com/apache/hertzbeat/pull/263) contribute by @wang1027-wqh 3. [[webapp] support history chart query 3 mouth time range #265](https://github.com/apache/hertzbeat/pull/265) issue by @ericfrol 4. [[monitor] support ssl certificate expired monitor #266](https://github.com/apache/hertzbeat/pull/266) suggest by @noear -5. [[web-app] update default interval 600s to 120s #268](https://github.com/apache/hertzbeat/pull/268) -6. 
[[web-app] update layout ui - help button, nav menu #272](https://github.com/apache/hertzbeat/pull/272) -7. [[alert,webapp] support delete all alerts at once. #273](https://github.com/apache/hertzbeat/pull/273) issue by @ericfrol +5. [[web-app] update default interval 600s to 120s #268](https://github.com/apache/hertzbeat/pull/268) +6. [[web-app] update layout ui - help button, nav menu #272](https://github.com/apache/hertzbeat/pull/272) +7. [[alert,webapp] support delete all alerts at once. #273](https://github.com/apache/hertzbeat/pull/273) issue by @ericfrol 8. [[web-app] update home background image #276](https://github.com/apache/hertzbeat/pull/276) Bugfix. @@ -37,9 +37,9 @@ Bugfix. Online https://console.tancloud.cn. Have Fun! ----- - +--- ## V1.1.3 + 官网: hertzbeat.com | tancloud.cn 大家好,HertzBeat v1.1.3 发布啦!这个版本支持了apache kafka监控,SSL证书过期监控等。修复了若干bug,提升整体稳定性。 @@ -55,9 +55,9 @@ Feature: 2. [[monitor] feature: support apache kafka monitor #263](https://github.com/apache/hertzbeat/pull/263) contribute by @wang1027-wqh 3. [[webapp] support history chart query 3 mouth time range #265](https://github.com/apache/hertzbeat/pull/265) issue by @ericfrol 4. [[monitor] support ssl certificate expired monitor #266](https://github.com/apache/hertzbeat/pull/266) suggest by @noear -5. [[web-app] update default interval 600s to 120s #268](https://github.com/apache/hertzbeat/pull/268) -6. [[web-app] update layout ui - help button, nav menu #272](https://github.com/apache/hertzbeat/pull/272) -7. [[alert,webapp] support delete all alerts at once. #273](https://github.com/apache/hertzbeat/pull/273) issue by @ericfrol +5. [[web-app] update default interval 600s to 120s #268](https://github.com/apache/hertzbeat/pull/268) +6. [[web-app] update layout ui - help button, nav menu #272](https://github.com/apache/hertzbeat/pull/272) +7. [[alert,webapp] support delete all alerts at once. #273](https://github.com/apache/hertzbeat/pull/273) issue by @ericfrol 8. 
[[web-app] update home background image #276](https://github.com/apache/hertzbeat/pull/276) Bugfix. @@ -69,6 +69,6 @@ Bugfix. Online https://console.tancloud.cn. - Have Fun! ----- +--- + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md index d7f06703c97..75bf7f7008c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md @@ -4,7 +4,7 @@ author: tom author_title: tom author_url: https://github.com/tomsun28 author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 -tags: [opensource, practice] +tags: [opensource, practice] --- 先祝看到的同学中秋快乐,身体健康,在身体健康的基础上尽量暴富。 @@ -38,7 +38,6 @@ gitee: https://gitee.com/hertzbeat/hertzbeat > 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/bd53f343a5b54feab62e71458d076441~tplv-k3u1fbpfcp-zoom-1.image) 2. 配置监控网站 @@ -52,52 +51,38 @@ gitee: https://gitee.com/hertzbeat/hertzbeat > 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/f874b45e909c4bb0acdd28b3fb034a61~tplv-k3u1fbpfcp-zoom-1.image) - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ef5d7443f8c04818ae5aa28d421203be~tplv-k3u1fbpfcp-zoom-1.image) - - 4. 设置阈值(证书过期时触发) > 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8d6205172d43463aa34e534477f132f1~tplv-k3u1fbpfcp-zoom-1.image) > 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/83d17b381d994f26a6240e01915b2001~tplv-k3u1fbpfcp-zoom-1.image) > 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9b9063d7bcf9454387be0491fc382bd1~tplv-k3u1fbpfcp-zoom-1.image) - - - 5. 
设置阈值(证书过期前一周触发) > 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/0d6f837f57c247e09f668f60eff4a0ff~tplv-k3u1fbpfcp-zoom-1.image) > 最终可以在告警中心看到已触发的告警。 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/5a61b23127524976b2c209ce0ca6a339~tplv-k3u1fbpfcp-zoom-1.image) - 6. 告警通知(通过钉钉微信飞书等及时通知) > 监控系统 -> 告警通知 -> 新增接收人 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7f36956060ef410a82bbecafcbb2957f~tplv-k3u1fbpfcp-zoom-1.image) 钉钉微信飞书等token配置可以参考帮助文档 @@ -107,7 +92,6 @@ https://tancloud.cn/docs/help/alert_dingtalk > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/d976343e81f843138344a039f3aff8a3~tplv-k3u1fbpfcp-zoom-1.image) 7. OK 当阈值触发后我们就可以收到对应告警消息啦,如果没有配通知,也可以在告警中心查看告警信息。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md index 38541e27286..4daa13c04e2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md @@ -26,9 +26,9 @@ Feature: 2. [[webapp] auto redirect url when detect browser language #289](https://github.com/apache/hertzbeat/pull/289) contribute by @DreamGirl524 3. [[home] update logo icon cdn url with fault tolerance #293](https://github.com/apache/hertzbeat/pull/293) contribute by @VampireAchao 4. [[monitor] enable powerful jsonpath parser, add es metrics #295](https://github.com/apache/hertzbeat/pull/295) contribute by @Ceilzcx -5. [[webapp] update ui theme #296](https://github.com/apache/hertzbeat/pull/296) -6. [Feature change main pom artifactId #300](https://github.com/apache/hertzbeat/pull/300) contribute by @Privauto -7. 
[[home,webapp] add users logo and update hertzbeat brand #302](https://github.com/apache/hertzbeat/pull/302) +5. [[webapp] update ui theme #296](https://github.com/apache/hertzbeat/pull/296) +6. [Feature change main pom artifactId #300](https://github.com/apache/hertzbeat/pull/300) contribute by @Privauto +7. [[home,webapp] add users logo and update hertzbeat brand #302](https://github.com/apache/hertzbeat/pull/302) 8. [[monitor] alerter notify test throw msg to front, optional spring.email config #305](https://github.com/apache/hertzbeat/pull/305) contribute by @Ceilzcx 9. [[home]doc:update docker-deploy.md and tdengine-init.md #306](https://github.com/apache/hertzbeat/pull/306) contribute by @Privauto 10. [[hertzbeat] refactor common collect metrics data and alert data queue #320](https://github.com/apache/hertzbeat/pull/320) @@ -58,14 +58,17 @@ Have Fun! ### Upgrade⚠️ Need Convert `application.yml` + ``` spring: resources: static-locations: - classpath:/dist/ - classpath:../dist/ -``` +``` + To + ``` spring: web: @@ -78,6 +81,7 @@ spring: ---- ## V1.2.0 + 官网: hertzbeat.com | tancloud.cn 大家好,HertzBeat v1.2.0 发布啦!这个版本支持了更强大的jsonpath去采集自定义监控指标,将springboot版本升级到2.7,支持指标单位的提取转换展示等。修复了若干bug,提升整体稳定性。 @@ -95,9 +99,9 @@ Feature: 2. [[webapp] auto redirect url when detect browser language #289](https://github.com/apache/hertzbeat/pull/289) contribute by @DreamGirl524 3. [[home] update logo icon cdn url with fault tolerance #293](https://github.com/apache/hertzbeat/pull/293) contribute by @VampireAchao 4. [[monitor] enable powerful jsonpath parser, add es metrics #295](https://github.com/apache/hertzbeat/pull/295) contribute by @Ceilzcx -5. [[webapp] update ui theme #296](https://github.com/apache/hertzbeat/pull/296) -6. [Feature change main pom artifactId #300](https://github.com/apache/hertzbeat/pull/300) contribute by @Privauto -7. [[home,webapp] add users logo and update hertzbeat brand #302](https://github.com/apache/hertzbeat/pull/302) +5. 
[[webapp] update ui theme #296](https://github.com/apache/hertzbeat/pull/296) +6. [Feature change main pom artifactId #300](https://github.com/apache/hertzbeat/pull/300) contribute by @Privauto +7. [[home,webapp] add users logo and update hertzbeat brand #302](https://github.com/apache/hertzbeat/pull/302) 8. [[monitor] alerter notify test throw msg to front, optional spring.email config #305](https://github.com/apache/hertzbeat/pull/305) contribute by @Ceilzcx 9. [[home]doc:update docker-deploy.md and tdengine-init.md #306](https://github.com/apache/hertzbeat/pull/306) contribute by @Privauto 10. [[hertzbeat] refactor common collect metrics data and alert data queue #320](https://github.com/apache/hertzbeat/pull/320) @@ -121,23 +125,24 @@ Bugfix. 9. [[web-app] fix echarts y-axis value tip overflow #325](https://github.com/apache/hertzbeat/pull/325) 10. [[webapp] fix interceptor http resp common error-msg when error #329](https://github.com/apache/hertzbeat/pull/329) - Online https://console.tancloud.cn. - Have Fun! ### 升级注意⚠️ 需要将配置文件内容 `application.yml` + ``` spring: resources: static-locations: - classpath:/dist/ - classpath:../dist/ -``` +``` + 变更为 + ``` spring: web: @@ -148,3 +153,4 @@ spring: ``` ---- + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md index f8488941bd4..941192b69cc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md @@ -8,6 +8,7 @@ tags: [opensource] --- ## v1.2.2 + Home: hertzbeat.com | tancloud.cn Hi guys! HertzBeat v1.2.2 is coming. This release brings significant features. This version we support monitor kubernets, docker, springboot, nacos and database dm, opengauss and more. Also we bring a experimental feature, users can custom define metrics collect from promethues with promql. 
Fixed several bugs and improved the overall stable usability. And more, linux monitor we support top10 cpu usage metrics, top10 memory usage mertics. @@ -26,8 +27,8 @@ Feature: 2. [[home] add DM db document supplement #411](https://github.com/apache/hertzbeat/pull/411) @TJxiaobao 3. [[home] support algolia search #416](https://github.com/apache/hertzbeat/pull/416) 4. [[collector] support trigger and grading multiple subtasks through -_- placeholder expression #418](https://github.com/apache/hertzbeat/pull/418) -5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu -6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) +5. [WIP:feature support k8s monitor, http monitor nacos, service&http_micro monitor msa #421](https://github.com/apache/hertzbeat/pull/421) @cuipiheqiuqiu +6. [[manager] support opengauss database monitor #422](https://github.com/apache/hertzbeat/pull/422) 7. [[#406][warehose] Add unit test MetricsDataControllerTest.java #426](https://github.com/apache/hertzbeat/pull/426) @haibo-duan 8. [[#358][manager] Add unit test manager/service/NoticeConfigServiceTest.java #427](https://github.com/apache/hertzbeat/pull/427) @haibo-duan 9. [[#356][manager] unit test case of manager/service/MailServiceTest.java #432](https://github.com/apache/hertzbeat/pull/432) @csyshu @@ -40,7 +41,6 @@ Feature: 16. [[hertzbeat] update use PromQL to collect metrics from promethues server #456](https://github.com/apache/hertzbeat/pull/456) 17. [[manager] support custom monitor api response data code #460](https://github.com/apache/hertzbeat/pull/460) - Bugfix. 1. [【bugfix#408】if logs dir not exist, create logs dir #409](https://github.com/apache/hertzbeat/pull/409) @Ceilzcx @@ -58,6 +58,7 @@ Have Fun! 
---- ## V1.2.2 + 官网: hertzbeat.com | tancloud.cn 大家好,HertzBeat v1.2.2发布啦!这个版本带来个超多重大更新,我们支持了对云原生kubernets, docker的监控,支持了对springboot应用, nacos注册发现中心,达梦数据库,opengauss数据库等的指标监控。我们也引入了一个实验性特性,用户可以使用promethues promql 从promethues server拿取指标数据作为hertzbeat自定义监控指标数据。当然我们也新增了多个测试用户覆盖,修复了多个BUG。还有个很多用户想要的更新,我们新增了对linux监控的top10 cpu 内存利用率的进程监控指标。有个这个指标,我们就可以干很多事情。比如监控某个进程CPU异常,内存爆满啥的。快来试试吧! @@ -89,7 +90,6 @@ Feature: 16. [[hertzbeat] update use PromQL to collect metrics from promethues server #456](https://github.com/apache/hertzbeat/pull/456) 17. [[manager] support custom monitor api response data code #460](https://github.com/apache/hertzbeat/pull/460) - Bugfix. 1. [【bugfix#408】if logs dir not exist, create logs dir #409](https://github.com/apache/hertzbeat/pull/409) @Ceilzcx @@ -101,3 +101,4 @@ Bugfix. 7. [[home] fix typo in springboot2.md #464](https://github.com/apache/hertzbeat/pull/464) @eltociear ---- + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md index 04a7e28d849..34df92ffbd4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md @@ -9,7 +9,6 @@ tags: [opensource] > 非常高兴 HertzBeat 迎来了两位新晋社区Committer, 两位都是来自互联网公司的开发工程师,让我们来了解下他们的开源经历吧! - ## 第一位 花城 姓名:王庆华 @@ -25,18 +24,18 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) 说起来挺偶然的,结识hertzbeat是因为我大学的毕业设计,当时在一家互联网公司实习,那个时候第一次看到了企业是怎么监控项目的,不管是系统监控、业务监控还是物联网iot监控,那个时候见世面不广,只知道Prometheus + Grafana,但是学起来、用起来成本比较高,那个时候就觉得应该有其他类型的监控,恰好,到了大学毕业设计选题,我就开始寻找这方面的开源项目,那个时候我们小组正在使用Shen Yu网关,我就看了下社区,发现了hertzbeat,自此我便于它结缘了。 ## 开始提交PR + 到了2022-02-18 我开始提交了我第一个pr,当时只是为了优化一些controller入参的格式,没有什么技术含量,但是这是我接触开源的第一步,让我在从理论学习跨出了实践的一步 ## 持续的开源贡献与收获 到目前为止,参与hertzbeat开源项目已有半年多时间,贡献了许多,也成长收获了许多。具体如下: -1. 见证了hertzbeat的贡献值从0到1 -2. 兼容了zookeeper、JVM、Kafka等监控功能 -3. 实现了hertzbeat项目的国际化 -4. 参与了开源之夏并顺利结项 -5. 
增加了监控系统的基础告警功能: 钉钉、飞书、企业微信、邮箱等 - +1. 见证了hertzbeat的贡献值从0到1 +2. 兼容了zookeeper、JVM、Kafka等监控功能 +3. 实现了hertzbeat项目的国际化 +4. 参与了开源之夏并顺利结项 +5. 增加了监控系统的基础告警功能: 钉钉、飞书、企业微信、邮箱等 ## 感谢社区小伙伴 @@ -44,14 +43,13 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) ## 对新人的一点建议 -1. 不要觉得自己一上手就能接触到核心,心急吃不了热豆腐 -2. 不要只注重看代码,要上手搭建、使用 -3. 有想法就大胆尝试,不管自己的方案是否完善 -4. 多多关注开源,了解社区动态,多和开源开发者交流 - +1. 不要觉得自己一上手就能接触到核心,心急吃不了热豆腐 +2. 不要只注重看代码,要上手搭建、使用 +3. 有想法就大胆尝试,不管自己的方案是否完善 +4. 多多关注开源,了解社区动态,多和开源开发者交流 ------ ------ +--- +--- ## 第二位 星辰 @@ -65,13 +63,10 @@ Hertzbeat Committer github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) - ## 初识Hertzbeat 2022年8月开始接触Hertzbeat,由于公司监控elasticsearch使用的cerebro,虽然有非常强大的数据监控,但缺少告警通知的功能;就去github上浏览监控类的项目,刚好看到Hertzbeat,对此非常感兴趣,在了解完整个项目结构和实现后,刚好elasticsearch的监控部分做的不够完善,我就根据cerebro完善了这部分监控数据并提交了pull request。后面在tom老哥的帮助下也开始其他部分的实现。 - - ## 开始提交PR 从2022年9月至今提交了好几个pr,主要包括: @@ -83,8 +78,6 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 一些bug的修复。。。 + promethues exporter 协议解析 - - ## 持续的开源贡献与收获 到目前为止,参与Hertzbeat社区开源已有半年多时间,贡献了许多,也成长收获了许多。 @@ -93,19 +86,16 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) 同时在社区群里,看到别人提的问题和帮助别人可以学到很多新的知识,很多问题你目前不一定会遇到,其他人遇到的时候你可以思考并收获很多知识。 - ## 感谢社区小伙伴 感谢无偿帮助过我或给过我启发的小伙伴:[tomsun28](https://github.com/tomsun28) - ## 对新人的一点建议 + 使用者可以先看官网,官网基本能够解决你的问题。部分简单或者常见的问题其他可以自己解决,对自己也是一种锻炼 + 可以尝试阅读源码,大部分源码都是包含注释的,并不难;不懂的地方也可以通过运行test,debug看一下整个流程 + 有想法或者bug,可以前往gitee或者github提交issues,也可以在群里询问,不要怕,都是从菜逼过来的 - ## 如何参与Hertzbeat + 官网有非常完善的贡献者指南:[贡献者指南 | HertzBeat](https://hertzbeat.com/docs/community/contribution) @@ -114,5 +104,5 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 如果是大的改动,建议提交前编写issues,在提交pr,同时请注意编码的规范,尽量减少bug和警告的产生 - > 以上就是我们新晋Committer们的开源经历了,可以看出参与开源并不难,更重要的是迈出第一步,无论是代码还是文档修复或者提交issue,这些都是贡献者参与开源的姿势。快来加入我们吧! 
+ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md index 79028a22e82..5bc276eb240 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md @@ -7,7 +7,7 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: [opensource] --- -## v1.2.3 +## v1.2.3 Home: hertzbeat.com | tancloud.cn @@ -38,7 +38,6 @@ Feature: 12. [add Prometheus exporter metrics parser and IoTDB monitor #505](https://github.com/apache/hertzbeat/pull/505) @Ceilzcx 13. [support apache shenyu metrics monitoring #507](https://github.com/apache/hertzbeat/pull/507) - Bugfix. 1. [[manager] fix cross domain problem in SecurityCorsConfiguration #469](https://github.com/apache/hertzbeat/pull/469) @zenan08 @@ -57,6 +56,7 @@ Have Fun! ---- ## V1.2.3 + 官网: hertzbeat.com | tancloud.cn 大家好,HertzBeat v1.2.3发布啦!这个版本带来了重大更新,我们支持了对prometheus exporter协议监控,用户可以很方便的使用hertzbeat来适配监控prometheus exporter. 基于这个能力,这个版本我们也支持了对apache shenyu, apache iotdb的指标监控。我们更新了UI布局,修复了多个BUG,也支持了短信通知。快来体验下吧! @@ -84,7 +84,6 @@ Feature: 12. [add Prometheus exporter metrics parser and IoTDB monitor #505](https://github.com/apache/hertzbeat/pull/505) @Ceilzcx 13. [support apache shenyu metrics monitoring #507](https://github.com/apache/hertzbeat/pull/507) - Bugfix. 1. [[manager] fix cross domain problem in SecurityCorsConfiguration #469](https://github.com/apache/hertzbeat/pull/469) @zenan08 @@ -97,3 +96,4 @@ Bugfix. 8. 
[[manager] springboot2 monitor support base path config #515](https://github.com/apache/hertzbeat/pull/515) ---- + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md index 95283ff6ada..74b9d28d2f6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md @@ -10,12 +10,12 @@ keywords: [开源监控系统, 开源数据库监控, IotDB数据库监控] ## 使用 HertzBeat 对物联网数据库 IoTDB 进行监控实践,5分钟搞定! -### IoTDB 介绍 +### IoTDB 介绍 > Apache IoTDB (Internet of Things Database) 是一款时序数据库管理系统,可以为用户提供数据收集、存储和分析等服务。 -> IoTDB由于其轻量级架构、高性能和高可用的特性,以及与 Hadoop 和 Spark 生态的无缝集成,满足了工业 IoT 领域中海量数据存储、高吞吐量数据写入和复杂数据查询分析的需求。 +> IoTDB由于其轻量级架构、高性能和高可用的特性,以及与 Hadoop 和 Spark 生态的无缝集成,满足了工业 IoT 领域中海量数据存储、高吞吐量数据写入和复杂数据查询分析的需求。 -### HertzBeat 介绍 +### HertzBeat 介绍 > HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 > 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书)。 @@ -23,14 +23,15 @@ keywords: [开源监控系统, 开源数据库监控, IotDB数据库监控] ### 在 HertzBeat 5分钟搞定监控 IoTDB -#### 操作前提,您已拥有 IoTDB 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 IoTDB 环境和 HertzBeat 环境。 -- IoTDB [部署安装文档](https://iotdb.apache.org/UserGuide/V0.13.x/QuickStart/QuickStart.html) -- HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) +- IoTDB [部署安装文档](https://iotdb.apache.org/UserGuide/V0.13.x/QuickStart/QuickStart.html) +- HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) #### 一. 在 IoTDB 端开启`metrics`功能,它将提供 prometheus metrics 形式的接口数据。 -1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server +1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server + ``` # 是否启动监控模块,默认为false enableMetric: true @@ -41,75 +42,72 @@ metricReporterList: - PROMETHEUS ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 -#### 二. 在 HertzBeat 监控页面添加 IoTDB 监控 +#### 二. 
在 HertzBeat 监控页面添加 IoTDB 监控 -1. 点击新增IoTDB监控 +1. 点击新增IoTDB监控 -路径:菜单 -> 数据库监控 -> IoTDB监控 -> 新增IoTDB监控 +路径:菜单 -> 数据库监控 -> IoTDB监控 -> 新增IoTDB监控 -![hertzbeat](/img/blog/monitor-iotdb-1.png) +![hertzbeat](/img/blog/monitor-iotdb-1.png) -2. 配置监控IoTDB所需参数 +2. 配置监控IoTDB所需参数 在监控页面填写 IoTDB **服务IP**,**监控端口**(默认9091),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/iotdb/) https://hertzbeat.com/docs/help/iotdb/ +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/iotdb/) https://hertzbeat.com/docs/help/iotdb/ -![hertzbeat](/img/blog/monitor-iotdb-2.png) +![hertzbeat](/img/blog/monitor-iotdb-2.png) -3. 完成✅,现在我们已经添加好对 IoTDB 的监控了,查看监控列表即可看到我们的添加项。 +3. 完成✅,现在我们已经添加好对 IoTDB 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-iotdb-3.png) +![hertzbeat](/img/blog/monitor-iotdb-3.png) -4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 IoTDB的实时监控指标数据。 +4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 IoTDB的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-iotdb-4.png) +![hertzbeat](/img/blog/monitor-iotdb-4.png) -5. 点击**监控历史详情TAB** 即可浏览 IoTDB的历史监控指标数据图表📈。 +5. 点击**监控历史详情TAB** 即可浏览 IoTDB的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-iotdb-5.png) +![hertzbeat](/img/blog/monitor-iotdb-5.png) **完成DONE!通过上面几步,总结起来其实也就是两步** - **一步开启 IoTDB 端`metrics`功能** -- **另一步在 HertzBeat 监控页面配置IP端口添加监控即可** - +- **另一步在 HertzBeat 监控页面配置IP端口添加监控即可** -**这样我们就完成了对 IoTDB 的监控,我们可以随时查看监控详情指标信息来观测其服务状态,但人不可能是一直去看,总有要休息的时候,监控往往伴随着告警,当监控指标发生异常,监控系统需要能及时通知到负责人** +**这样我们就完成了对 IoTDB 的监控,我们可以随时查看监控详情指标信息来观测其服务状态,但人不可能是一直去看,总有要休息的时候,监控往往伴随着告警,当监控指标发生异常,监控系统需要能及时通知到负责人** -**接下来我们就来一步一步教您配置 HertzBeat 系统里的阈值告警通知** +**接下来我们就来一步一步教您配置 HertzBeat 系统里的阈值告警通知** -#### 三. 在 HertzBeat 系统添加 IoTDB 指标阈值告警 +#### 三. 在 HertzBeat 系统添加 IoTDB 指标阈值告警 -1. 对某个重要指标配置阈值告警 +1. 
对某个重要指标配置阈值告警 -路径:菜单 -> 告警阈值 -> 新增阈值 +路径:菜单 -> 告警阈值 -> 新增阈值 -- 选择配置的指标对象,IotDB监控有非常多的指标,其中有个指标关系到节点的状态 `cluster_node_status` -> `status` (节点状态,1=online 2=offline)。 -- 这里我们就配置当此指标 `status==2` 时发出告警,告警级别为**紧急告警**,一次即触发,具体如下图。 +- 选择配置的指标对象,IotDB监控有非常多的指标,其中有个指标关系到节点的状态 `cluster_node_status` -> `status` (节点状态,1=online 2=offline)。 +- 这里我们就配置当此指标 `status==2` 时发出告警,告警级别为**紧急告警**,一次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-iotdb-6.png) +![hertzbeat](/img/blog/monitor-iotdb-6.png) +2. 新增消息通知接收人 -2. 新增消息通知接收人 +路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 +消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 - -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) - -3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 +![hertzbeat](/img/blog/alert-notice-1.png) -![hertzbeat](/img/blog/alert-notice-2.png) +3. 
配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 +![hertzbeat](/img/blog/alert-notice-2.png) -### 完毕,现在坐等告警消息过来了。叮叮叮叮 +### 完毕,现在坐等告警消息过来了。叮叮叮叮 ``` [HertzBeat告警通知] @@ -121,16 +119,16 @@ metricReporterList: 内容详情 : 监控到 IOTDB 节点 127.0.0.1 状态 OFFLINE, 请及时处理。 ``` -## 小结 +## 小结 -这篇实践文章带我们体验了如何使用 HertzBeat 监控 IoTDB 数据库指标数据,可以发现将 监控-告警-通知 集一体的 HertzBeat 在操作与使用方面更加的便捷,在页面上简单点一点就能把 IoTDB 纳入监控,再也不需要部署多个组件,写多个有门槛的YML配置文件了。 +这篇实践文章带我们体验了如何使用 HertzBeat 监控 IoTDB 数据库指标数据,可以发现将 监控-告警-通知 集一体的 HertzBeat 在操作与使用方面更加的便捷,在页面上简单点一点就能把 IoTDB 纳入监控,再也不需要部署多个组件,写多个有门槛的YML配置文件了。 IoTDB Github: https://github.com/apache/iotdb -HertzBeat Github: https://github.com/apache/hertzbeat +HertzBeat Github: https://github.com/apache/hertzbeat **欢迎了解使用Star支持哦!** 只需要一条docker命令即可安装体验heartbeat : `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` -注意⚠️HertzBeat v1.2.3 版本支持 IoTDB v0.12 v0.13, 由于其v1.0刚发布, 暂未对此版本全部指标兼容。 +注意⚠️HertzBeat v1.2.3 版本支持 IoTDB v0.12 v0.13, 由于其v1.0刚发布, 暂未对此版本全部指标兼容。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md index d8a2b6ca37b..2ddf42275b3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md @@ -9,9 +9,9 @@ tags: [opensource, practice] ## 使用 HertzBeat 对 API 网关 Apache ShenYu 进行监控实践,5分钟搞定! 
-### Apache ShenYu 介绍 +### Apache ShenYu 介绍 -> Apache ShenYu 一个异步的,高性能的,跨语言的,响应式的 API 网关。 +> Apache ShenYu 一个异步的,高性能的,跨语言的,响应式的 API 网关。 - 代理:支持Apache Dubbo,Spring Cloud,gRPC,Motan,SOFA,TARS,WebSocket,MQTT - 安全性:签名,OAuth 2.0,JSON Web令牌,WAF插件 @@ -22,19 +22,18 @@ tags: [opensource, practice] - 集群:NGINX、Docker、Kubernetes - 语言:提供.NET,Python,Go,Java客户端用于API注册 - -### HertzBeat 介绍 +### HertzBeat 介绍 > HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 > 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书)。 -> HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 +> HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 ### 在 HertzBeat 5分钟搞定监控 Apache ShenYu -#### 操作前提,您已拥有 ShenYu 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 ShenYu 环境和 HertzBeat 环境。 -- ShenYu [部署安装文档](https://shenyu.apache.org/zh/docs/deployment/deployment-before) -- HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) +- ShenYu [部署安装文档](https://shenyu.apache.org/zh/docs/deployment/deployment-before) +- HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) #### 一. 在 ShenYu 端开启`metrics`插件,它将提供 metrics 接口数据。 @@ -44,11 +43,11 @@ tags: [opensource, practice] 1. 在网关的 `pom.xml` 文件中添加 `metrics插件` 的依赖。 ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` 2. `metric`插件 采集默认是关闭的, 在网关的配置`yaml`文件中编辑如下内容: @@ -65,83 +64,81 @@ shenyu: jvm_enabled: true #开启jvm的监控指标 ``` -3. 重启 ShenYu网关, 打开浏览器或者用curl 访问 `http://ip:8090`, 就能看到metric数据了。 +3. 重启 ShenYu网关, 打开浏览器或者用curl 访问 `http://ip:8090`, 就能看到metric数据了。 -#### 二. 在 HertzBeat 监控页面添加 ShenYu 监控 +#### 二. 在 HertzBeat 监控页面添加 ShenYu 监控 -1. 点击新增 ShenYu 监控 +1. 点击新增 ShenYu 监控 -路径:菜单 -> 中间件监控 -> ShenYu监控 -> 新增ShenYu监控 +路径:菜单 -> 中间件监控 -> ShenYu监控 -> 新增ShenYu监控 -![hertzbeat](/img/blog/monitor-shenyu-1.png) +![hertzbeat](/img/blog/monitor-shenyu-1.png) -2. 配置监控 ShenYu 所需参数 +2. 
配置监控 ShenYu 所需参数 在监控页面填写 ShenYu **服务IP**,**监控端口**(默认8090),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/shenyu/) https://hertzbeat.com/docs/help/shenyu/ +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/shenyu/) https://hertzbeat.com/docs/help/shenyu/ -![hertzbeat](/img/blog/monitor-shenyu-1.png) +![hertzbeat](/img/blog/monitor-shenyu-1.png) -3. 完成✅,现在我们已经添加好对 ShenYu 的监控了,查看监控列表即可看到我们的添加项。 +3. 完成✅,现在我们已经添加好对 ShenYu 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-shenyu-3.png) +![hertzbeat](/img/blog/monitor-shenyu-3.png) -4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 ShenYu 的实时监控指标数据。 +4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 ShenYu 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-shenyu-4.png) +![hertzbeat](/img/blog/monitor-shenyu-4.png) -5. 点击**监控历史详情TAB** 即可浏览 ShenYu 的历史监控指标数据图表📈。 +5. 点击**监控历史详情TAB** 即可浏览 ShenYu 的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-shenyu-5.png) +![hertzbeat](/img/blog/monitor-shenyu-5.png) ![hertzbeat](/img/blog/monitor-shenyu-6.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用两步** - **第一步开启 ShenYu 端`metrics`插件功能** -- **第二步在 HertzBeat 监控页面配置IP端口添加监控即可** +- **第二步在 HertzBeat 监控页面配置IP端口添加监控即可** :::tip 通过上面的两步我们就完成了对 Apache ShenYu 的监控,我们可以在 HertzBeat 随时查看监控详情指标信息来观测其服务状态。 当然只是看肯定是不完美的,监控往往伴随着告警阈值,当 ShenYu 的某些指标超出我们的期望值或异常时,能及时的通知到我们对应的负责人,负责人收到通知处理问题,这样才是一个完整的监控告警流程。 ::: -**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,让 ShenYu 的指标异常时,及时通知给我们** - -#### 三. 在 HertzBeat 系统添加 ShenYu 指标阈值告警 +**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,让 ShenYu 的指标异常时,及时通知给我们** -1. 对某个重要指标配置告警阈值 +#### 三. 在 HertzBeat 系统添加 ShenYu 指标阈值告警 -路径:菜单 -> 告警阈值 -> 新增阈值 +1. 
对某个重要指标配置告警阈值 -- 选择配置的指标对象,ShenYu 监控有非常多的指标,我们举例对 `打开的文件描述符的数量` `process_open_fds` -> `value` 这个指标进行阈值设置, 当服务端打开文件描述符数量大于3000时发出告警。 -- 这里我们就配置当此指标`process_open_fds` 的 `value>3000` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 +路径:菜单 -> 告警阈值 -> 新增阈值 -![hertzbeat](/img/blog/monitor-shenyu-7.png) +- 选择配置的指标对象,ShenYu 监控有非常多的指标,我们举例对 `打开的文件描述符的数量` `process_open_fds` -> `value` 这个指标进行阈值设置, 当服务端打开文件描述符数量大于3000时发出告警。 +- 这里我们就配置当此指标`process_open_fds` 的 `value>3000` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 +![hertzbeat](/img/blog/monitor-shenyu-7.png) 2. 新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 +> 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 +路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 +消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) +![hertzbeat](/img/blog/alert-notice-1.png) -3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 +3. 
配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) - +![hertzbeat](/img/blog/alert-notice-2.png) -### 完毕,现在坐等告警消息过来啦。叮叮叮叮 +### 完毕,现在坐等告警消息过来啦。叮叮叮叮 ``` [HertzBeat告警通知] @@ -153,14 +150,14 @@ shenyu: 内容详情 : 请注意⚠️ ShenYu网关打开的文件描述符的数量为 3044 超过3000 ``` -## 小结 +## 小结 :::tip 这篇实践文章带我们体验了如何使用 HertzBeat 监控 Apache ShenYu 指标数据,可以发现将 `监控-告警-通知` 集一体的 HertzBeat 在操作与使用方面更加的便捷,在页面上简单点一点就能把 ShenYu 纳入监控,再也不需要部署多个组件,写多个有门槛的YML配置文件了。 ::: Apache ShenYu Github: https://github.com/apache/shenyu -HertzBeat Github: https://github.com/apache/hertzbeat +HertzBeat Github: https://github.com/apache/hertzbeat **欢迎了解使用Star支持哦!** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md index 16c8464fb36..60663c6041f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md @@ -9,33 +9,32 @@ tags: [opensource, practice] ## 使用 HertzBeat 对 线程池框架 DynamicTp 进行监控实践,5分钟搞定! -### 线程池框架 DynamicTp 介绍 +### 线程池框架 DynamicTp 介绍 -> DynamicTp 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 +> DynamicTp 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 -- 支持对运行中线程池参数的动态修改,实时生效。 -- 实时监控线程池的运行状态,触发设置的报警策略时报警,报警信息推送办公平台。 -- 定时采集线程池指标数据,配合像 grafana 这种可视化监控平台做大盘监控。 +- 支持对运行中线程池参数的动态修改,实时生效。 +- 实时监控线程池的运行状态,触发设置的报警策略时报警,报警信息推送办公平台。 +- 定时采集线程池指标数据,配合像 grafana 这种可视化监控平台做大盘监控。 +### HertzBeat 介绍 -### HertzBeat 介绍 +> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 -> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 +- 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 +- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
+- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -- 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 -- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? -- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 +### 在 HertzBeat 5分钟搞定监控 DynamicTp -### 在 HertzBeat 5分钟搞定监控 DynamicTp +#### 操作前提,您已拥有 DynamicTp 环境和 HertzBeat 环境。 -#### 操作前提,您已拥有 DynamicTp 环境和 HertzBeat 环境。 - -- DynamicTp [集成接入文档](https://dynamictp.cn/guide/use/quick-start.html) -- HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) +- DynamicTp [集成接入文档](https://dynamictp.cn/guide/use/quick-start.html) +- HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) #### 一. 在 DynamicTp 端暴露出`DynamicTp`指标接口 `/actuator/dynamic-tp`,它将提供 metrics 接口数据。 -1. 开启 SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 +1. 开启 SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 ```yaml management: @@ -79,81 +78,79 @@ management: ] ``` -#### 二. 在 HertzBeat 监控页面添加 DynamicTp 线程池监控 +#### 二. 在 HertzBeat 监控页面添加 DynamicTp 线程池监控 -1. 点击新增 DynamicTp 监控 +1. 点击新增 DynamicTp 监控 -路径:菜单 -> 中间件监控 -> DynamicTp监控 -> 新增DynamicTp监控 +路径:菜单 -> 中间件监控 -> DynamicTp监控 -> 新增DynamicTp监控 ![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) -2. 配置监控 DynamicTp 所需参数 +2. 配置监控 DynamicTp 所需参数 在监控页面填写 DynamicTp **服务IP**,**监控端口**(默认8080),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/dynamic_tp/) https://hertzbeat.com/docs/help/dynamic_tp/ +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/dynamic_tp/) https://hertzbeat.com/docs/help/dynamic_tp/ -![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) +![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) -3. 完成✅,现在我们已经添加好对 DynamicTp 的监控了,查看监控列表即可看到我们的添加项。 +3. 完成✅,现在我们已经添加好对 DynamicTp 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) +![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) -4. 
点击监控列表项的**操作**->**监控详情图标** 即可浏览 DynamicTp线程池 的实时监控指标数据。 +4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 DynamicTp线程池 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-dynamic-tp-3.png) +![hertzbeat](/img/blog/monitor-dynamic-tp-3.png) -5. 点击**监控历史详情TAB** 即可浏览 DynamicTp线程池 的历史监控指标数据图表📈。 +5. 点击**监控历史详情TAB** 即可浏览 DynamicTp线程池 的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-dynamic-tp-4.png) +![hertzbeat](/img/blog/monitor-dynamic-tp-4.png) ![hertzbeat](/img/blog/monitor-dynamic-tp-5.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用两步** - **第一步暴露 DynamicTp 端`metrics`端点`/actuator/dynamic-tp`** -- **第二步在 HertzBeat 监控页面配置IP端口添加监控即可** +- **第二步在 HertzBeat 监控页面配置IP端口添加监控即可** :::tip 通过上面的两步我们就完成了对 DynamicTp 的监控,我们可以在 HertzBeat 随时查看监控详情指标信息来观测其服务状态。 当然只是看肯定是不完美的,监控往往伴随着告警阈值,当 DynamicTp 的线程池指标超出我们的期望值或异常时,能及时的通知到我们对应的负责人,负责人收到通知处理问题,这样才是一个完整的监控告警流程。 ::: -**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,让 DynamicTp线程池 的指标异常时,及时通知给我们** +**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,让 DynamicTp线程池 的指标异常时,及时通知给我们** -#### 三. 在 HertzBeat 系统添加 DynamicTp线程池 指标阈值告警 +#### 三. 在 HertzBeat 系统添加 DynamicTp线程池 指标阈值告警 -1. 对某个重要指标配置告警阈值 +1. 对某个重要指标配置告警阈值 -路径:菜单 -> 告警阈值 -> 新增阈值 +路径:菜单 -> 告警阈值 -> 新增阈值 -- 选择配置的指标对象,DynamicTp监控主要是一些线程池相关指标,我们举例对 `运行超时线程数量` `thread_pool_running` -> `run_timeout_count` 这个指标进行阈值设置, 当线程运行超时数量大于1时发出告警。 -- 这里我们就配置当此指标`thread_pool_running` 的 `run_timeout_count>1` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 - -![hertzbeat](/img/blog/monitor-dynamic-tp-6.png) +- 选择配置的指标对象,DynamicTp监控主要是一些线程池相关指标,我们举例对 `运行超时线程数量` `thread_pool_running` -> `run_timeout_count` 这个指标进行阈值设置, 当线程运行超时数量大于1时发出告警。 +- 这里我们就配置当此指标`thread_pool_running` 的 `run_timeout_count>1` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 +![hertzbeat](/img/blog/monitor-dynamic-tp-6.png) 2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 +> 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 +路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 +消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) +![hertzbeat](/img/blog/alert-notice-1.png) -3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 +3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) - +![hertzbeat](/img/blog/alert-notice-2.png) -### 完毕,现在坐等告警消息过来啦。叮叮叮叮 +### 完毕,现在坐等告警消息过来啦。叮叮叮叮 ``` [HertzBeat告警通知] @@ -165,14 +162,14 @@ management: 内容详情 : DynamicTp has run timeout thread, count is 2 ``` -## 小结 +## 小结 :::tip 这篇实践文章带我们体验了如何使用 HertzBeat 监控 DynamicTp线程池 指标数据,可以发现集 `监控-告警-通知` 的 HertzBeat 在操作与使用方面更加的便捷,只需页面上简单点一点就能把 DynamicTp线程池 纳入监控并告警通知,再也不需要部署多个组件写YML配置文件那些繁琐操作了。 ::: DynamicTp Github: https://github.com/dromara/dynamic-tp -HertzBeat Github: https://github.com/apache/hertzbeat +HertzBeat Github: https://github.com/apache/hertzbeat **欢迎了解使用Star支持哦!** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md index abdd58e8f1c..72b996e62c5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md @@ -64,13 +64,10 @@ github:TJxiaobao 首先要在这里感谢🙏花城师兄,因为当时自己想学习一些优秀的 `Java` 
项目。然后在吃饭的时候向师兄请教了一下有没有好的项目推荐,恰好这时师兄把我推荐给了tom哥。当我亲自使用了 `hertzbeat` 的时候真的是发现了新的大陆,相比较于自己之前接触简单的 `Java` 项目,不论是 `hertzbeat` 的架构设计,还是它的实用功能都深深折服了我。此时一颗 `想要贡献自己的一份力量` 的种子已经在我的心中种下。 - - ### 🌻 开始提交PR 在 Oct 20, 2022 是我第一次提交 `PR` 的时间,虽然本次 `PR` 是简单的翻译注释,看着技术含量不是很高。但是他也能让我更快的熟悉项目的业务逻辑和架构设计,能为以后的贡献打下坚实的基础。而这次 `PR` 也是我迈向开源的第一步,也是让我爱上开源的起点! - ### 🌻 持续的开源贡献和收获 从第一次 `PR` 到现在,参加 `hertzbeat` 开源项目已经有一段时间了,也贡献了一小部分,也成长收获了很多。具体如下。 @@ -88,12 +85,10 @@ github:TJxiaobao - 2、开阔自己的眼界。 - 3、从大佬们身上学到了很多知识。 - ### 🌻 感谢社区小伙伴 感谢无偿帮助过我或给过我启发的小伙伴(排名不分先后):tomsun28(tom哥),花城(师兄) - ### 🌻 对新人的一点建议 首先我也是一枚新手村的萌新啦,但是我可以把我的一些经验分享给大家,希望能给大家有所帮助。 @@ -103,14 +98,13 @@ github:TJxiaobao - 3、慢慢的尝试阅读源码,并理解。 - 4、如果遇见bug,可以直接反馈到 isses,也可以自己尝试解决嘿嘿。 - ## What is HertzBeat? > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需Agent的实时监控告警工具。应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Discord Slack Telegram)。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 > 您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
- +> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 **Github: https://github.com/apache/hertzbeat** @@ -118,10 +112,11 @@ github:TJxiaobao ## ⛄ Supported -- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- 和更多您的自定义监控。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 +- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 +- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB +- Linux, Ubuntu, CentOS, Windows +- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +- Kubernetes, Docker +- 和更多您的自定义监控。 +- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md index df5b379f995..b41bc15de9a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md @@ -10,100 +10,97 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ## 使用开源实时监控工具 HertzBeat 对 Mysql 数据库监控告警实践,5分钟搞定! 
-### Mysql 数据库介绍 +### Mysql 数据库介绍 > MySQL是一个开源关系型数据库管理系统,由瑞典MySQL AB 公司开发,属于 Oracle 旗下产品。MySQL 是最流行的开源关系型数据库管理系统之一,在 WEB 应用方面,MySQL是最好的 RDBMS (Relational Database Management System,关系数据库管理系统) 应用软件之一。 +### HertzBeat 介绍 -### HertzBeat 介绍 +> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 -> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 +- 集**监控-告警-通知为一体**,支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 +- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? +- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -- 集**监控-告警-通知为一体**,支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 -- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? -- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 +### 在 HertzBeat 5分钟搞定对 Mysql 数据库监控 -### 在 HertzBeat 5分钟搞定对 Mysql 数据库监控 +#### 操作前提,您已拥有 Mysql 环境和 HertzBeat 环境。 -#### 操作前提,您已拥有 Mysql 环境和 HertzBeat 环境。 - -- Mysql [安装部署文档](https://www.runoob.com/mysql/mysql-install.html) +- Mysql [安装部署文档](https://www.runoob.com/mysql/mysql-install.html) - HertzBeat [安装部署文档](https://hertzbeat.com/docs/start/docker-deploy) -#### 在开源监控系统 HertzBeat 监控页面添加对 Mysql 数据库监控 +#### 在开源监控系统 HertzBeat 监控页面添加对 Mysql 数据库监控 -1. 点击新增 Mysql 监控 +1. 点击新增 Mysql 监控 -路径:菜单 -> 数据库监控 -> Mysql数据库 -> 新增Mysql数据库监控 +路径:菜单 -> 数据库监控 -> Mysql数据库 -> 新增Mysql数据库监控 ![hertzbeat](/img/blog/monitor-mysql-1.png) -2. 配置新增监控 Mysql 数据库所需参数 +2. 配置新增监控 Mysql 数据库所需参数 在监控页面填写 Mysql **服务IP**,**监控端口**(默认3306),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/mysql/) https://hertzbeat.com/docs/help/mysql/ +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/mysql/) https://hertzbeat.com/docs/help/mysql/ -![hertzbeat](/img/blog/monitor-mysql-2.png) +![hertzbeat](/img/blog/monitor-mysql-2.png) -3. 完成✅,现在我们已经添加好对 Mysql数据库 的监控了,查看监控列表即可看到我们的添加项。 +3. 
完成✅,现在我们已经添加好对 Mysql数据库 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-mysql-1.png) +![hertzbeat](/img/blog/monitor-mysql-1.png) -4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 Mysql数据库 的实时监控指标数据。 +4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 Mysql数据库 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-mysql-3.png) +![hertzbeat](/img/blog/monitor-mysql-3.png) -5. 点击**监控历史详情TAB** 即可浏览 Mysql数据库 的历史监控指标数据图表📈。 +5. 点击**监控历史详情TAB** 即可浏览 Mysql数据库 的历史监控指标数据图表📈。 ![hertzbeat](/img/blog/monitor-mysql-4.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用一步即可** -- **在 HertzBeat 监控页面配置IP端口账户密码添加 Mysql 监控即可** +- **在 HertzBeat 监控页面配置IP端口账户密码添加 Mysql 监控即可** :::tip 通过上面的两步我们就完成了对 Mysql数据库 的监控,我们可以在 HertzBeat 随时查看监控详情指标信息来观测其服务状态。 当然只是看肯定是不完美的,监控往往伴随着告警阈值,当 Mysql 数据库的指标超出我们的期望值或异常时,能及时的通知到我们对应的负责人,负责人收到通知处理问题,这样才是一个完整的监控告警流程。 ::: -**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,让及时发现 Mysql 数据库的指标异常时,及时通知给我们** +**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,让及时发现 Mysql 数据库的指标异常时,及时通知给我们** -#### 三. 在 HertzBeat 系统添加 Mysql 数据库指标阈值告警 +#### 三. 在 HertzBeat 系统添加 Mysql 数据库指标阈值告警 -1. 对某个重要指标配置告警阈值 +1. 对某个重要指标配置告警阈值 -路径:菜单 -> 阈值规则 -> 新增阈值 +路径:菜单 -> 阈值规则 -> 新增阈值 -- 选择配置的指标对象,Mysql 数据库监控主要是数据库性能等相关指标,我们举例对 `查询缓存命中率` `cache` -> `query_cache_hit_rate` 这个指标进行阈值设置, 当Mysql的查询缓存命中率很低小于30%时发出告警。 -- 这里我们就配置当此指标`cache` 的 `query_cache_hit_rate<30` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 +- 选择配置的指标对象,Mysql 数据库监控主要是数据库性能等相关指标,我们举例对 `查询缓存命中率` `cache` -> `query_cache_hit_rate` 这个指标进行阈值设置, 当Mysql的查询缓存命中率很低小于30%时发出告警。 +- 这里我们就配置当此指标`cache` 的 `query_cache_hit_rate<30` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-mysql-5.png) +![hertzbeat](/img/blog/monitor-mysql-5.png) ![hertzbeat](/img/blog/monitor-mysql-6.png) - 2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 +> 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 +路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 +消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) +![hertzbeat](/img/blog/alert-notice-1.png) -3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 +3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) - +![hertzbeat](/img/blog/alert-notice-2.png) -### 完毕,现在坐等告警消息过来啦。叮叮叮叮 +### 完毕,现在坐等告警消息过来啦。叮叮叮叮 ``` [HertzBeat告警通知] @@ -115,17 +112,17 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] 内容详情 : mysql db query_cache_hit_rate is too low, now is 20. 
``` -## 小结 +## 小结 :::tip 这篇实践文章带我们体验了如何使用开源实时监控工具 HertzBeat 来监控 Mysql 数据库指标数据,可以发现集 `监控-告警-通知` 的 HertzBeat 在操作与使用方面更加的便捷,只需页面上简单点一点就能把 Mysql 数据库纳入监控并告警通知,再也不需要部署多个组件编写配置文件那些繁琐操作了。 ::: Mysql Github: https://github.com/mysql/mysql-server -HertzBeat Github: https://github.com/apache/hertzbeat +HertzBeat Github: https://github.com/apache/hertzbeat **欢迎了解使用支持Star哦!** -> 只需要一条docker命令即可安装体验heartbeat: +> 只需要一条docker命令即可安装体验heartbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md index 3b491e6fe48..c80d7a5fcbb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md @@ -10,48 +10,48 @@ keywords: [开源监控系统, 操作系统监控, Linux监控] ## 使用开源实时监控工具 HertzBeat 对 Linux 操作系统的监控告警实践,5分钟搞定! -### HertzBeat 介绍 +### HertzBeat 介绍 -> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 +> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 -- 集**监控-告警-通知为一体**,支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 -- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? -- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 +- 集**监控-告警-通知为一体**,支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 +- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
+- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -Github: https://github.com/apache/hertzbeat +Github: https://github.com/apache/hertzbeat -### 在 HertzBeat 5分钟搞定对 Linux 的监控 +### 在 HertzBeat 5分钟搞定对 Linux 的监控 -#### 操作前提,您已拥有 Linux 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 Linux 环境和 HertzBeat 环境。 - HertzBeat [安装部署文档](https://hertzbeat.com/docs/start/docker-deploy) -#### 在开源监控系统 HertzBeat 监控页面添加对 Linux 操作系统监控 +#### 在开源监控系统 HertzBeat 监控页面添加对 Linux 操作系统监控 -1. 点击新增 Linux 监控 +1. 点击新增 Linux 监控 -路径:菜单 -> 操作系统监控 -> Linux操作系统 -> 新增Linux操作系统监控 +路径:菜单 -> 操作系统监控 -> Linux操作系统 -> 新增Linux操作系统监控 ![hertzbeat](/img/blog/monitor-linux-1.png) -2. 配置新增监控 Linux 所需参数 +2. 配置新增监控 Linux 所需参数 在监控页面填写 Linux **对端IP**,**SSH端口**(默认22),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 https://hertzbeat.com/docs/help/mysql/ +其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 https://hertzbeat.com/docs/help/mysql/ -![hertzbeat](/img/blog/monitor-linux-2.png) +![hertzbeat](/img/blog/monitor-linux-2.png) -3. 完成✅,现在我们已经添加好对 Linux 的监控了,查看监控列表即可看到我们的添加项。 +3. 完成✅,现在我们已经添加好对 Linux 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-linux-3.png) +![hertzbeat](/img/blog/monitor-linux-3.png) -4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 Linux 的实时监控指标数据。 +4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 Linux 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-linux-4.png) +![hertzbeat](/img/blog/monitor-linux-4.png) ![hertzbeat](/img/blog/monitor-linux-7.png) -5. 点击**监控历史详情TAB** 即可浏览 Linux 的历史监控指标数据图表📈。 +5. 
点击**监控历史详情TAB** 即可浏览 Linux 的历史监控指标数据图表📈。 ![hertzbeat](/img/blog/monitor-linux-5.png) @@ -59,113 +59,110 @@ Github: https://github.com/apache/hertzbeat **DONE!完成啦!不需要我们去部署agent或者各种繁琐操作,是不是很简单** -- **只需一步在 HertzBeat 监控页面配置IP端口账户密码添加 Linux 监控即可** +- **只需一步在 HertzBeat 监控页面配置IP端口账户密码添加 Linux 监控即可** ### Linux 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 
从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | :::tip 通过上面我们就完成了对 Linux 的监控,我们可以在 HertzBeat 随时查看Linux的各种指标状态和可用性。 当然不可能人工一直实时查看指标,监控往往伴随着告警阈值,当 Linux 的性能指标超出我们的阈值或Linux本身异常时,能及时的通知到我们对应的负责人,负责人收到通知处理,这样才是一个完整的监控告警流程。 ::: -**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,当 Linux 的指标异常时,及时通知给我们** +**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,当 Linux 的指标异常时,及时通知给我们** -#### 三. 在 HertzBeat 系统添加 Linux 指标阈值告警 +#### 三. 在 HertzBeat 系统添加 Linux 指标阈值告警 -1. 对某个重要指标配置告警阈值 +1. 对某个重要指标配置告警阈值 -路径:菜单 -> 阈值规则 -> 新增阈值 +路径:菜单 -> 阈值规则 -> 新增阈值 -- 选择配置的指标对象,Linux 监控主要是cpu 内存 磁盘 网络性能等相关指标,我们举例对 `CPU利用率` `cpu` -> `usage` 这个指标进行阈值设置, 当Linux cpu利用率大于90%时发出告警。 -- 这里我们就配置当此指标`cpu` 的 `usage>90` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 +- 选择配置的指标对象,Linux 监控主要是cpu 内存 磁盘 网络性能等相关指标,我们举例对 `CPU利用率` `cpu` -> `usage` 这个指标进行阈值设置, 当Linux cpu利用率大于90%时发出告警。 +- 这里我们就配置当此指标`cpu` 的 `usage>90` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-linux-8.png) +![hertzbeat](/img/blog/monitor-linux-8.png) ![hertzbeat](/img/blog/monitor-linux-9.png) - 2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 +> 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 +路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 +消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) +![hertzbeat](/img/blog/alert-notice-1.png) -3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 +3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) - +![hertzbeat](/img/blog/alert-notice-2.png) -### 完毕,现在坐等告警消息过来啦。叮叮叮叮 +### 完毕,现在坐等告警消息过来啦。叮叮叮叮 ``` [HertzBeat告警通知] @@ -177,23 +174,23 @@ Github: https://github.com/apache/hertzbeat 内容详情 : The linux cpu usage is too high. now is 95. ``` -## 小结 +## 小结 :::tip 这篇实践文章带我们体验了如何使用开源实时监控工具 HertzBeat 来监控 Linux 指标数据,可以发现集 `监控-告警-通知` 的 HertzBeat 在操作与使用方面更加的便捷,只需页面上简单点一点就能把 Linux 纳入监控并告警通知,再也不需要部署多个组件编写配置文件那些繁琐操作了。 ::: -> 只需要一条docker命令即可安装体验heartbeat: +> 只需要一条docker命令即可安装体验heartbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` ## What is HertzBeat? > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需Agent的实时监控告警工具。应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Discord Slack Telegram)。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 > 您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
- +> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 **Github: https://github.com/apache/hertzbeat** @@ -201,10 +198,11 @@ Github: https://github.com/apache/hertzbeat ## ⛄ Supported -- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- 和更多您的自定义监控。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 +- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 +- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB +- Linux, Ubuntu, CentOS, Windows +- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +- Kubernetes, Docker +- 和更多您的自定义监控。 +- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md index a7843b11337..90c0b4e7f27 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md @@ -10,25 +10,20 @@ keywords: [开源监控系统, 告警系统, Linux监控] 官网: hertzbeat.com | tancloud.cn - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/a9629ef5bb6e486cacddb899f1495c6e~tplv-k3u1fbpfcp-zoom-1.image) - - ### What is HertzBeat? 
> HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 > 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 > 支持更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 - +> > 您相信只需定义YML就能立刻适配一款K8s或Docker等新的监控类型吗? - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** @@ -49,8 +44,7 @@ keywords: [开源监控系统, 告警系统, Linux监控] - 修复若干BUG,更完善的文档,重构了代码。 ----- - +--- 只需要一条docker命令即可安装体验heartbeat `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` @@ -89,7 +83,6 @@ Feature: 26. [[webapp] change alert define trigger times from max 10 to max 999 #706](https://github.com/apache/hertzbeat/pull/706) @Grass-Life 27. [[doc] change default locale i18n from zh-cn to en #725](https://github.com/apache/hertzbeat/pull/725) - Bugfix. 1. [[collector] bugfix oracle query error: ORA-01000 happen #618](https://github.com/apache/hertzbeat/pull/618) @@ -115,6 +108,7 @@ Bugfix. 21. [bugfix job metrics set concurrent modification exception #723](https://github.com/apache/hertzbeat/pull/723) 22. [[script] modified the linux memory metrics specified script code #719](https://github.com/apache/hertzbeat/pull/719) 23. [[webapp] bugfix the cover of the big screen is too small #724](https://github.com/apache/hertzbeat/pull/724) + ---- 升级注意⚠️. @@ -122,6 +116,7 @@ Bugfix. 
对于之前使用iotdb或者tdengine来存储指标数据的用户,需要修改 application.yml 来关闭JPA存储 `warehouse.store.jpa.enabled` 如下: 修改 `application.yml` 并设置 `warehouse.store.jpa.enabled` 参数为 false + ``` warehouse: store: @@ -130,23 +125,24 @@ warehouse: ``` 执行SQL脚本 + ``` ALTER table hzb_monitor modify job_id bigint default null; COMMIT; ``` ----- +--- ## ⛄ Supported -- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB, Redis集群 -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch -- 和更多你的自定义监控。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 +- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 +- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB, Redis集群 +- Linux, Ubuntu, CentOS, Windows +- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +- Kubernetes, Docker +- Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch +- 和更多你的自定义监控。 +- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 ---- diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md index 3c05f91b58f..49c2d8121f1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md @@ -10,21 +10,21 @@ keywords: [开源监控系统, SpringBoot监控, 监控告警] ## 使用开源实时监控工具 HertzBeat 对 SpringBoot2 应用的监控告警实践,5分钟搞定! 
-### HertzBeat 介绍 +### HertzBeat 介绍 -> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 +> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 -- 集**监控-告警-通知为一体**,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 -- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? -- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 +- 集**监控-告警-通知为一体**,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Slack Discord Telegram)。 +- 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? +- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -Github: https://github.com/apache/hertzbeat +Github: https://github.com/apache/hertzbeat -### 在 HertzBeat 5分钟搞定对 SpringBoot2 应用的监控 +### 在 HertzBeat 5分钟搞定对 SpringBoot2 应用的监控 -#### 操作前提,您已拥有 SpringBoot2 应用环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 SpringBoot2 应用环境和 HertzBeat 环境。 -- HertzBeat [安装部署文档](https://hertzbeat.com/docs/start/docker-deploy) +- HertzBeat [安装部署文档](https://hertzbeat.com/docs/start/docker-deploy) #### 一. 在 SpringBoot2 应用端暴露出`actuator`指标接口,它将提供 metrics 接口数据。 @@ -79,82 +79,79 @@ management: } ``` -#### 在开源监控系统 HertzBeat 监控页面添加对 SpringBoot2 应用监控 +#### 在开源监控系统 HertzBeat 监控页面添加对 SpringBoot2 应用监控 -1. 点击新增 SpringBoot2 监控 +1. 点击新增 SpringBoot2 监控 -路径:菜单 -> 应用服务监控 -> SpringBoot2 -> 新增SpringBoot2监控 +路径:菜单 -> 应用服务监控 -> SpringBoot2 -> 新增SpringBoot2监控 ![hertzbeat](/img/blog/monitor-springboot2-1.png) -2. 配置新增监控 SpringBoot2 所需参数 +2. 配置新增监控 SpringBoot2 所需参数 在监控页面填写 SpringBoot2应用 **对端IP**,**服务端口**(默认8080),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 https://hertzbeat.com/docs/help/ +其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 https://hertzbeat.com/docs/help/ -![hertzbeat](/img/blog/monitor-springboot2-2.png) +![hertzbeat](/img/blog/monitor-springboot2-2.png) -3. 完成✅,现在我们已经添加好对 SpringBoot2应用 的监控了,查看监控列表即可看到我们的添加项。 +3. 
完成✅,现在我们已经添加好对 SpringBoot2应用 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-springboot2-3.png) +![hertzbeat](/img/blog/monitor-springboot2-3.png) -4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 SpringBoot2应用 的实时监控指标数据。 +4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 SpringBoot2应用 的实时监控指标数据。 ![hertzbeat](/img/blog/monitor-springboot2-4.png) -5. 点击**监控历史详情TAB** 即可浏览 SpringBoot2应用 的历史监控指标数据图表📈。 +5. 点击**监控历史详情TAB** 即可浏览 SpringBoot2应用 的历史监控指标数据图表📈。 ![hertzbeat](/img/blog/monitor-springboot2-5.png) **DONE!完成啦!不需要我们去部署agent或者各种繁琐操作,是不是很简单** -- **只需一步在 HertzBeat 监控页面配置IP端口添加 SpringBoot2应用 监控即可** - +- **只需一步在 HertzBeat 监控页面配置IP端口添加 SpringBoot2应用 监控即可** :::tip 通过上面我们就完成了对 SpringBoot2应用 的监控,我们可以在 HertzBeat 随时查看SpringBoot2应用的各种指标状态和可用性。 当然不可能人工一直实时查看指标,监控往往伴随着告警阈值,当 SpringBoot2应用 的性能指标超出我们的阈值或SpringBoot2应用本身异常时,能及时的通知到我们对应的负责人,负责人收到通知处理,这样才是一个完整的监控告警流程。 ::: -**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,当 SpringBoot2应用 的指标异常时,及时通知给我们** +**接下来我们就来一步一步演示如何配置 HertzBeat 系统里的阈值告警通知,当 SpringBoot2应用 的指标异常时,及时通知给我们** -#### 三. 在 HertzBeat 系统添加 SpringBoot2应用 指标阈值告警 +#### 三. 在 HertzBeat 系统添加 SpringBoot2应用 指标阈值告警 -1. 对某个重要指标配置告警阈值 +1. 对某个重要指标配置告警阈值 -路径:菜单 -> 阈值规则 -> 新增阈值 +路径:菜单 -> 阈值规则 -> 新增阈值 -- 选择配置的指标对象,SpringBoot2应用 监控主要是 堆栈内存 线程等相关指标,我们举例对 `状态线程数` `threads` -> `threads` 这个指标进行阈值设置, 当`runnable`状态的线程数量大于300时发出告警。 -- 这里我们就配置当此指标`size`,`state` 的 `equals(state,"runnable"") && size>300` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 +- 选择配置的指标对象,SpringBoot2应用 监控主要是 堆栈内存 线程等相关指标,我们举例对 `状态线程数` `threads` -> `threads` 这个指标进行阈值设置, 当`runnable`状态的线程数量大于300时发出告警。 +- 这里我们就配置当此指标`size`,`state` 的 `equals(state,"runnable"") && size>300` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-springboot2-6.png) +![hertzbeat](/img/blog/monitor-springboot2-6.png) ![hertzbeat](/img/blog/monitor-springboot2-7.png) - 2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 +> 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 +路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 +消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) +![hertzbeat](/img/blog/alert-notice-1.png) -3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 +3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) - +![hertzbeat](/img/blog/alert-notice-2.png) -### 完毕,现在坐等告警消息过来啦。叮叮叮叮 +### 完毕,现在坐等告警消息过来啦。叮叮叮叮 ``` [HertzBeat告警通知] @@ -166,17 +163,17 @@ management: 内容详情 : The springboot2 service's runnable state threads num is over 300, now is 444. ``` -## 小结 +## 小结 :::tip 这篇实践文章带我们体验了如何使用开源实时监控工具 HertzBeat 来监控 SpringBoot2应用 指标数据,可以发现集 `监控-告警-通知` 的 HertzBeat 在操作与使用方面更加的便捷,只需页面上简单点一点就能把 SpringBoot2应用 纳入监控并告警通知,再也不需要部署多个组件编写配置文件那些繁琐操作了。 ::: -> 只需要一条docker命令即可安装体验heartbeat: +> 只需要一条docker命令即可安装体验heartbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` -## 更强大 +## 更强大 > 通过上面的简单步骤我们实现了对SpringBoot2的监控,但里面的内置指标固定不满足需要,是否能自定义监控更多SpringBoot2的指标呢?答案当然是可以的,通过在页面上的**监控定义**->**SpringBoot2**随时通过编辑如下的YML配置文件自定义添加修改想要监控的性能指标。 @@ -185,10 +182,10 @@ management: ## What is HertzBeat? 
> [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需Agent的实时监控告警工具。应用服务,数据库,操作系统,中间件,云原生,网络等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Discord Slack Telegram)。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 > 您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? - +> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 **Github: https://github.com/apache/hertzbeat** @@ -196,11 +193,12 @@ management: ## ⛄ Supported -- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- CiscoSwitch, HpeSwitch, HuaweiSwitch, TpLinkSwitch -- 和更多的自定义监控。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 +- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 +- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB +- Linux, Ubuntu, CentOS, Windows +- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +- Kubernetes, Docker +- CiscoSwitch, HpeSwitch, HuaweiSwitch, TpLinkSwitch +- 和更多的自定义监控。 +- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md index 8cf642bc3a4..d68058fec9d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md @@ -10,25 +10,20 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 官网: hertzbeat.com | tancloud.cn - 
![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/a9629ef5bb6e486cacddb899f1495c6e~tplv-k3u1fbpfcp-zoom-1.image) - - ### What is HertzBeat? > HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 > 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 > 支持更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 - +> > 您相信只需定义YML就能立刻适配一款K8s或Docker等新的监控类型吗? - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** @@ -47,15 +42,14 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 若之前使用的TDengine时序数据库,需升级至TDengine3.0+ - 需要执行SQL升级脚本 + ``` ALTER table hzb_alert_define modify field varchar(255) default null; COMMIT; ``` ----- - +--- ## ⛄ 已支持 > 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! @@ -70,7 +64,7 @@ COMMIT; - 和更多自定义监控模版。 - 通知支持 Discord Slack Telegram 邮件 钉钉 微信 飞书 短信 Webhook。 ----- +--- **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md index 6035efe29ed..ad93e35571a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md @@ -56,14 +56,13 @@ $ docker run -p 4000-4004:4000-4004 \ 2. 使用```$ docker ps | grep greptime```查看 GreptimeDB 是否启动成功 - #### 安装部署 HertzBeat 具体可以参考 [官方文档](https://hertzbeat.com/zh-cn/docs/start/docker-deploy) 1. 
Docker 安装 HertzBeat -```shell +```shell $ docker run -d -p 1157:1157 \ -e LANG=zh_CN.UTF-8 \ -e TZ=Asia/Shanghai \ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md index 81d063c94af..03509304667 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md @@ -10,7 +10,6 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 官网: hertzbeat.com | tancloud.cn - ![hertzBeat](/img/home/0.png) ### HertzBeat 介绍 @@ -19,13 +18,11 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] > 致力于**易用友好**,全 WEB 页面操作,鼠标点一点就能监控告警,零上手学习成本。 > 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 > 支持更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 - ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** @@ -34,7 +31,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] **这次累计 27 位小伙伴们的辛苦贡献才出来了这个令人欣喜的版本。 感谢他们!爱心💗** -这个版本我们支持对**freebsd, debian, opensuse, redhat, apache doris**等新的监控类型和指标。 +这个版本我们支持对**freebsd, debian, opensuse, redhat, apache doris**等新的监控类型和指标。 - 支持WEB页面配置邮件服务器,取代之前的文件配置 - 支持告警收敛,是否遇到了重复告警频繁发送,有了告警收敛马上解决 @@ -42,11 +39,9 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] - 新的监控中心页面,聚合所有监控类型,不用像之前那样切来切去了。 - 支持标签分组展示,把同一业务类别的监控们分组标记,统一管理。 - 阈值配置不仅仅有表达式,还支持更人性化的操作UI,之前的表达式对新人不太友好很容易出错,现在可以直接UI操作啦,它可以和表达式互相切换。 -- 还有HTTP ipv6等更多功能。 - -修复了大量BUG,完善文档代码,提高了整体的稳定可用性。更多新功能欢迎探索! 
- +- 还有HTTP ipv6等更多功能。 +修复了大量BUG,完善文档代码,提高了整体的稳定可用性。更多新功能欢迎探索! 只需要一条docker命令即可安装体验hertzbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` @@ -54,8 +49,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 若dockerhub网络超时,可以使用下面命令: `docker run -d -p 1157:1157 --name hertzbeat quay.io/tancloud/hertzbeat` ----- - +--- ## ⛄ 已支持 > 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! @@ -70,10 +64,10 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] - 和更多自定义监控模版。 - 通知支持 Discord Slack Telegram 邮件 钉钉 微信 飞书 短信 Webhook。 ----- +--- -**欢迎star三连来支持我们** +**欢迎star三连来支持我们** **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md index 97126e42bd4..02d1abc665a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system, Linux monitoring] --- - ![hertzBeat](/img/home/0.png) ### 什么是 HertzBeat? 
@@ -23,26 +22,25 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式消息及时送达。 +> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 - -![hertzBeat](/img/docs/hertzbeat-arch.png) +![hertzBeat](/img/docs/hertzbeat-arch.png) **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** -### 集群版来啦 +### 集群版来啦 我们之前的hertzbeat一直是单机版本,组件代码模块化但不支持采集器独立部署,所支持的监控数量上限受到了单节点的天然限制,且无法应对多个隔离网络的资源的统一纳管。 -经过一个多月的迭代,我们重写了采集任务调度,采集器独立部署,设计单机版和集群版用同一套代码方便后续的维护升级,单机集群两种模式可相互切换无感知。最终很高兴,集群版如期与大家见面了。 +经过一个多月的迭代,我们重写了采集任务调度,采集器独立部署,设计单机版和集群版用同一套代码方便后续的维护升级,单机集群两种模式可相互切换无感知。最终很高兴,集群版如期与大家见面了。 -集群版不仅仅给我们带来了更强大的监控性能,更有云边协同等功能让人充满想象。 +集群版不仅仅给我们带来了更强大的监控性能,更有云边协同等功能让人充满想象。 #### 高性能集群 -- 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 -- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 +- 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 +- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 - 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 ![hertzbeat](/img/docs/cluster-arch.png) @@ -53,31 +51,29 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 在多个网络不相通的隔离网络中,在以往的方案中我们需要在每个网络都部署一套监控系统,这导致数据不互通,管理部署维护都不方便。 HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采集器,添加监控时指定采集器,采集器在隔离网络内部进行监控任务采集,采集数据上报,由主 HertzBeat 服务统一调度管理展示。 -这多用于多个隔离数据中心或不同厂商云资源和云下资源的统一监控场景。 +这多用于多个隔离数据中心或不同厂商云资源和云下资源的统一监控场景。 ![hertzbeat](/img/docs/cluster-arch.png) -### 为什么要开源集群版? +### 为什么要开源集群版? 
往往一些做需要商业化的开源产品的策略会是单机版作为玩具给小玩家们的入门使用,然后集群版作为闭源产品给有需求的氪金玩家付费使用。这样的模式是可以说非常不错的且值得肯定的,即保证开源也得到了收益,也适用于很多开源项目的发展策略,可能会在商业路径上走得更通顺点。 网络上有些人会对这样的分单机和集群版的开源项目嗤之以鼻,觉得它们是伪开源,开源是噱头,他们觉得开源应该什么都开源免费出来,开源团队什么都应该无私奉献出来。。。。很无语这类人,有投入才有回报,当你免费使用着开源软件并得到价值的时候,是否应该想一想你付出给开源软件了什么而不是一味的索取。 那回到正题,我们又为什么要开源集群版?仅因为热爱开源?如果说我们还在少年可能这话你信,但一个快奔30还有家庭责任的人说出这话你信吗,我自己都不信😂。 -首先我们来看看开源能带来什么,或者为什么要做开源。最开始全职开源的想法很简单,做自己喜欢的开源产品(已实现),程序员的梦想能部署在成千上万的服务器上(看下载量已实现),然后基于此开源产品挣钱(暂未哭)。 +首先我们来看看开源能带来什么,或者为什么要做开源。最开始全职开源的想法很简单,做自己喜欢的开源产品(已实现),程序员的梦想能部署在成千上万的服务器上(看下载量已实现),然后基于此开源产品挣钱(暂未哭)。 - 用户流量。开源项目免费提供给用户和开发者,吸引用户使用,宣传等方面都有优势。 - 用户信任。开源的产品天生容易获取用户的信任和使用耐心,或者说降低用户的信任门槛。 - 社区协作。开源的产品可以吸引到顶级贡献者一起贡献,接收用户的反馈issue,pr贡献等,在社区的驱动下使开源项目越来越好,正向反馈后也会有更多人参与和使用。社区协作我觉得这是开源的意义,而且这样不仅仅只是程序员之间的贡献代码协作,用户都是协作对象(比如我们这个项目有大量的运维朋友贡献代码和文档),如果是仅仅代码开源而不社区协作,那还不如放个安装包给别人免费使用下载就好。 -- 产品生态。这对一些需要生态的产品是需要的,比如hertzbeat,需要支持对接各种类型协议的监控类型,大量的监控模版。一个好的开源项目生态才能吸引到其它贡献者贡献和分享,在生态中互通有无,最终大家在生态中都受益。这在闭源程序中是很难做到的。 +- 产品生态。这对一些需要生态的产品是需要的,比如hertzbeat,需要支持对接各种类型协议的监控类型,大量的监控模版。一个好的开源项目生态才能吸引到其它贡献者贡献和分享,在生态中互通有无,最终大家在生态中都受益。这在闭源程序中是很难做到的。 上面几点,重在社区协作和产品生态,这也是开源集群版的原因,只有卷开源产品卷自己到更强的产品力,比如集群这一技术特性天生会吸引到开发者(而且集群本身就是我们社区协作的产物),会吸引到更多的用户和贡献者使用反馈大家一起迭代,社区驱动进而正向促进开源项目和满足用户功能体验。 -而对于开源商业化,开源商业化的前提是得有个真正好的,受欢迎,被广泛使用的开源产品,然后在此基础上做商业化挣钱。 +而对于开源商业化,开源商业化的前提是得有个真正好的,受欢迎,被广泛使用的开源产品,然后在此基础上做商业化挣钱。 对了这里再说下开源不等同于免费,基于HertzBeat二次开发需保留logo,名称,页面脚注,版权等。 免费使用不是白嫖,这种破坏开源协议的才是,目前发现大量白嫖怪,小心点哈你们。我每年正月初七都会祝你们用这些钱吃的安心,住的放心,玩的开心哈。(仅个人言论不代表社区) - -### 尝试部署集群版 - +### 尝试部署集群版 1. 
`docker` 环境仅需一条命令即可开始 @@ -94,18 +90,17 @@ HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采 ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_IP=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MANAGER_IP=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) - ----- - +--- ### 更多的 v1.4.0 版本更新 -> 更多版本新功能更新欢迎探索,感谢社区小伙伴们的辛苦贡献,爱心💗! +> 更多版本新功能更新欢迎探索,感谢社区小伙伴们的辛苦贡献,爱心💗! * [doc] add v1.3.2 publish doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1075 * remove elasticsearch unused param index by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1080 @@ -155,7 +150,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN * [doc] add collector clusters document by @tomsun28 in https://github.com/apache/hertzbeat/pull/1161 * [hertzbeat] release hertzbeat version v1.4.0 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1168 ----- +--- ## ⛄ 已支持 @@ -178,5 +173,5 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 欢迎star一波来支持我们哦。 **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md index ee2e8f496da..6adbd15b1fd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) 
很高兴迎来了新的社区`Committer`,和其它贡献者不一样的是`logicz`来自讯飞的运维实施岗位而不是开发岗位,但不管是代码还是文档等贡献质量都非常高👍。这也是我们`HertzBeat`与其它开源项目不一样的地方,因为用户群体更多面向运维开发,在我们139位贡献者中运维工程师的占比超30%,这打破了开源项目的协作贡献对象都是开发岗位的固有认知,这说明不管是运维工程师还是测试工程师对开源项目的贡献参与都是非常热情的,而不仅仅只是作为开源协作的旁观者。开源项目的参与并不是只专属于某一类人群,而是面向所有的想来参与的人,可能是一篇文档,一个脚本或者一段代码,想象一下你参与的开源项目被部署到成千上万的服务器上运行跑起来,帮助到他人被使用或者浏览Review讨论,git记录永留存,这也许就是参与开源项目的意义。 @@ -37,29 +36,29 @@ github:zqr10159 **贡献**: -* 实现实时数据存入Redis自定义db +* 实现实时数据存入Redis自定义db -* 新增历史数据存入GreptimeDB +* 新增历史数据存入GreptimeDB -* 提供监控批量导入、导出为xlsx和yml文件功能 +* 提供监控批量导入、导出为xlsx和yml文件功能 -* 提供Web页面自定义邮件服务器设置功能 +* 提供Web页面自定义邮件服务器设置功能 -* 提供Apache doris FE,BE监控模板 +* 提供Apache doris FE,BE监控模板 -* 实现ServerChan(Server酱)告警推送通知 +* 实现ServerChan(Server酱)告警推送通知 -* 接入第三方告警(腾讯云),实现第三方告警与Hertzbeat告警统一推送 +* 接入第三方告警(腾讯云),实现第三方告警与Hertzbeat告警统一推送 -* 修复导入、导出监控时不含采集器信息等bug +* 修复导入、导出监控时不含采集器信息等bug -* 若干文档的更新 +* 若干文档的更新 **收获**: -* Hertzbeat真的是个很优秀的项目,在项目部署方面,脚本和配置都非常规范,我自己写的很多项目都有借鉴 +* Hertzbeat真的是个很优秀的项目,在项目部署方面,脚本和配置都非常规范,我自己写的很多项目都有借鉴 -* 最重要的还是收获到了开源精神,大家在一个社区里面共同交流、进步 +* 最重要的还是收获到了开源精神,大家在一个社区里面共同交流、进步 ## 感谢社区小伙伴 @@ -67,8 +66,8 @@ github:zqr10159 ## 对新人的一点建议 -* 开源不易,要能够坚持下来,与大家分享自己的成果是很有成就感的一件事情 -* 养成良好的代码习惯,代码可以写的不好,注释一定要写清楚,方便其他人阅读和修改你的代码 +* 开源不易,要能够坚持下来,与大家分享自己的成果是很有成就感的一件事情 +* 养成良好的代码习惯,代码可以写的不好,注释一定要写清楚,方便其他人阅读和修改你的代码 ## 什么是 HertzBeat? @@ -76,15 +75,14 @@ HertzBeat 赫兹跳动是一个拥有强大自定义监控能力,高性能集 ### 特点 -* 集 **监控+告警+通知** 为一体,支持对应用服务,数据库,操作系统,中间件,云原生,网络等监控阈值告警通知一步到位。 -* 易用友好,无需 `Agent`,全 `WEB` 页面操作,鼠标点一点就能监控告警,零上手学习成本。 -* 将 `Http,Jmx,Ssh,Snmp,Jdbc` 等协议规范可配置化,只需在浏览器配置监控模版 `YML` 就能使用这些协议去自定义采集想要的指标。您相信只需配置下就能立刻适配一款 `K8s` 或 `Docker` 等新的监控类型吗? -* 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 -* 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 +* 集 **监控+告警+通知** 为一体,支持对应用服务,数据库,操作系统,中间件,云原生,网络等监控阈值告警通知一步到位。 +* 易用友好,无需 `Agent`,全 `WEB` 页面操作,鼠标点一点就能监控告警,零上手学习成本。 +* 将 `Http,Jmx,Ssh,Snmp,Jdbc` 等协议规范可配置化,只需在浏览器配置监控模版 `YML` 就能使用这些协议去自定义采集想要的指标。您相信只需配置下就能立刻适配一款 `K8s` 或 `Docker` 等新的监控类型吗? 
+* 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 +* 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 - Github: https://github.com/apache/hertzbeat 欢迎更多的用户参与到`HertzBeat`的开源协作中来,不管是一个错别字还是标点符号我们都非常欢迎。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md index 25ae9bb91f3..bb29c6c9d0a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md @@ -22,7 +22,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] - **监控指标名称国际化** 用户可以看指标的中英文名称啦,欢迎一起完善监控模版里面的i18n国际化资源 - **支持kubernetes helm charts一键部署** 见 https://artifacthub.io/packages/search?repo=hertzbeat -**更多的特性和BUG修复,稳定性提示** 感谢 @zqr10159 @Carpe-Wang @luxx-lq @l646505418 @LINGLUOJUN @luelueking @qyaaaa @novohit @gcdd1993 +**更多的特性和BUG修复,稳定性提示** 感谢 @zqr10159 @Carpe-Wang @luxx-lq @l646505418 @LINGLUOJUN @luelueking @qyaaaa @novohit @gcdd1993 ### 上效果图: @@ -46,7 +46,6 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] image - ### 什么是 HertzBeat? [HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,高性能集群,无需 Agent 的开源实时监控告警系统。 @@ -59,8 +58,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式消息及时送达。 - -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -68,10 +66,8 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] **Gitee: https://gitee.com/hertzbeat/hertzbeat** - ### 尝试部署 - 1. 
`docker` 环境仅需一条命令即可开始 ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` @@ -87,14 +83,14 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) ----- - +--- ## ⛄ 已支持 > 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! @@ -111,8 +107,8 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 和更多自定义监控模版。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱`。 ----- +--- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md index 7aec4a52016..5e18cd2ba5d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md @@ -16,12 +16,12 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] - **消息通知模版特性,开源之夏课题** - **支持华为云OBS存储监控模版文件** -- **支持MQTT消息服务器 emqx 监控** +- **支持MQTT消息服务器 emqx 监控** - **支持对 udp 端口可用性监控** - **更多的特性功能支持和BUG修复** - **安装包内置JDK一键启动** -**更多的特性和BUG修复欢迎使用探索,1.4.2 版本共有 13 位社区小伙伴们参与,感谢他们的贡献❤️** +**更多的特性和BUG修复欢迎使用探索,1.4.2 版本共有 13 位社区小伙伴们参与,感谢他们的贡献❤️** ### 什么是 HertzBeat? 
@@ -35,8 +35,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 - -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -44,10 +43,8 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] **Gitee: https://gitee.com/hertzbeat/hertzbeat** - ### 尝试部署 - 1. `docker` 环境仅需一条命令即可开始 ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` @@ -63,14 +60,14 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) ----- - +--- ## ⛄ 已支持 > 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! 
@@ -87,7 +84,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 和更多自定义监控模版。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱`。 ----- +--- **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** @@ -113,3 +110,4 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - ⬇️ [hertzbeat-collector-macos_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_arm64_1.4.2.tar.gz) - ⬇️ [hertzbeat-collector-macos_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_amd64_1.4.2.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-windows64_1.4.2.zip) + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md index 285097da988..4550a1f4278 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md @@ -40,7 +40,6 @@ keywords: [open source monitoring system, alerting system] ### 尝试部署 - 1. 
`docker` 环境仅需一条命令即可开始 ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` @@ -56,14 +55,14 @@ keywords: [open source monitoring system, alerting system] ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) ----- - +--- ## ⛄ 已支持 > 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! @@ -80,13 +79,12 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 和更多自定义监控模版。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱`。 ----- +--- **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** ----- - +--- ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! 
@@ -151,7 +149,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN **Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.2...v1.4.3 ----- +--- ## ⛄ Supported @@ -167,7 +165,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ---- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** ### **下载链接** @@ -190,3 +188,4 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - ⬇️ [hertzbeat-collector-macos_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_arm64_1.4.3.tar.gz) - ⬇️ [hertzbeat-collector-macos_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_amd64_1.4.3.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-windows64_1.4.3.zip) + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md index 895dd782421..c699daba0a1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) > 热烈欢迎 HertzBeat 有三位小伙伴新晋社区 Committer, 让我们来了解下他们的开源经历吧! @@ -39,17 +38,11 @@ keywords: [open source monitoring system, alerting system] - 接触到了很优秀的开源社区,提升了相关领域的技术水平。 - 感谢Tom哥和我的ospp导师郑晨鑫,他们在我接触开源社区的过程中给出了许多帮助和建议。目前我仍然在社区中负责部分代码的开发,希望Hertzbeat以后可以越来越好! 
- - ----- - +--- # New Committer - 淞筱 - - **姓名:周书胜** **河南师范大学大三学生** @@ -67,13 +60,11 @@ keywords: [open source monitoring system, alerting system] * 支持Spring Gateway、Apache Spark、Apache Hive等服务指标采集 * 自定义nginx、pop3协议,对Nginx,POP3邮箱服务器进行指标采集,并添加相应帮助文档 - ## 收获 * 接触了更加优秀、结构更加复杂的大型项目,提高了编程和解决问题的能力 * 将理论知识付诸于实践,收获了JUC,微服务相关的开发经验,以及宝贵的项目经历 - ## 感谢社区小伙伴 感谢HertzBeat的作者、HertzBeat/Sms4j Committer铁甲小宝同学、Sms4j Committer东风同学,在我遇到自己不能解决的问题时,常常向三位哥哥请教,他们也总是不厌其烦,耐心的帮助我解决问题,实在是无以言表。 @@ -85,14 +76,10 @@ keywords: [open source monitoring system, alerting system] * 初次参与开源项目时,可以从简单的任务开始。逐渐熟悉项目的代码和流程,并逐步承担更复杂的任务。 * 如果遇到自己无法解决的问题时,可以多多请教社区的小伙伴们。 - - ----- +--- # New Committer - 东风 - - **姓名:张洋** **河南师范大学应届生** @@ -126,7 +113,6 @@ keywords: [open source monitoring system, alerting system] - issue和pr是你了解的项目的敲门砖,一点要敢于讨论并发表观点。 - 贡献不分大小,要敢于尝试,并不断提升自己。 - ---- ## 什么是 HertzBeat? @@ -142,10 +128,8 @@ keywords: [open source monitoring system, alerting system] - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 - > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 - **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md index 6d60f9bb1ee..cbee4825d83 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md @@ -39,10 +39,8 @@ keywords: [open source monitoring system, alerting system] - add smtp protocol and support smtp monitoring by @ZY945 - more feature, document and bugfix - ### 尝试部署 - 1. 
`docker` 环境仅需一条命令即可开始 ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` @@ -58,14 +56,14 @@ keywords: [open source monitoring system, alerting system] ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) ----- - +--- ## ⛄ 已支持 > 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! @@ -82,13 +80,12 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 和更多自定义监控模版。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱`。 ----- +--- **Github: https://github.com/apache/hertzbeat** **Gitee: https://gitee.com/hertzbeat/hertzbeat** ----- - +--- ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! @@ -101,7 +98,10 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN * monitoring the availability of websockets through handshake. by @ZY945 in https://github.com/apache/hertzbeat/pull/1413 * Task-1386 When adding tags in tag management, random colors are given by default. 
by @prolevel1 in https://github.com/apache/hertzbeat/pull/1412 * add prolevel1 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1415 -* #1397 feature: support for dns monitoring by @Calvin979 in https://github.com/apache/hertzbeat/pull/1416 +* + +# 1397 feature: support for dns monitoring by @Calvin979 in https://github.com/apache/hertzbeat/pull/1416 + * Support monitoring hive metrics by @a-little-fool in https://github.com/apache/hertzbeat/pull/1417 * support legend pageable in history data charts by @tomsun28 in https://github.com/apache/hertzbeat/pull/1414 * update component tip and help tip doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1418 @@ -163,7 +163,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN **Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.3...v1.4.4 ----- +--- ## Supported @@ -179,7 +179,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ---- **Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: https://gitee.com/hertzbeat/hertzbeat** ### **下载链接** @@ -203,8 +203,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - ⬇️ [hertzbeat-collector-macos_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-macos_amd64_1.4.4.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-windows64_1.4.4.zip) - **hertzbeat docker compose script** -- ⬇️ [docker-compose](https://github.com/apache/hertzbeat/releases/download/v1.4.4/docker-compose.zip) +- ⬇️ [docker-compose](https://github.com/apache/hertzbeat/releases/download/v1.4.4/docker-compose.zip) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md index 
d34f4b4097d..d9b0409be70 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md @@ -12,18 +12,14 @@ keywords: [open source monitoring system, alerting system] HertzBeat 于 2022 年 1 月在 Dromara 开源社区正式开源,经过两年多的社区小伙伴们的不间断项目维护迭代,持续的社区成长,现在,HertzBeat 正式官宣加入 Apache 孵化器! - ![](/img/blog/apache-incubator-1.png) ![](/img/blog/apache-incubator-2.png) - - 加入 Apache 孵化器不是终点,这是项目的全新起点。加入 Apache 孵化器后,在社区层面,HertzBeat 将积极践行『开放、协作』 的 Apache 之道,持续构建一个公平,多元,包容的开源社区。在开源产品层面,倾听用户声音,提升优化产品使用体验,打造对接开源生态。 非常欢迎各位加入到 HertzBeat 社区,社区接受任何形式的贡献,让我们一起共同推动开源的发展,希望有朝一日 HertzBeat 能像 Apache Kafka, Apache Tomcat 这些基金会顶级项目一样,成为一款世界级的开源产品,我们所贡献的代码被部署运行在全球的各个行业,各个角落。 - ## 什么是 HertzBeat [Apache HertzBeat](https://github.com/apache/hertzbeat) (incubating) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 @@ -38,19 +34,16 @@ HertzBeat 于 2022 年 1 月在 Dromara 开源社区正式开源,经过两年 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 - 提供强大的状态页构建能力,轻松向用户传达您产品服务的实时状态。 - > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助用户快速搭建自有监控系统。 ## HertzBeat 发展现状 HertzBeat 开源以来主仓库在 GitHub 累计收获 **4.5K+** star, Gitee **2.4K+** star, 累计发布版本 **20** 次,统计的软件安装包 DockerHub 等下载安装量累计超 **100K+** ,社区参与代码贡献人数 **180** 人,项目和社区的不断完善进步, 由衷的感谢每位贡献者的努力和付出💗。 - ## 社区小伙伴们 > 谢谢这些可爱的贡献者们。 -
@@ -281,8 +274,6 @@ HertzBeat 开源以来主仓库在 GitHub 累计收获 **4.5K+** star, Gitee **2
- - ## 特别感谢 感谢所有参与到社区的贡献者。 @@ -304,11 +295,10 @@ HertzBeat 开源以来主仓库在 GitHub 累计收获 **4.5K+** star, Gitee **2 > 感谢导师 @XiaoYu 、 @Yonglun 、 @Justin 、 @Francis 各位导师无私的帮助, 在合规和流程推进上的给予了专业指导。从 Dromara 开源社区到 Apache 孵化器,@XiaoYu 一直是 HertzBeat 的开源领路人。相信未来在各位导师的指导下社区一定社区更加健康的成长。 - ## 来自 Dromara 社区的寄语 > HertzBeat 从 Dromara 明星项目到成为 Apache 孵化器的一员,展现出了强劲的生命力和健康的成长态势。我们为 HertzBeat 的每一步前进感到自豪和振奋。在此,我们向 HertzBeat 团队致以最诚挚的祝贺,也向所有为该项目贡献力量的社区成员表示深深的感激。 - +> > 愿 HertzBeat 在未来的旅程中持续发展,不断创新,成为开源社区中的亮眼之星。我们相信,凭借团队的出色才能和社区的广泛支持,HertzBeat 必将实现更加辉煌的成就,为全球的开发者和用户提供优质的服务和体验。Dromara 将继续全力支持和关注 HertzBeat 的发展,期待它创造出更加精彩的篇章! --- @@ -329,4 +319,3 @@ HertzBeat 开源以来主仓库在 GitHub 累计收获 **4.5K+** star, Gitee **2 发送任意内容至 dev-subcribe@hertzbeat.apache.org 订阅 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md index 349490ea278..755dae83375 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md @@ -17,8 +17,6 @@ **Gitee: https://gitee.com/hertzbeat/hertzbeat** - - ## 什么是开源之夏? 开源之夏是由中国科学院软件研究所“开源软件供应链点亮计划”发起并长期支持的一项暑期开源活动,旨在鼓励在 @@ -39,8 +37,6 @@ 各位同学可以自由选择项目,与社区导师沟通实现方案并撰写项目计划书。被选中的学生将在社区导师指导下,按计划完成开发工作,并将成果贡献给社区。社区评估学生的完成度,主办方根据评估结果发放资助金额给学生。 - - ## HertzBeat 课题 ### 1、实现监控模版市场商店 @@ -55,8 +51,6 @@ 这样不仅能让 `HertzBeat` 的生态更加完善,也能让用户的体验变的更好! - - **要求:** 1. 使用Java17, springboot3编写后端代码,Angular(建议)或Vue编写前端代码。 @@ -64,20 +58,14 @@ 3. 模板页面展示下载量、分类、模板描述信息、模版历史版本(可选)。 4. 实现用户个人页面注册、登录(后期),上传模板。 - - **产出:** - 1. 特性代码能以PR的形式合入HertzBeat仓库。 - 2. 完成 HertzBeat官方模板市场 - 3. 更新相关帮助文档 - - **联系导师:** 赵青然 [zqr10159@dromara.org](mailto:zqr10159@dromara.org) - - ### 2、实现 Java 原生的 ipmi2 通信协议 **项目难度:进阶/Advanced** @@ -90,8 +78,6 @@ 温度传感器信息和时钟信息。 - - **要求:** 1. 使⽤ Java 基于 UDP 协议实现原⽣的 IPMI2 协议(查询部分),不依赖任何第三⽅包。 @@ -102,8 +88,6 @@ 3. 
对查询到的指标信息进⾏抽象和规范化处理,实现配置化管理(可选)。 4. 输出详细的项⽬⽂档,包括设计思路、实现细节、使⽤说明等。 - - **产出:** - 特性代码能以PR的形式合⼊HertzBeat仓库。 @@ -114,12 +98,8 @@ - 完善帮助⽂档。 - - **联系导师:** 铁甲小宝 [tjxiaobao2024@qq.com](mailto:tjxiaobao2024@qq.com) - - ## 参与 HertzBeat 能收获什么? 有的同学可能会疑惑参与开源之夏能收获到什么呢? @@ -130,4 +110,4 @@ 4. **【推荐入职/实习】在本次编程之夏项目中表现优秀同学,可推荐入职/实习 你心意的公司工作。** 5. **【额外获得社区惊喜】所有参与本次编程之夏项目的同学,均可有机会成为 Apache HertzBeat 的committer,并拥有属于自己的 apache邮箱。** -**百分百有奖品拿哦**,现在唯一的问题是时间不多了,赶紧上车报名!截止报名时间是6月4日,快点来报名参与 2023 编程之夏吧~ \ No newline at end of file +**百分百有奖品拿哦**,现在唯一的问题是时间不多了,赶紧上车报名!截止报名时间是6月4日,快点来报名参与 2023 编程之夏吧~ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md index e4f668eb5d8..2f612c42c28 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md @@ -1,6 +1,7 @@ # HertzBeat 1.6.0 升级指南 ### 注意:该指南适用于1.5.0向1.6.0版本升级 + ### 如果你使用更老的版本,建议使用导出功能重新安装,或先升级到1.5.0再按本指南升级到1.6.0 ### 二进制安装包升级 @@ -12,6 +13,7 @@ - 当你的服务器中默认环境变量为Java17时,这一步你无需任何操作。 - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**没有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并在搜索引擎搜索如何设置新的环境变量指向新的Java17。 - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并将解压后的文件夹重命名为java,复制到Hertzbeat的解压目录下。 + 2. 
升级数据库 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), @@ -24,6 +26,7 @@ - `application.yml`一般需要修改以下部分 默认为: + ```yaml datasource: driver-class-name: org.h2.Driver @@ -42,7 +45,9 @@ logging: level: SEVERE ``` + 如若修改为mysql数据库,给出一个示例: + ```yaml datasource: driver-class-name: com.mysql.cj.jdbc.Driver @@ -63,6 +68,7 @@ ``` - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + ```yaml # account info config # eg: admin has role [admin,user], password is hertzbeat @@ -87,10 +93,10 @@ account: role: [user] ``` - 4. 添加相应的数据库驱动 - 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动,复制到安装目录下`ext-lib`中。 + 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动,复制到安装目录下`ext-lib`中。 + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) oracle(如果你要监控oracle,这两个驱动是必须的) [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) @@ -100,6 +106,7 @@ oracle(如果你要监控oracle,这两个驱动是必须的) ### Docker 方式升级 - Mysql数据库 1. 关闭 HertzBeat 容器 + ``` docker stop hertzbeat ``` @@ -116,6 +123,7 @@ docker stop hertzbeat - `application.yml`一般需要修改以下部分 默认为: + ```yaml datasource: driver-class-name: com.mysql.cj.jdbc.Driver @@ -136,6 +144,7 @@ docker stop hertzbeat ``` - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + ```yaml # account info config # eg: admin has role [admin,user], password is hertzbeat @@ -162,7 +171,8 @@ account: 4. 
添加相应的数据库驱动 - 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) oracle(如果你要监控oracle,这两个驱动是必须的) [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) @@ -172,6 +182,7 @@ oracle(如果你要监控oracle,这两个驱动是必须的) ### Docker安装升级 - H2内置数据库(生产环境不推荐使用H2) 1. 关闭 HertzBeat 容器 + ``` docker stop hertzbeat ``` @@ -181,9 +192,11 @@ docker stop hertzbeat 前题你已经将 H2 数据库文件 data 目录挂载到本地,或者启动老容器手动将 /opt/hertzbeat/data 目录拷贝出来。 下载 h2 驱动 jar [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220) 使用 h2 驱动 jar 本地启动数据库 + ``` java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 H2 执行升级sql。 @@ -194,6 +207,7 @@ java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 - `application.yml`一般需要修改以下部分 默认为: + ```yaml datasource: driver-class-name: org.h2.Driver @@ -214,6 +228,7 @@ java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + ```yaml # account info config # eg: admin has role [admin,user], password is hertzbeat @@ -238,10 +253,10 @@ account: role: [user] ``` - 4. 
添加相应的数据库驱动 - 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) oracle(如果你要监控oracle,这两个驱动是必须的) [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) @@ -249,8 +264,8 @@ oracle(如果你要监控oracle,这两个驱动是必须的) 接下来,像之前那样 Docker 运行启动,即可体验最新的HertzBeat1.6.0! ### 通过导出导入升级 -> 若不想如上繁琐的脚本升级方式,可以直接将老环境的监控任务和阈值信息导出导入 +> 若不想如上繁琐的脚本升级方式,可以直接将老环境的监控任务和阈值信息导出导入 1. 部署一套最新版本的新环境 2. 在页面上将老环境的监控任务和阈值信息导出。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md index 41267f8f964..e0f982e0e7a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md @@ -13,7 +13,7 @@ keywords: [open source, monitoring, alerting] 经过近五个月的社区开发迭代贡献和两个月的Apache Incubator孵化过程,Apache HertzBeat (incubating) v1.6.0 终于出来了。 这个版本我们增加了对OpenAi监控,Redfish协议服务器,插件机制,支持了NebulaGraph, Apache Yarn, HDFS, Hbase, Storm等更多功能特性。 由于License兼容问题,我们在底层替换了ORM框架,计算框架等多个依赖,Hibernate -> EclipseLink, 这也算是JPA生态下为数不多的迁移踩坑实践。 -同时修复了一些bug和优化了一些功能,更完善的文档。欢迎大家尝试使用,提出宝贵意见和建议,共同推动HertzBeat的发展。🎉 +同时修复了一些bug和优化了一些功能,更完善的文档。欢迎大家尝试使用,提出宝贵意见和建议,共同推动HertzBeat的发展。🎉 **当然,最重要的是给在社区的贡献者们致以最好的感谢!** @@ -35,7 +35,6 @@ keywords: [open source, monitoring, alerting] - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 - 提供强大的状态页构建能力,轻松向用户传达您产品服务的实时状态。 - > 
`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助用户快速搭建自有监控系统。 ![hertzBeat](/img/docs/hertzbeat-arch.png) @@ -367,7 +366,6 @@ keywords: [open source, monitoring, alerting] * @lw-yang made their first contribution in https://github.com/apache/hertzbeat/pull/2047 * @xfl12345 made their first contribution in https://github.com/apache/hertzbeat/pull/2048 - ## 一条命令即可开始 ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` @@ -376,12 +374,9 @@ keywords: [open source, monitoring, alerting] ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` - 详细参考 HertzBeat 官网文档 https://hertzbeat.com/docs - ----- - +--- **Github: https://github.com/apache/hertzbeat** 下载页面: https://hertzbeat.apache.org/docs/download/ @@ -390,7 +385,7 @@ keywords: [open source, monitoring, alerting] Have Fun! ----- +--- HertzBeat, Make Monitoring Easier! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md index 3cd89bc0ac2..dd19af388f2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md @@ -8,7 +8,6 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) > 🎉非常高兴成为 Apache HertzBeat 项目的 Committer,受社区邀请来做一个自我介绍🥰。 @@ -43,6 +42,3 @@ keywords: [open source monitoring system, alerting system] 最后要感谢社区的 logicz 邀请我成为 Committer,tom 对我 PR 的 review,预祝 HertzBeat 从孵化器毕业成为明星项目🎊。 - - - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-08-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-08-new-committer.md index 9cefa3dc661..a045e67963a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-08-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-08-new-committer.md @@ -8,10 +8,8 @@ 
tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) - ### 初识 Apache Hertzbeat 我第一次接触 Apache Hertzbeat 项目是因为一个偶然的机会。当时,我正在寻找一个开源项目来监控我们公司相关服务。Apache Hertzbeat 作为一个通用监控的项目,自然进入了我的视野。我开始通过阅读官方文档和浏览项目的代码库,来了解它的架构和功能。 @@ -24,7 +22,6 @@ keywords: [open source monitoring system, alerting system] 随着对项目的理解加深,我开始挑战一些更复杂的问题。我阅读了大量的代码,参与了社区的日常讨论,月例会讨论,并在issue/pr中活跃发言。这些活动不仅帮助我提升了技术能力,还让我对项目的发展方向有了更好的把握。我开始提出自己的想法和建议,积极参与设计讨论,并提出了一些新的功能。 - ### 被提名为 Committer 在经过一段时间的积极贡献之后,我收到了来自社区(tom) 的Committer 的提名。成为 Committer 意味着我将拥有更大的责任和权限。提名过程是公开和透明的,社区的每一个成员都有机会表达他们的意见。 @@ -49,4 +46,4 @@ keywords: [open source monitoring system, alerting system] ### 结语 -成为 Apache Hertzbeat 项目的 Committer 是一个充满挑战和收获的旅程。通过不断的学习和贡献,我不仅提升了自己的技术能力,也在社区中找到了归属感和成就感。我希望我的经历能够激励更多的人参与到开源社区中来,共同推动技术的进步和发展。借用tom老哥的话:参与开源不要影响大家的工作和生活,那就违背初衷啦,大家利用下班后空闲时间参与哦。 \ No newline at end of file +成为 Apache Hertzbeat 项目的 Committer 是一个充满挑战和收获的旅程。通过不断的学习和贡献,我不仅提升了自己的技术能力,也在社区中找到了归属感和成就感。我希望我的经历能够激励更多的人参与到开源社区中来,共同推动技术的进步和发展。借用tom老哥的话:参与开源不要影响大家的工作和生活,那就违背初衷啦,大家利用下班后空闲时间参与哦。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-15-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-15-new-committer.md index 2cba35df2ce..1942ccf08d7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-15-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-15-new-committer.md @@ -8,12 +8,10 @@ tags: [opensource, practice] keywords: [open source monitoring system, alerting system] --- - ![hertzBeat](/img/blog/new-committer.png) 大家好,非常荣幸可以收到社区邀请被提名为 Apache HertzBeat 的 Committer。我先做一个简单的自我介绍,从2019年工作开始,一直从事后端开发工作,主要使用Java语言,目前在一家网络安全公司从事网络安全相关产品后端开发工作。 - ### 遇见 我工作的项目里部署着数台物理服务器,其中运行着各种数据库,中间件,尽管部署了 Prometheus + grafana 的监控组合,但由于大多数的服务和服务器都需要额外安装 exporter,这套监控系统并没有覆盖到项目的全部,有时候服务宕机了,被用到了才发现。四月份的某一天我刷到了一篇公众号文章介绍了 HertzBeat,我马上被不需要 agent,全程可视化配置的特新吸引加上支持docker一键部署,我迅速的部署起来了 HertzBeat 并投入了使用。 diff 
--git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md index 562ecc8eee7..05a5d5329ce 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-27-new-committer.md @@ -36,4 +36,4 @@ keywords: [open source monitoring system, alerting system] ## 结语 -成为Apache Hertzbeat的Committer于我而言是一次很有趣的经历,无时无刻都在激励着我。今后我也会持续对Apache Hertzbeat社区贡献下去,也希望Apache Hertzbeat能顺利从孵化器毕业。 \ No newline at end of file +成为Apache Hertzbeat的Committer于我而言是一次很有趣的经历,无时无刻都在激励着我。今后我也会持续对Apache Hertzbeat社区贡献下去,也希望Apache Hertzbeat能顺利从孵化器毕业。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md index be6c1adf2fb..ad575b95474 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md @@ -11,11 +11,13 @@ keywords: [open source monitoring system, alerting system] ![hertzBeat](/img/blog/new-committer.png) > 非常荣幸能成为Apache Hertzbeat的Committer -## 个人介绍 +> + ## 个人介绍 本人是2023年毕业,目前在一家互联网公司担任java开发工程师。 ## 初识 Apache Hertzbeat + 毕业之后,工作了半年,萌生出接触开源的想法。然而,在 GitHub 上搜索开源项目时,发现诸多社区活跃度欠佳,官方文档也不完善,致使项目上手难度颇大,因此开源之事暂且搁置。 某天,在某平台看到小宝大佬的一个帖子,我瞬间兴趣盎然,随即私聊小宝,咨询开源相关事宜。小宝大佬极其热情且细致地为我讲解了不少开源方面的情况,并向我推荐了 Hertzbeat 这个项目。 @@ -34,4 +36,4 @@ keywords: [open source monitoring system, alerting system] ## 结语 -成为Apache Hertzbeat的Committer于我而言是很有意义对事情,今后我也会持续对Apache Hertzbeat社区贡献下去,也希望Apache Hertzbeat能顺利从孵化器毕业,希望社区越来越好。 \ No newline at end of file +成为Apache Hertzbeat的Committer于我而言是很有意义对事情,今后我也会持续对Apache Hertzbeat社区贡献下去,也希望Apache Hertzbeat能顺利从孵化器毕业,希望社区越来越好。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md index 
506725aaac6..5292ceefc37 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-29-new-committer.md @@ -10,25 +10,30 @@ keywords: [open source monitoring system, alerting system] ![hertzBeat](/img/blog/new-committer.png) -> 非常荣幸能成为Apache Hertzbeat的Committer +> 非常荣幸能成为Apache Hertzbeat的Committer ## 成为更强大团队的一份子:我的贡献与成长 + 在开源社区中,每一次的贡献不仅是对项目的推动,也是个人成长的见证。近期,在Apache HertzBeat项目中,我通过一系列的优化与功能增强,不仅为项目的发展贡献了力量,也实现了自我技能的提升。今天,我想借此机会分享我的贡献经历与成长感悟。 ## 从细节入手,优化视觉与交互 + 我始终认为细节决定成败,在参与项目之初,我便从界面优化入手,致力于提升用户的视觉和交互体验。我优化了监控选择菜单的模态窗口布局,使其更加符合用户的操作习惯;调整了监控详情页面的头部样式和内容布局,使得信息展示更加清晰直观。同时,我还统一了组件的边框圆角值,解决了国际化翻译缺失等问题,确保了系统界面的一致性和完整性。 这些看似微小的改动,实则大大提升了系统的整体美观度和用户体验。通过这个过程,我深刻体会到界面设计对于用户体验的重要性,也锻炼了自己在细节处理上的能力。 ## 深入功能模块,实现功能增强 + 除了界面优化外,我还积极参与了功能模块的扩展与增强工作。我重构了系统中的重复代码,抽象出了通用组件,提高了代码的复用性和可维护性。这些改进不仅简化了开发流程,还降低了后期维护的成本。同时,我还为多功能输入组件添加了密码类型支持和搜索类型支持,进一步丰富了组件的功能和用途。 在功能实现的过程中,我遇到了不少挑战。但正是这些挑战促使我不断学习和探索新的技术和方法。通过查阅官方文档等方式,我逐步攻克了难关,并成功完成了任务。这个过程不仅提升了我的技术能力,也让我更加深刻地理解了团队协作的重要性。 ## 注重用户反馈,持续优化产品 + 我始终认为用户是产品的最终评判者。因此,我在公司内部以及社区持续收集和分析用户的反馈意见,并根据这些意见进行针对性的优化和改进。通过优化搜索与筛选功能、统一使用简洁的交互元素等方式,我不断提升了用户的使用体验。 这个过程中,我深刻体会到了用户导向的重要性。只有真正关注用户的需求和期望才能打造出符合市场需求的产品。 ## 展望未来,持续贡献与成长 + 回顾过去一段时间的贡献经历我深感自豪和满足。但同时我也清楚地认识到自己还有很多不足之处需要不断学习和提升。未来我将继续秉持着严谨、创新、用户至上的精神不断探索和实践为Apache HertzBeat项目贡献更多的力量。同时我也期待与更多的团队成员一起共同成长和进步共同推动项目的繁荣发展。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md index 5f8db64d1be..e734c1536a2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md @@ -3,14 +3,17 @@ id: extend-http-default title: HTTP协议系统默认解析方式 sidebar_label: 系统默认解析方式 --- -> HTTP接口调用获取响应数据后,用 Apache HertzBeat (incubating) 默认的解析方式去解析响应数据。 
-**此需接口响应数据结构符合HertzBeat指定的数据结构规则** +> HTTP接口调用获取响应数据后,用 Apache HertzBeat (incubating) 默认的解析方式去解析响应数据。 -### HertzBeat数据格式规范 -注意⚠️ 响应数据为JSON +**此需接口响应数据结构符合HertzBeat指定的数据结构规则** + +### HertzBeat数据格式规范 + +注意⚠️ 响应数据为JSON 单层格式:key-value + ```json { "metricName1": "metricValue", @@ -19,7 +22,9 @@ sidebar_label: 系统默认解析方式 "metricName4": "metricValue" } ``` + 多层格式:数组里面套key-value + ```json [ { @@ -36,9 +41,11 @@ sidebar_label: 系统默认解析方式 } ] ``` + 样例: 查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 -若只有一台虚拟机,其单层格式为: +若只有一台虚拟机,其单层格式为: + ```json { "hostname": "linux-1", @@ -48,7 +55,9 @@ sidebar_label: 系统默认解析方式 "runningTime": 100 } ``` -若有多台虚拟机,其多层格式为: + +若有多台虚拟机,其多层格式为: + ```json [ { @@ -75,7 +84,7 @@ sidebar_label: 系统默认解析方式 ] ``` -**对应的监控模版YML可以配置为如下** +**对应的监控模版YML可以配置为如下** ```yaml category: custom @@ -274,3 +283,4 @@ metrics: parseType: jsonPath parseScript: '$' ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md index 357cbd133ca..fcd44c5bbf3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md @@ -1,19 +1,17 @@ --- id: extend-http-example-hertzbeat title: 教程一:适配一款基于HTTP协议的监控类型 -sidebar_label: 教程一:适配一款HTTP协议监控 +sidebar_label: 教程一:适配一款HTTP协议监控 --- -通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 监控系统下新增适配一款基于http协议的监控类型。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 监控系统下新增适配一款基于http协议的监控类型。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 ### HTTP协议解析通用响应结构体,获取指标数据 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - ``` { "code": 200, @@ -22,6 +20,7 @@ sidebar_label: 
教程一:适配一款HTTP协议监控 } ``` + 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: @@ -58,7 +57,6 @@ sidebar_label: 教程一:适配一款HTTP协议监控 **我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - ### 新增自定义监控模版YML **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -190,32 +188,24 @@ metrics: **新增完毕,现在我们重启hertzbeat系统。我们可以看到系统页面已经多了一个`hertzbeat`监控类型了。** - ![](/img/docs/advanced/extend-http-example-1.png) - ### 系统页面添加对`hertzbeat`监控类型的监控 > 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > 过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### 设置阈值告警通知 > 接下来我们就可以正常的设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ---- #### 完! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md index b7a665a60b9..fc87145178a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md @@ -6,15 +6,15 @@ sidebar_label: 教程二:获取TOKEN后续认证使用 通过此教程我们一步一步描述如何在教程一的基础上改造,新增一个监控指标,先调用认证接口获取TOKEN后,使用TOKEN作为参数供后面的监控指标采集认证使用。 -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 -### 请求流程 +### 请求流程 -【**认证信息监控指标(优先级最高)**】【**HTTP接口携带账户密码调用**】->【**响应数据解析**】->【**解析签发TOKEN-accessToken作为指标**】->【**将accessToken作为变量参数赋值给其他采集监控指标**】 +【**认证信息监控指标(优先级最高)**】【**HTTP接口携带账户密码调用**】->【**响应数据解析**】->【**解析签发TOKEN-accessToken作为指标**】->【**将accessToken作为变量参数赋值给其他采集监控指标**】 > 
这里我们依然用教程一的hertzbeat监控举例!hertzbeat后台接口不仅仅支持教程一使用的basic直接账户密码认证,也支持token认证。 -**我们需要`POST`调用登录接口`/api/account/auth/form`获取`accessToken`,请求body(json格式)如下**: +**我们需要`POST`调用登录接口`/api/account/auth/form`获取`accessToken`,请求body(json格式)如下**: ```json { @@ -22,7 +22,8 @@ sidebar_label: 教程二:获取TOKEN后续认证使用 "identifier": "admin" } ``` -**响应结构数据如下**: + +**响应结构数据如下**: ```json { @@ -104,9 +105,9 @@ params: required: false ``` -### 定义监控指标`auth`登录请求获取`token` +### 定义监控指标`auth`登录请求获取`token` -1. 在`app-hertzbeat_token.yml`新增一个监控指标定义 `auth`, 设置采集优先级为最高0,采集指标 `token`. +1. 在`app-hertzbeat_token.yml`新增一个监控指标定义 `auth`, 设置采集优先级为最高0,采集指标 `token`. ```yaml @@ -222,16 +223,13 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) - **新增成功后我们就可以在详情页面看到我们采集的 `token`, `refreshToken`指标数据。** ![](/img/docs/advanced/extend-http-example-6.png) ![](/img/docs/advanced/extend-http-example-7.png) - - -### 将`token`作为变量参数给后面的监控指标采集使用 +### 将`token`作为变量参数给后面的监控指标采集使用 **在`app-hertzbeat_token.yml`新增一个监控指标定义 `summary` 同教程一中的`summary`相同, 设置采集优先级为1** **设置此监控指标的HTTP协议配置中认证方式为 `Bearer Token` 将上一个监控指标`auth`采集的指标`token`作为参数给其赋值,使用`^o^`作为内部替换符标识,即`^o^token^o^`。如下:** @@ -247,7 +245,7 @@ metrics: bearerTokenToken: ^o^token^o^ ``` -**最终`app-hertzbeat_token.yml`定义如下:** +**最终`app-hertzbeat_token.yml`定义如下:** ```yaml @@ -381,9 +379,9 @@ metrics: ``` -**配置完成后,再次重启 `hertzbeat` 系统,查看监控详情页面** +**配置完成后,再次重启 `hertzbeat` 系统,查看监控详情页面** -![](/img/docs/advanced/extend-http-example-8.png) +![](/img/docs/advanced/extend-http-example-8.png) ![](/img/docs/advanced/extend-http-example-9.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md index fef3c3d2fa8..1439c532219 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md @@ -3,16 +3,18 @@ id: 
extend-http-jsonpath title: HTTP协议JsonPath解析方式 sidebar_label: JsonPath解析方式 --- + > HTTP接口调用获取响应数据后,用JsonPath脚本解析的解析方式去解析响应数据。 注意⚠️ 响应数据为JSON格式 -**使用JsonPath脚本将响应数据解析成符合 Apache HertzBeat (incubating) 指定的数据结构规则的数据** +**使用JsonPath脚本将响应数据解析成符合 Apache HertzBeat (incubating) 指定的数据结构规则的数据** + +#### JsonPath操作符 -#### JsonPath操作符 -[JSONPath在线验证](https://www.jsonpath.cn) +[JSONPath在线验证](https://www.jsonpath.cn) -| JSONPATH | 帮助描述 | +| JSONPATH | 帮助描述 | |------------------|-----------------------------------| | $ | 根对象或元素 | | @ | 当前对象或元素 | @@ -25,8 +27,10 @@ sidebar_label: JsonPath解析方式 | ?() | 过滤器(脚本)表达式. | | () | 脚本表达式. | -#### HertzBeat数据格式规范 +#### HertzBeat数据格式规范 + 单层格式:key-value + ```json { "metricName1": "metricValue", @@ -35,7 +39,9 @@ sidebar_label: JsonPath解析方式 "metricName4": "metricValue" } ``` + 多层格式:数组里面套key-value + ```json [ { @@ -53,10 +59,11 @@ sidebar_label: JsonPath解析方式 ] ``` -#### 样例 +#### 样例 查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 -接口返回的原始数据如下: +接口返回的原始数据如下: + ```json { "firstName": "John", @@ -80,7 +87,8 @@ sidebar_label: JsonPath解析方式 } ``` -我们使用JsonPath脚本解析,对应的脚本为: `$.number[*]` ,解析后的数据结构如下: +我们使用JsonPath脚本解析,对应的脚本为: `$.number[*]` ,解析后的数据结构如下: + ```json [ { @@ -93,9 +101,10 @@ sidebar_label: JsonPath解析方式 } ] ``` -此数据结构符合HertzBeat的数据格式规范,成功提取指标`type,num`值。 -**对应的监控模版YML可以配置为如下** +此数据结构符合HertzBeat的数据格式规范,成功提取指标`type,num`值。 + +**对应的监控模版YML可以配置为如下** ```yaml category: custom @@ -165,3 +174,4 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md index 9a0dca41ae3..35a8f4fa5f5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md @@ -1,21 +1,22 @@ --- id: extend-http title: HTTP协议自定义监控 -sidebar_label: HTTP协议自定义监控 +sidebar_label: HTTP协议自定义监控 --- 
-> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用HTTP协议自定义指标监控。 -### HTTP协议采集流程 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用HTTP协议自定义指标监控。 + +### HTTP协议采集流程 + 【**HTTP接口调用**】->【**响应校验**】->【**响应数据解析**】->【**默认方式解析|JsonPath脚本解析 | XmlPath解析(todo) | Prometheus解析**】->【**指标数据提取**】 由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 -HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 +HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 **系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) -**JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) - +**JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -23,15 +24,14 @@ HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数, ------- -下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 +下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 -### 监控模版YML +### 监控模版YML > 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 - +样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -277,3 +277,4 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md index 6ff4b9bbed1..bb946d8ce1c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md @@ -1,29 +1,34 @@ --- id: extend-jdbc title: JDBC协议自定义监控 
-sidebar_label: JDBC协议自定义监控 +sidebar_label: JDBC协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JDBC(目前支持mysql,mariadb,postgresql,sqlserver)自定义指标监控。 -> JDBC协议自定义监控可以让我们很方便的通过写SQL查询语句就能监控到我们想监控的指标 -### JDBC协议采集流程 -【**系统直连MYSQL**】->【**运行SQL查询语句**】->【**响应数据解析:oneRow, multiRow, columns**】->【**指标数据提取**】 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JDBC(目前支持mysql,mariadb,postgresql,sqlserver)自定义指标监控。 +> JDBC协议自定义监控可以让我们很方便的通过写SQL查询语句就能监控到我们想监控的指标 + +### JDBC协议采集流程 + +【**系统直连MYSQL**】->【**运行SQL查询语句**】->【**响应数据解析:oneRow, multiRow, columns**】->【**指标数据提取**】 由流程可见,我们自定义一个JDBC协议的监控类型,需要配置JDBC请求参数,配置获取哪些指标,配置查询SQL语句。 -### 数据解析方式 +### 数据解析方式 + SQL查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有三种:oneRow, multiRow, columns -#### **oneRow** -> 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 +#### **oneRow** + +> 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book limit 1; -这里指标字段就能和响应数据一一映射为一行采集数据。 +这里指标字段就能和响应数据一一映射为一行采集数据。 #### **multiRow** -> 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 + +> 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 例如: 查询的指标字段为:one tow three four @@ -31,33 +36,34 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 这里指标字段就能和响应数据一一映射为多行采集数据。 #### **columns** -> 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 + +> 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 例如: 查询字段:one tow three four 查询SQL:select key, value from book; -SQL响应数据: +SQL响应数据: -| key | value | -|----------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | +| key | value | +|-------|-------| +| one | 243 | +| two | 435 | +| three | 332 | +| four | 643 | -这里指标字段就能和响应数据的key映射,获取对应的value为其采集监控数据。 +这里指标字段就能和响应数据的key映射,获取对应的value为其采集监控数据。 -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) - ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 -### 监控模版YML +下面详细介绍下文件的配置用法,请注意看使用注释。 + +### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 
采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 @@ -237,3 +243,4 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md index cca1bcfb726..71bb06ba2b2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md @@ -1,38 +1,38 @@ --- id: extend-jmx title: JMX协议自定义监控 -sidebar_label: JMX协议自定义监控 +sidebar_label: JMX协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 -> JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 +> JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 + +### JMX协议采集流程 -### JMX协议采集流程 -【**对端JAVA应用暴露JMX服务**】->【**HertzBeat直连对端JMX服务**】->【**获取配置的 Mbean Object 数据**】->【**指标数据提取**】 +【**对端JAVA应用暴露JMX服务**】->【**HertzBeat直连对端JMX服务**】->【**获取配置的 Mbean Object 数据**】->【**指标数据提取**】 由流程可见,我们自定义一个JMX协议的监控类型,需要配置JMX请求参数,配置获取哪些指标,配置查询Object信息。 -### 数据解析方式 +### 数据解析方式 通过配置监控模版YML的指标`field`, `aliasFields`, `jmx` 协议的 `objectName` 来和对端系统暴露的 `Mbean`对象信息映射解析。 - - -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下监控模版的配置用法,请注意看使用注释。 + +下面详细介绍下监控模版的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 - +样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -236,3 +236,4 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` + 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md index b5685eb3b16..34514b3f2bb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md @@ -20,11 +20,11 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 > `filterName`: 过滤属性名称(可选) > `filterValue`: 过滤属性值(可选) -例如: +例如: - online_meta_count#SHOW HOSTS META#Status#ONLINE - 对 `SHOW HOSTS META` 返回的结果中统计滤Status==ONLINE的数量 +对 `SHOW HOSTS META` 返回的结果中统计滤Status==ONLINE的数量 - online_meta_count#SHOW HOSTS META## - 统计 `SHOW HOSTS META` 返回的行数 +统计 `SHOW HOSTS META` 返回的行数 #### **oneRow** @@ -72,6 +72,7 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 ![](/img/docs/advanced/extend-point-1.png) ------- + 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML @@ -169,3 +170,4 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md index d2b0ee36cdc..6de319a1c15 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md @@ -1,12 +1,13 @@ --- id: extend-point title: 自定义监控 -sidebar_label: 自定义监控 +sidebar_label: 自定义监控 --- + > HertzBeat拥有自定义监控能力,您只需配置监控模版YML就能适配一款自定义的监控类型。 -> 目前自定义监控支持[HTTP协议](extend-http),[JDBC协议](extend-jdbc),[SSH协议](extend-ssh),[JMX协议](extend-jmx),[SNMP协议](extend-snmp),后续会支持更多通用协议。 +> 目前自定义监控支持[HTTP协议](extend-http),[JDBC协议](extend-jdbc),[SSH协议](extend-ssh),[JMX协议](extend-jmx),[SNMP协议](extend-snmp),后续会支持更多通用协议。 -### 自定义流程 +### 自定义流程 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -14,13 +15,13 @@ sidebar_label: 自定义监控 ------- -### 监控模版YML +### 监控模版YML 
**HertzBeat的设计是一个监控模版对应一个监控类型,所有监控类型都是由监控模版来定义的**。 -> 监控模版YML定义了 *监控类型的名称(国际化), 配置参数映射, 采集指标信息, 采集协议配置* 等。 +> 监控模版YML定义了 *监控类型的名称(国际化), 配置参数映射, 采集指标信息, 采集协议配置* 等。 -下面使用样例详细介绍下这监控模版YML的配置用法。 +下面使用样例详细介绍下这监控模版YML的配置用法。 样例:自定义一个 `app` 名称为 `example2` 的自定义监控类型,其使用HTTP协议采集指标数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md index 3ff65d60a17..387d67c5987 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md @@ -1,38 +1,38 @@ --- id: extend-snmp title: SNMP协议自定义监控 -sidebar_label: SNMP协议自定义监控 +sidebar_label: SNMP协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 -> SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 +> SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 + +### SNMP协议采集流程 -### SNMP协议采集流程 -【**对端开启SNMP服务**】->【**HertzBeat直连对端SNMP服务**】->【**根据配置抓取对端OID指标信息**】->【**指标数据提取**】 +【**对端开启SNMP服务**】->【**HertzBeat直连对端SNMP服务**】->【**根据配置抓取对端OID指标信息**】->【**指标数据提取**】 由流程可见,我们自定义一个SNMP协议的监控类型,需要配置SNMP请求参数,配置获取哪些指标,配置查询OID信息。 -### 数据解析方式 +### 数据解析方式 通过配置监控模版YML的指标`field`, `aliasFields`, `snmp` 协议下的 `oids`来抓取对端指定的数据并解析映射。 - - -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 + +下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 - +样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 ```yaml # The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring @@ -207,3 +207,4 @@ 
metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md index 8284726e661..0f643f153f8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md @@ -1,21 +1,25 @@ --- id: extend-ssh title: SSH协议自定义监控 -sidebar_label: SSH协议自定义监控 +sidebar_label: SSH协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 -> SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 +> SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 + +### SSH协议采集流程 -### SSH协议采集流程 -【**系统直连Linux**】->【**运行SHELL命令脚本语句**】->【**响应数据解析:oneRow, multiRow**】->【**指标数据提取**】 +【**系统直连Linux**】->【**运行SHELL命令脚本语句**】->【**响应数据解析:oneRow, multiRow**】->【**指标数据提取**】 由流程可见,我们自定义一个SSH协议的监控类型,需要配置SSH请求参数,配置获取哪些指标,配置查询脚本语句。 -### 数据解析方式 +### 数据解析方式 + SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有两种:oneRow, multiRow,能满足绝大部分指标需求。 -#### **oneRow** -> 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 +#### **oneRow** + +> 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 例如: 需要查询Linux的指标 hostname-主机名称,uptime-启动时间 @@ -23,31 +27,37 @@ SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` 则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): `hostname; uptime | awk -F "," '{print $1}'` -终端响应的数据为: +终端响应的数据为: + ``` tombook 14:00:15 up 72 days -``` +``` + 则最后采集到的指标数据一一映射为: hostname值为 `tombook` -uptime值为 `14:00:15 up 72 days` +uptime值为 `14:00:15 up 72 days` -这里指标字段就能和响应数据一一映射为一行采集数据。 +这里指标字段就能和响应数据一一映射为一行采集数据。 #### **multiRow** -> 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 + +> 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 例如: 查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 -内存指标原始查询命令为:`free -m`, 控制台响应: +内存指标原始查询命令为:`free -m`, 控制台响应: + ```shell total used free shared buff/cache available Mem: 7962 4065 333 1 3562 3593 Swap: 8191 33 
8158 ``` + 在hertzbeat中multiRow格式解析需要响应数据列名称和指标值一一映射,则对应的查询SHELL脚本为: `free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` -控制台响应为: +控制台响应为: + ```shell total used free buff_cache available 7962 4066 331 3564 3592 @@ -55,22 +65,22 @@ total used free buff_cache available 这里指标字段就能和响应数据一一映射为采集数据。 -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 + +下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 - +样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -206,3 +216,4 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md index 14643c29c4e..4d2e2425257 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md @@ -1,28 +1,31 @@ --- id: extend-telnet title: Telnet协议自定义监控 -sidebar_label: Telnet协议自定义监控 +sidebar_label: Telnet协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用Telnet协议自定义指标监控。 -> 使用 Telnet 协议自定义监控可以让我们通过编写 Telnet 命令脚本来监控和采集我们想要监控的 Linux 指标 +> 使用 Telnet 协议自定义监控可以让我们通过编写 Telnet 命令脚本来监控和采集我们想要监控的 Linux 指标 + +### Telnet协议采集流程 -### Telnet协议采集流程 -【**系统直连Linux**】->【**运行TELNET命令脚本语句**】->【**响应数据解析**】->【**指标数据提取**】 
+【**系统直连Linux**】->【**运行TELNET命令脚本语句**】->【**响应数据解析**】->【**指标数据提取**】 由流程可见,我们自定义一个Telnet协议的监控类型,需要配置Telnet请求参数,配置获取哪些指标,配置查询脚本语句。 -### 数据解析方式 -通过配置监控模版YML的指标field, aliasFields, telnet 协议下的获取数据映射。 +### 数据解析方式 +通过配置监控模版YML的指标field, aliasFields, telnet 协议下的获取数据映射。 -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 + +下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML @@ -294,3 +297,4 @@ metrics: ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md index 22578d67854..7b3112f52f8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md @@ -4,16 +4,14 @@ title: 自定义适配一款基于HTTP协议的新监控类型 sidebar_label: 教程案例 --- -通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 系统下自定义新增适配一款基于 http 协议的监控类型。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 系统下自定义新增适配一款基于 http 协议的监控类型。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 ### HTTP协议解析通用响应结构体,获取指标数据 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - ``` { "code": 200, @@ -22,6 +20,7 @@ sidebar_label: 教程案例 } ``` + 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: @@ -58,12 +57,11 @@ sidebar_label: 教程案例 **我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - ### 新增配置监控模版YML **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -定义我们在页面上需要输入哪些参数,一般的HTTP协议参数主要有ip, port, headers, params, uri, 账户密码等,我们直接复用 `api`监控模版 里面的参数定义内容,删除其中的我们不需要输入的uri参数和keyword关键字等参数即可。 
+定义我们在页面上需要输入哪些参数,一般的HTTP协议参数主要有ip, port, headers, params, uri, 账户密码等,我们直接复用 `api`监控模版 里面的参数定义内容,删除其中的我们不需要输入的uri参数和keyword关键字等参数即可。 定义采集类型是啥,需要用哪种协议采集方式,采集的指标是啥,协议的配置参数等。我们直接复用 `api`监控模版 里面的定义内容,修改为我们当前的监控类型`hertzbeat`配置参数即可,如下:注意⚠️我们这次获取接口响应数据中的`category`,`app`,`status`,`size`,`availableSize`等字段作为指标数据。 @@ -229,32 +227,24 @@ metrics: **点击保存并应用。我们可以看到系统页面的自定义监控菜单已经多了一个`hertzbeat`监控类型了。** - ![](/img/docs/advanced/extend-http-example-1.png) - ### 页面添加对`hertzbeat`监控类型的监控 > 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > 过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### 设置阈值告警通知 > 接下来我们就可以正常设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ---- #### 完! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md index c91cdca7171..c30a850a3c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md @@ -5,23 +5,22 @@ sidebar_position: 2 --- - ## 成为 Apache HertzBeat™ 的 Committer 任何支持社区并在 CoPDoC 领域中工作的人都可以成为 Apache HertzBeat 的 Committer。CoPDoC 是 ASF 的缩写,用来描述我们如何不仅仅通过代码来认识到您的贡献。 @@ -58,3 +57,4 @@ Committer 的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md index e64661b7595..39cf1da9123 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md @@ -5,20 +5,20 @@ sidebar_position: 3 --- 
## 成为 Apache HertzBeat™ 的 PMC 成员 @@ -45,6 +45,7 @@ PMC 成员的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - +3 个月的高度活动和参与。 ### 贡献的质量 + - 对项目有深入的理解。 - 经过充分测试、良好设计、遵循 Apache HertzBeat 的编码标准,及简单的修复补丁。 - 井井有条的面向用户的详细文档。 @@ -56,3 +57,4 @@ PMC 成员的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md index a9a6f54d474..1cad8c3add1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md @@ -5,41 +5,38 @@ sidebar_position: 3 --- - ## 1 拉取请求与变更规则 1. `ISSUE`/`PR`(拉取请求) 的引导和命名 - - 新建 `PR` 后需要在 `PR` 页面的 Github Development 按钮处关联已存在的对应 `ISSUE`(若无建议新建对应ISSUE) + - 新建 `PR` 后需要在 `PR` 页面的 Github Development 按钮处关联已存在的对应 `ISSUE`(若无建议新建对应ISSUE) - - 标题命名格式(英文,小写) - `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` + - 标题命名格式(英文,小写) + `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` 2. 添加描述信息 - - 新建 `PR` 时请仔细描述此贡献,描述文档和代码同样重要。审阅者可以从描述中,而不仅仅是从代码中,了解问题和解决方案。 - - 勾选是否完成了对应的 Checklist。 - + - 新建 `PR` 时请仔细描述此贡献,描述文档和代码同样重要。审阅者可以从描述中,而不仅仅是从代码中,了解问题和解决方案。 + - 勾选是否完成了对应的 Checklist。 3. 建议一次 `PR` 只包含一个功能/一种修复/一类改进/一种重构/一次清理/一类文档等 - 4. 提交消息(英文,小写,无特殊字符) - 消息的提交应遵循与 `PR` 类似的模式:`[feature/bugfix/doc/improve/refactor/bug/cleanup] title` + 消息的提交应遵循与 `PR` 类似的模式:`[feature/bugfix/doc/improve/refactor/bug/cleanup] title` ## 2 代码检查样式 @@ -69,81 +66,79 @@ sidebar_position: 3 ### 3.1 命名风格 1. 优先为变量命名选择名词,这样更容易区分`变量`或`方法`。 + ```java - Cache publicKeyCache; + Cache publicKeyCache; ``` - 2. 变量的拼音缩写是禁止的(排除地名等名词),例如chengdu。 - 3. 
推荐的变量名以 `类型` 结尾。 对于 `Collection/List` 类型的变量,取 `xxxx` (复数表示多个元素)或以 `xxxList` (特定类型)结束。 对于 `map` 类型的变量,清晰地描述 `key` 和 `value`: + ```java - Map idUserMap; - Map userIdNameMap; + Map idUserMap; + Map userIdNameMap; ``` - 4. 通过其名称直观地知道变量的类型和含义。 方法名称应首先以动词开始,如下所示: + ```java - void computeVcores(Object parameter1); + void computeVcores(Object parameter1); ``` - > 注意:在 `Builder` 工具类中不必严格遵循这项规则。 - + > 注意:在 `Builder` 工具类中不必严格遵循这项规则。 ### 3.2 常量变量定义 1. 多余的字符串应提取为常量 - >如果一个常量被硬编码两次或多次,请直接提取它为常量并更改相应的引用。 - 通常,`log` 中的常量可以忽略提取。 - - - 负面示例: - - ```java - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put("status", "success"); - resp.put("code", ResponseCode.CODE_SUCCESS); - resp.put("data", data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put("status", "error"); - resp.put("code", ResponseCode.CODE_FAIL); - resp.put("data", null); - return resp; - } - ``` - - - 正面示例: - - > 字符串提取为常量引用。 - - ```java - public static final String STATUS = "status"; - public static final String CODE = "code"; - public static final String DATA = "data"; - - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "success"); - resp.put(CODE, ResponseCode.CODE_SUCCESS); - resp.put(DATA, data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "error"); - resp.put(CODE, ResponseCode.CODE_FAIL); - resp.put(DATA, null); - return resp; - } - ``` + > 如果一个常量被硬编码两次或多次,请直接提取它为常量并更改相应的引用。 + > 通常,`log` 中的常量可以忽略提取。 + + - 负面示例: + + ```java + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put("status", "success"); + resp.put("code", ResponseCode.CODE_SUCCESS); + resp.put("data", data); + return resp; + } + + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put("status", "error"); + 
resp.put("code", ResponseCode.CODE_FAIL); + resp.put("data", null); + return resp; + } + ``` + - 正面示例: + + > 字符串提取为常量引用。 + + ```java + public static final String STATUS = "status"; + public static final String CODE = "code"; + public static final String DATA = "data"; + + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "success"); + resp.put(CODE, ResponseCode.CODE_SUCCESS); + resp.put(DATA, data); + return resp; + } + + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "error"); + resp.put(CODE, ResponseCode.CODE_FAIL); + resp.put(DATA, null); + return resp; + } + ``` 2. 确保代码的可读性和直观性 - `annotation` 符号中的字符串不需要提取为常量。 @@ -155,9 +150,9 @@ sidebar_position: 3 4. 关于 `constant/variable` 行的排序顺序 按以下顺序对类中的变量行进行排序: - 1. `public static final V`, `static final V`,`protected static final V`, `private static final V` - 2. `public static v`, `static v`,`protected static v`, `private static v` - 3. `public v`, `v`, `protected v`, `private v` + 1. `public static final V`, `static final V`,`protected static final V`, `private static final V` + 2. `public static v`, `static v`,`protected static v`, `private static v` + 3. `public v`, `v`, `protected v`, `private v` ### 3.3 方法规则 @@ -174,32 +169,28 @@ sidebar_position: 3 3. 如果方法中的代码行数太多,请尝试在适当的点上使用多个子方法来分段方法体。 一般来说,需要坚持以下原则: - - 便于测试 - - 有好的语义 - - 易于阅读 + - 便于测试 + - 有好的语义 + - 易于阅读 此外,还需要考虑在组件、逻辑、抽象和场景等方面的切割是否合理。 > 然而,目前还没有明确的演示定义。在演变过程中,我们将为开发者提供更多的示例,以便他们有更清晰的参考和理解。 - ### 3.4 集合规则 1. 对于返回的 `collection` 值,除非有特殊的 `concurrent` (如线程安全),总是返回 `interface`,例如: - - - 如果使用 `ArrayList`,则返回 List - - 如果使用 `HashMap`,则返回 Map - - 如果使用 `HashSet`,则返回 Set - + - 如果使用 `ArrayList`,则返回 List + - 如果使用 `HashMap`,则返回 Map + - 如果使用 `HashSet`,则返回 Set 2. 如果存在多线程,可以使用以下声明或返回类型: - ```java - private CurrentHashMap map; - public CurrentHashMap funName(); - ``` +```java +private CurrentHashMap map; +public CurrentHashMap funName(); +``` 3. 
使用 `isEmpty()` 而不是 `length() == 0` 或者 `size() == 0` - - 负面示例: ```java @@ -207,7 +198,6 @@ sidebar_position: 3 return; } ``` - - 正面示例: ```java @@ -227,9 +217,8 @@ sidebar_position: 3 ### 3.6 控制/条件语句 1. 避免因不合理的 `条件/控制` 分支顺序导致: - - - 多个代码行的 `深度` 为 `n+1` - - 多余的行 + - 多个代码行的 `深度` 为 `n+1` + - 多余的行 一般来说,如果一个方法的代码行深度由于连续嵌套的 `if... else..` 超过了 `2+ Tabs`,那么应该考虑试图 - `合并分支`, @@ -238,77 +227,85 @@ sidebar_position: 3 以减少代码行深度并提高可读性,例如: - 联合或将逻辑合并到下一级调用中 - - 负面示例: - ```java - if (isInsert) { - save(platform); - } else { - updateById(platform); - } - ``` - - 正面示例: - ```java - saveOrUpdate(platform); - ``` +- 负面示例: + +```java +if (isInsert) { +save(platform); +} else { +updateById(platform); +} +``` + +- 正面示例: + +```java +saveOrUpdate(platform); +``` + - 合并条件 - - 负面示例: - ```java - if (expression1) { - if(expression2) { - ...... - } - } - ``` - - 正面示例: - ```java - if (expression1 && expression2) { - ...... - } - ``` +- 负面示例: + +```java +if (expression1) { +if(expression2) { +...... +} +} + +``` + +- 正面示例: + + ```java + if (expression1 && expression2) { + ...... + } + ``` - 反转条件 - - 负面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (condition1) { - ... - } else { - ... - } - } - ``` - - - 正面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` +- 负面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (condition1) { + ... + } else { + ... + } + } + ``` +- 正面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (!condition1) { + ... + return; + } + // ... + } + ``` - 使用单一变量或方法减少复杂的条件表达式 - - 负面示例: - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` - - - 正面示例: - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // containsSqlServer的定义 - ``` +- 负面示例: + + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + ... 
+ } + ``` +- 正面示例: + + ```java + if (containsSqlServer(dbType)) { + .... + } + //..... + // containsSqlServer的定义 + ``` > 在未来,使用 `sonarlint` 和 `better highlights` 检查代码深度看起来是个不错的选择。 @@ -316,20 +313,20 @@ sidebar_position: 3 1. 方法缺少注释: - - `When`:该方法何时可以被调用 - - `How`:如何使用此方法以及如何传递参数等 - - `What`:此方法实现了哪些功能 - - `Note`:在调用此方法时开发人员应注意什么 +- `When`:该方法何时可以被调用 +- `How`:如何使用此方法以及如何传递参数等 +- `What`:此方法实现了哪些功能 +- `Note`:在调用此方法时开发人员应注意什么 2. 缺少必要的类头部描述注释。 - 添加 `What`,`Note` 等,如上述 `1` 中提到的。 +添加 `What`,`Note` 等,如上述 `1` 中提到的。 3. 在接口中的方法声明必须被注释。 - - 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 +- 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 - - 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 +- 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 4. 在注释行中的第一个词需要大写,如 `param` 行,`return` 行。 如果特殊引用作为主题不需要大写,需要注意特殊符号,例如引号。 @@ -339,31 +336,29 @@ sidebar_position: 3 1. 更倾向于使用 `non-capturing` lambda(不包含对外部范围的引用的lambda)。 Capturing lambda 在每次调用时都需要创建一个新的对象实例。`Non-capturing` lambda 可以为每次调用使用相同的实例。 - - 负面示例: +- 负面示例: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` - - - 正面示例: + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` +- 正面示例: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. 考虑使用方法引用而不是内联lambda - - 负面示例: - - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` +- 负面示例: - - 正面示例: + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` +- 正面示例: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -381,186 +376,180 @@ sidebar_position: 3 1. 使用 `StringUtils.isBlank` 而不是 `StringUtils.isEmpty` - - 负面示例: - - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` +- 负面示例: - - 正面示例: + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` +- 正面示例: - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. 
使用 `StringUtils.isNotBlank` 而不是 `StringUtils.isNotEmpty` - - 负面示例: - - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` +- 负面示例: - - 正面示例: + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` +- 正面示例: - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. 使用 `StringUtils.isAllBlank` 而不是 `StringUtils.isAllEmpty` - - 负面示例: - - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` +- 负面示例: - - 正面示例: + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` +- 正面示例: - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` 类 1. 枚举值比较 - - 负面示例: +- 负面示例: - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` - - - 正面示例: + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` +- 正面示例: - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. 枚举类不需要实现 Serializable - - 负面示例: - - ```java - public enum JobStatus implements Serializable { - ... - } - ``` +- 负面示例: - - 正面示例: + ```java + public enum JobStatus implements Serializable { + ... + } + ``` +- 正面示例: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + ... + } + ``` 3. 使用 `Enum.name()` 而不是 `Enum.toString()` - - 负面示例: - - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` +- 负面示例: - - 正面示例: + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` +- 正面示例: - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. 枚举类名称统一使用 Enum 后缀 - - 负面示例: +- 负面示例: - ```java - public enum JobStatus { - ... - } - ``` - - - 正面示例: + ```java + public enum JobStatus { + ... + } + ``` +- 正面示例: - ```java - public enum JobStatusEnum { - ... 
- } - ``` + ```java + public enum JobStatusEnum { + ... + } + ``` ### 3.13 `Deprecated` 注解 - - 负面示例: +- 负面示例: - ```java - @deprecated - public void process(String input) { - ... - } - ``` +```java +@deprecated +public void process(String input) { + ... +} +``` - - 正面示例: +- 正面示例: - ```java - @Deprecated - public void process(String input) { - ... - } - ``` +```java +@Deprecated +public void process(String input) { + ... +} +``` ## 4 日志 1. 使用 `占位符` 进行日志输出: - - 负面示例 - ```java - log.info("Deploy cluster request " + deployRequest); - ``` - - 正面示例 - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` +- 负面示例 -2. 打印日志时,注意选择 `日志级别` + ```java + log.info("Deploy cluster request " + deployRequest); + ``` +- 正面示例 - 当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` - - 负面示例: +2. 打印日志时,注意选择 `日志级别` - 假设当前日志级别为 `INFO`: +当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` +- 负面示例: - - 正面示例: + 假设当前日志级别为 `INFO`: - 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` +- 正面示例: - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: + + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 测试 @@ -568,10 +557,15 @@ sidebar_position: 3 2. 
实现的接口需在`e2e`模块下编写`e2e`测试用例脚本。 - ## 参考资料 + - https://site.mockito.org/ - https://alibaba.github.io/p3c/ - https://rules.sonarsource.com/java/ - https://junit.org/junit5/ - https://streampark.apache.org/ + +``` + +``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contact.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contact.md index 6167d13aa5f..0c574937517 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contact.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contact.md @@ -1,8 +1,9 @@ --- id: contact title: 交流联系 -sidebar_label: 交流联系 +sidebar_label: 交流联系 --- + > 如果您在使用过程有任何需要帮助或者想交流建议,可以通过 群 ISSUE 讨论交流。 [订阅邮件列表](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) : 发送邮件至 `dev-subscribe@hertzbeat.apache.org` 来订阅邮件列表. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md index e4aae1c519a..7223d9b5997 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md @@ -5,20 +5,20 @@ sidebar_position: 0 --- > 非常欢迎参与项目贡献,我们致力于维护一个互相帮助的快乐社区。 @@ -87,23 +87,31 @@ sidebar_position: 0 1. 首先您需要 Fork 目标仓库 [hertzbeat repository](https://github.com/apache/hertzbeat). 2. 然后 用 git 命令 将代码下载到本地: + ```shell git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended ``` + 3. 下载完成后,请参考目标仓库的入门指南或者 README 文件对项目进行初始化。 4. 接着,您可以参考如下命令进行代码的提交, 切换新的分支, 进行开发: + ```shell git checkout -b a-feature-branch #Recommended ``` + 5. 提交 commit, commit 描述信息需要符合约定格式: [module name or type name]feature or bugfix or doc: custom message. + ```shell git add git commit -m '[docs]feature: necessary instructions' #Recommended ``` + 6. 推送到远程仓库 + ```shell git push origin a-feature-branch ``` + 7. 
然后您就可以在 GitHub 上发起新的 PR (Pull Request)。 请注意 PR 的标题需要符合我们的规范,并且在 PR 中写上必要的说明,来方便 Committer 和其他贡献者进行代码审查。 @@ -148,14 +156,15 @@ git pull upstream master ### 模块 - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** 提供监控管理,系统管理基础服务 + > 提供对监控的管理,监控应用配置的管理,系统用户租户后台管理等。 -- **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 +> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 > 使用通用协议远程采集获取对端指标数据。 -- **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 +> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 > 采集指标结果数据管理,数据落盘,查询,计算统计。 -- **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** 提供告警服务 +> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** 提供告警服务 > 告警计算触发,任务状态联动,告警配置,告警通知。 -- **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 +> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 > 监控告警系统可视化控制台前端 ![hertzBeat](/img/docs/hertzbeat-arch.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md index 2bfebd21d2f..c9ed4b45859 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md @@ -1,7 +1,7 @@ --- id: development title: 如何将 HertzBeat 运行编译? -sidebar_label: 运行编译 +sidebar_label: 运行编译 --- ## 让 HertzBeat 运行起来 @@ -9,7 +9,6 @@ sidebar_label: 运行编译 > 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 > 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 - ### 后端启动 1. 
需要 `maven3+`, `java17` 和 `lombok` 环境 @@ -38,7 +37,7 @@ sidebar_label: 运行编译 ## 生成二进制包 -> 需要 `maven3+`, `java17`, `node` 和 `yarn` 环境. +> 需要 `maven3+`, `java17`, `node` 和 `yarn` 环境. ### 前端打包 @@ -52,7 +51,6 @@ sidebar_label: 运行编译 5. 打包: `yarn package` - ### 后端打包 1. 需要 `maven3+`, `java17` 环境 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md index e696b3c35e4..4cf56e0137b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- 对于任何类型的软件来说,良好的文档都是至关重要的。任何能够改进 HertzBeat 文档的贡献都是受欢迎的。 @@ -93,3 +93,4 @@ css 和其他样式文件放在 `src/css` 目录中。 ### 页面内容修改 > 所有页面文档都可以通过底部的'编辑此页面'按钮直接跳转到相应的 github 资源修改页面。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md index 0af99b19c08..1b79df79f85 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md @@ -12,7 +12,7 @@ sidebar_position: 4 - JDK 17 - Node18 Yarn -- Apache Maven 3.x +- Apache Maven 3.x - GnuPG 2.x - Git - SVN (Apache使用svn来托管项目发布) @@ -22,11 +22,11 @@ sidebar_position: 4 ## 2. 
准备发布 > 首先整理帐户信息以更好地了解操作过程,稍后会多次使用。 -- apache id: `muchunjin (APACHE LDAP 用户名)` -- apache passphrase: `APACHE LDAP 密钥` -- apache email: `muchunjin@apache.org` -- gpg real name: `muchunjin (任何名称均可用, 在这里我将其设置为与apache id相同的名称)` -- gpg key passphrase: `创建gpg密钥时设置的密码,你需要记住此密码` +> - apache id: `muchunjin (APACHE LDAP 用户名)` +> - apache passphrase: `APACHE LDAP 密钥` +> - apache email: `muchunjin@apache.org` +> - gpg real name: `muchunjin (任何名称均可用, 在这里我将其设置为与apache id相同的名称)` +> - gpg key passphrase: `创建gpg密钥时设置的密码,你需要记住此密码` ### 2.1 生成密钥 @@ -130,7 +130,6 @@ gpg: unchanged: 1 或者进入 https://keyserver.ubuntu.com/ 网址,输入密钥的名称,然后点击'Search key' 按钮,查看是否有对应名称的密钥。 - #### 2.4 将 gpg 公钥添加到 Apache SVN 项目仓库的 KEYS 文件中 - Apache HertzBeat Dev 分支 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat @@ -230,9 +229,9 @@ release-1.6.0-rc1 #### 3.5 对二进制和源码包进行签名 -将上步骤生成的三个文件包放到`dist`目录下(若无则新建目录),然后对文件包进行签名和SHA512校验和生成。 +将上步骤生成的三个文件包放到`dist`目录下(若无则新建目录),然后对文件包进行签名和SHA512校验和生成。 -> 其中 `gpg -u 33545C76` 的 `33545C76` 是你的 GPG 密钥 ID,可以通过 `gpg --keyid-format SHORT --list-keys` 查看。 +> 其中 `gpg -u 33545C76` 的 `33545C76` 是你的 GPG 密钥 ID,可以通过 `gpg --keyid-format SHORT --list-keys` 查看。 ```shell cd dist @@ -341,7 +340,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" > `Send to`: dev@hertzbeat.apache.org
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
-> `Body`: +> `Body`: ``` Hello HertzBeat Community: @@ -404,8 +403,7 @@ Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" I'm happy to announce the vote has passed: - - +--- 4 binding +1, from: - cc @@ -413,24 +411,20 @@ I'm happy to announce the vote has passed: 1 non-binding +1, from: - Roc Marshal - - +--- no 0 or -1 votes. Vote thread: https://lists.apache.org/thread/t01b2lbtqzyt7j4dsbdp5qjc3gngjsdq - - +--- Thank you to everyone who helped us to verify and vote for this release. We will move to the ASF Incubator voting shortly. - - +--- Best, ChunJin Mu ``` 邮件内容中的一项是`Vote thread`,在 https://lists.apache.org/list.html?dev@hertzbeat.apache.org 查看获取 - #### 3.2 发送孵化社区投票邮件 发送孵化社区投票邮件需要至少三个`+1`,且无`-1`。 @@ -476,17 +470,14 @@ More detailed checklist please refer: Steps to validate the release, Please refer to: • https://www.apache.org/info/verification.html • https://hertzbeat.apache.org/docs/community/how_to_verify_release - - +--- How to Build: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package - - +--- Thanks, On behalf of Apache HertzBeat (incubating) community - - +--- Best, ChunJin Mu ``` @@ -562,11 +553,14 @@ https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugi 然后输入发版标题和描述 -- 发版标题: +- 发版标题: + ``` v1.6.0 ``` + - 描述: + ``` xxx release note: xxx @@ -603,8 +597,7 @@ https://hertzbeat.apache.org/ HertzBeat Resources: - Issue: https://github.com/apache/hertzbeat/issues - Mailing list: dev@hertzbeat.apache.org - - +--- Apache HertzBeat Team Best, @@ -613,6 +606,6 @@ ChunJin Mu 该版本的发布顺利结束。 ----- +--- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md index 
ee4f9563c83..9904af1967b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md @@ -3,6 +3,7 @@ id: how_to_verify_release title: 版本物料的验证 sidebar_position: 4 --- + # 验证候选版本 详细检查列表请参考官方的[check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) @@ -21,12 +22,10 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio ``` - ## 2. 验证上传的版本是否合规 > 开始验证环节,验证包含但不局限于以下内容和形式 - ### 2.1 查看发布包是否完整 > 上传到dist的包必须包含源码包,二进制包可选 @@ -36,7 +35,6 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio 3. 是否包含源码包的sha512 4. 如果上传了二进制包,则同样检查(2)-(4)所列的内容 - ### 2.2 检查gpg签名 首先导入发布人公钥。从svn仓库导入KEYS到本地环境。(发布版本的人不需要再导入,帮助做验证的人需要导入,用户名填发版人的即可) @@ -47,6 +45,7 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio $ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS $ gpg --import KEYS # 导入KEYS到本地 ``` + #### 2.2.2 信任公钥 > 信任此次版本所使用的KEY @@ -75,6 +74,7 @@ Do you really want to set this key to ultimate trust? (y/N) y #选择y gpg> ``` + #### 2.2.3 检查签名 ```shell @@ -82,6 +82,7 @@ for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i ; done ``` 检查结果 + > 出现类似以下内容则说明签名正确,关键字:**`Good signature`** ```shell @@ -95,7 +96,6 @@ gpg: Good signature from "xxx @apache.org>" > 本地计算sha512哈希后,验证是否与dist上的一致,如果上传二进制包,则同样需要检查二进制包的sha512哈希 - ```shell for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` @@ -123,7 +123,6 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz 参考: https://apache.org/legal/resolved.html - ### 2.5. 
源码编译验证 解压缩 `apache-hertzbeat-${release_version}-incubating-src.tar.gz` @@ -132,7 +131,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz cd apache-hertzbeat-${release_version}-incubating-src ``` -编译源码: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package +编译源码: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package 进行如下检查: @@ -148,7 +147,6 @@ cd apache-hertzbeat-${release_version}-incubating-src 参考: https://apache.org/legal/resolved.html - ## 3. 邮件回复 如果发起了发布投票,验证后,可以参照此回复示例进行邮件回复 @@ -169,11 +167,11 @@ IPMC 在 general@incubator.apache.org incubator 社区投票,请带上 binding Forward my +1 from dev@hertzbeat (non-binding) Copy my +1 from hertzbeat DEV ML (non-binding) ``` -::: - +::: 非PPMC/IPMC成员 + ```html +1 (non-binding) I checked: @@ -185,6 +183,7 @@ I checked: ``` PPMC/IPMC成员 + ```html //incubator社区 投票时,只有IPMC成员才具有约束性 binding +1 (binding) @@ -196,7 +195,6 @@ I checked: 5. .... ``` - --- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md index 57de5409834..922cbfe9a6a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- [开发者邮件列表](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) 是社区推荐的沟通和获取最新信息的方式。 @@ -32,19 +32,17 @@ sidebar_position: 1 - 使用此列表提出您对 HertzBeat 的问题 - 由 HertzBeat 贡献者用来讨论 HertzBeat 的开发 - -| 列表名称 | 地址 | 订阅 | 退订 | 归档 | 
-|--------------|------------------------------|------------------------------------------------------|---------------------------------------------------------|------------------------------------------------------------------------| -| **开发者列表** | dev@hertzbeat.apache.org | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | - +| 列表名称 | 地址 | 订阅 | 退订 | 归档 | +|-----------|--------------------------|-------------------------------------------------|---------------------------------------------------|-------------------------------------------------------------------| +| **开发者列表** | dev@hertzbeat.apache.org | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### 通知列表 - 关于 HertzBeat 代码库的更改的通知,频繁通知 -| 列表名称 | 地址 | 订阅 | 退订 | 归档 | -|-----------|------------------------------|-------------------------------------------------------------|--------------------------------------------------------------|-----------------------------------------------------------------------------| -| **通知列表** | notifications@hertzbeat.apache.org | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| 列表名称 | 地址 | 订阅 | 退订 | 归档 | +|----------|------------------------------------|-----------------------------------------------------------|-------------------------------------------------------------|-----------------------------------------------------------------------------| +| **通知列表** | notifications@hertzbeat.apache.org | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | 
[归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## 订阅步骤 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md index 5bb52e0a425..c4c61aac4bc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md @@ -5,20 +5,20 @@ sidebar_position: 4 --- [官方指南](https://community.apache.org/newcommitter.html#new-committer-process) @@ -49,7 +49,6 @@ sidebar_position: 4 - 等待root告诉我们已经完成 - [Roster](https://whimsy.apache.org/roster/ppmc/hertzbeat) 添加新的 committer - - Announce New Committer 参见 **Announce New Committer Template** @@ -285,7 +284,9 @@ Best Wishes! ## 详细步骤 ### 接受邀请 + 当通过PMC内部投票后,PMC会向您索要个人邮箱,之后您会收到一封邮件,邮件会指引您进行下一步操作,内容为: + ``` Hello xxxx, @@ -371,14 +372,14 @@ hi, i accept. Thanks for invitaion. ![](/img/docs/community/icla-content-2.png) 在PDF中需要填写的字段: - - `Full name` - - `Public name` - - `Postal Address` - - `Country` - - `E-Mail` - - `(optional) preferred Apache id(s)` - - `(optional) notify project` - - `Date` +- `Full name` +- `Public name` +- `Postal Address` +- `Country` +- `E-Mail` +- `(optional) preferred Apache id(s)` +- `(optional) notify project` +- `Date` > 填写样例 > ![](/img/docs/community/icla-content-3.png) @@ -450,6 +451,7 @@ Thanks ! ![](/img/docs/community/account-6.png) 邮箱中会收到一封邮件,点击`Join @apache` + > 这里邮箱指的是您提供给PMC的那个邮箱地址 ![](/img/docs/community/account-7.png) @@ -460,6 +462,7 @@ Thanks ! ![](/img/docs/community/account-9.png) 回到Apache页面,提示GitHub启用多重身份认证,在GitHub页面启用双重身份认证 + > GitHub双重身份认证介绍,[点击跳转官方文档](https://docs.github.com/zh/authentication/securing-your-account-with-two-factor-authentication-2fa/about-two-factor-authentication) ![](/img/docs/community/account-10.png) @@ -472,7 +475,6 @@ Thanks ! 
最后一步,联系PMC主席开启svn和其他访问权限。 - ## 邮箱绑定 ### GMail邮箱绑定 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md index 4488f1fcfea..d7e144bb52b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md @@ -5,20 +5,20 @@ sidebar_position: 5 --- 这个指南是基于 [apache newcommitter](https://community.apache.org/newcommitter.html#new-committer-process) 的。 @@ -82,7 +82,6 @@ ${Work list}[1] [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache 参考投票指南](https://community.apache.org/newcommitter.html) - ### Close Vote Template ```text @@ -284,3 +283,4 @@ A PPMC member helps manage and guide the direction of the project. Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md index ee553bb6e1c..8940571f71c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md @@ -5,72 +5,64 @@ sidebar_position: 2 --- - * 首先从远程仓库 将代码的一份副本 fork 到您自己的仓库 * 远程仓库开发合并分支:`master` * 将您fork仓库clone到您的本地设备 - ```shell - git clone git@github.com:<您的账户名>/hertzbeat.git - ``` - + ```shell + git clone git@github.com:<您的账户名>/hertzbeat.git + ``` * 添加远程仓库地址,命名为 upstream - ```shell - git remote add upstream git@github.com:apache/hertzbeat.git - ``` - + ```shell + git remote add upstream git@github.com:apache/hertzbeat.git + ``` * 查看仓库 - ```shell - git remote -v - ``` + ```shell + git remote -v + ``` > 此时会有两个仓库:origin(您自己的仓库)和 upstream(远程仓库) * 获取/更新远程仓库代码 - ```shell - git fetch 
upstream - ``` - + ```shell + git fetch upstream + ``` * 将远程仓库代码同步到本地仓库 - ```shell - git checkout origin/master - git merge --no-ff upstream/master - ``` - + ```shell + git checkout origin/master + git merge --no-ff upstream/master + ``` * **⚠️注意一定要新建分支开发特性 `git checkout -b feature-xxx`,不建议使用master分支直接开发** - * 在本地修改代码后,提交到自己的仓库: - **注意提交信息为英文,不包含特殊字符** - ```shell - git commit -m '[docs]necessary instructions' - git push - ``` + **注意提交信息为英文,不包含特殊字符** + ```shell + git commit -m '[docs]necessary instructions' + git push + ``` * 将更改提交到远程仓库后,您可以在您的仓库页面上看到一个绿色的按钮“Compare & pull request”,点击它。 - * 这会弹出新建 Pull Request 页面,您需要这里仔细填写信息(英文),描述和代码同样重要,然后点击“Create pull request”按钮。 - * 然后社区的 Committers 将进行 CodeReview,并与您讨论一些细节(包括设计、实现、性能等),之后您可以根据建议直接在这个分支更新代码(无需新建PR)。当社区 Committer approve之后,提交将被合并到 master 分支。 - * 最后,恭喜您,您已经成为 HertzBeat 的官方贡献者,您会被加在贡献者墙上,您可以联系社区获取贡献者证书! + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md index dedb919ebfc..c4b43f355ac 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md @@ -18,11 +18,9 @@ sidebar_label: Download 以前版本的 HertzBeat 可能会受到安全问题的影响,请考虑使用最新版本。 ::: - -| 版本 | 日期 | 下载 | Release Notes | 
-|---------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| -| v1.6.0 | 2024.06.10 | [apache-hertzbeat-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-1.6.0-incubating-src.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512) ) | [release note](https://github.com/apache/hertzbeat/releases/tag/v1.6.0) | - +| 版本 | 日期 | 下载 | Release Notes | +|--------|------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| +| v1.6.0 | 2024.06.10 | [apache-hertzbeat-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz) ( 
[signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-1.6.0-incubating-src.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512) ) | [release note](https://github.com/apache/hertzbeat/releases/tag/v1.6.0) | ## Docker 镜像版本 @@ -31,7 +29,6 @@ sidebar_label: Download - HertzBeat https://hub.docker.com/r/apache/hertzbeat - HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector - ## 归档版本 在这里查看所有归档版本:[archive](https://archive.apache.org/dist/incubator/hertzbeat/). diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md index b2d8f1489f5..29d5478158a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md @@ -9,7 +9,7 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 **使用协议:JMX** -### 监控前操作 +### 监控前操作 > 您需要在 ActiveMQ 开启 `JMX` 服务,HertzBeat 使用 JMX 协议对 ActiveMQ 进行指标采集。 @@ -23,9 +23,10 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 ``` -2. 修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 +2. 
修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 + +将如下原配置信息 -将如下原配置信息 ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" @@ -34,7 +35,9 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ``` -更新为如下配置,⚠️注意修改`本机对外IP` + +更新为如下配置,⚠️注意修改`本机对外IP` + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" @@ -49,25 +52,25 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" 3. 重启 ACTIVEMQ 服务,在 HertzBeat 添加对应 ActiveMQ 监控即可,参数使用 JMX 配置的 IP 端口。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -|-------------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| JMX端口 | JMX 对外提供的HTTP端口,默认为 11099。 | -| JMX URL | 可选,自定义 JMX URL 连接 | -| 用户名 | 认证时使用的用户名 | -| 密码 | 认证时使用的密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| JMX端口 | JMX 对外提供的HTTP端口,默认为 11099。 | +| JMX URL | 可选,自定义 JMX URL 连接 | +| 用户名 | 认证时使用的用户名 | +| 密码 | 认证时使用的密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 #### 指标集合:broker -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------------------|------|-----------------------------------------------------------------------| | BrokerName | 无 | The name of 
the broker. | | BrokerVersion | 无 | The version of the broker. | @@ -88,57 +91,56 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | MaxMessageSize | 无 | Max message size on this broker | | MinMessageSize | 无 | Min message size on this broker | -#### 指标集合:topic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-------------------------------------------------------------------------------------------| -| Name | 无 | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | 无 | The percentage of the memory limit used | -| ProducerCount | 无 | Number of producers attached to this destination | -| ConsumerCount | 无 | Number of consumers subscribed to this destination. | -| EnqueueCount | 无 | Number of messages that have been sent to the destination. | -| DequeueCount | 无 | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | 无 | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | 无 | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | 无 | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | 无 | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - +#### 指标集合:topic + +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|-------------------------------------------------------------------------------------------| +| Name | 无 | Name of this destination. | +| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | +| MemoryPercentUsage | 无 | The percentage of the memory limit used | +| ProducerCount | 无 | Number of producers attached to this destination | +| ConsumerCount | 无 | Number of consumers subscribed to this destination. | +| EnqueueCount | 无 | Number of messages that have been sent to the destination. | +| DequeueCount | 无 | Number of messages that has been acknowledged (and removed) from the destination. | +| ForwardCount | 无 | Number of messages that have been forwarded (to a networked broker) from the destination. | +| InFlightCount | 无 | Number of messages that have been dispatched to, but not acknowledged by, consumers. | +| DispatchCount | 无 | Number of messages that has been delivered to consumers, including those not acknowledged | +| ExpiredCount | 无 | Number of messages that have been expired. | +| StoreMessageSize | B | The memory size of all messages in this destination's store. | +| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| +| MaxEnqueueTime | ms | The longest time a message was held on this destination | +| MinEnqueueTime | ms | The shortest time a message was held on this destination | +| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | +| AverageMessageSize | B | Average message size on this destination | +| MaxMessageSize | B | Max message size on this destination | +| MinMessageSize | B | Min message size on this destination | #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md index bf8d7570c59..a2e61c704fe 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md @@ -9,52 +9,54 @@ keywords: [人工智能 AI] ### 配置参数说明 -| 参数名称 | 参数帮助描述 | -| ----------- |------------------------------| -| type | 选择AI大模型(如智普、通义千问...) | -| model | 选择模型,默认为GLM-4 | -| api-key | 获取api_key,如果没有该配置,无法与大模型进行对话 | +| 参数名称 | 参数帮助描述 | +|---------|------------------------------| +| type | 选择AI大模型(如智普、通义千问...) | +| model | 选择模型,默认为GLM-4 | +| api-key | 获取api_key,如果没有该配置,无法与大模型进行对话 | ### 大模型选项与配置详解 #### 智普AI -| 参数名称 | 示例 | 链接 | -|--------------|-----------------------------------------------------|----| -| type | zhiPu(必须和示例完全相同) | 无 | -| model | glm-4-0520、glm-4 、glm-4-air、glm-4-airx、 glm-4-flash | 无 | -| api-key | xxxxx.xxxxxx | https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys | +| 参数名称 | 示例 | 链接 | +|---------|-----------------------------------------------------|-----------------------------------------------------------------| +| type | zhiPu(必须和示例完全相同) | 无 | +| model | glm-4-0520、glm-4 、glm-4-air、glm-4-airx、 glm-4-flash | 无 | +| api-key | xxxxx.xxxxxx | https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys | #### 阿里巴巴AI -| 参数名称 | 示例 | 链接 | -|--------------|----------------------------------------------------|----| -| type | alibabaAi(必须和示例完全相同) | 无 | -| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction?spm=a2c4g.11186623.0.0.4e0246c1RQFKMH | -| api-key | xxxxxxxxxxx | https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.i10 | +| 参数名称 | 示例 | 链接 | 
+|---------|----------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------| +| type | alibabaAi(必须和示例完全相同) | 无 | +| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction?spm=a2c4g.11186623.0.0.4e0246c1RQFKMH | +| api-key | xxxxxxxxxxx | https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.i10 | #### 月之暗面AI -| 参数名称 | 示例 | 链接 | -|--------------|----------------------------------------------------|-----------------------------------------------| -| type | kimiAi(必须和示例完全相同) | 无 | -| model | moonshot-v1-8k、moonshot-v1-32k、moonshot-v1-128k | 无 | -| api-key | xxxxxxxxxxx | https://platform.moonshot.cn/console/api-keys | +| 参数名称 | 示例 | 链接 | +|---------|-------------------------------------------------|-----------------------------------------------| +| type | kimiAi(必须和示例完全相同) | 无 | +| model | moonshot-v1-8k、moonshot-v1-32k、moonshot-v1-128k | 无 | +| api-key | xxxxxxxxxxx | https://platform.moonshot.cn/console/api-keys | #### 科大讯飞AI + 快速入门:https://www.xfyun.cn/doc/platform/quickguide.html -| 参数名称 | 示例 | 链接 | -|--------------|-------------------------------------------------|---------------------------------------------------------------| -| type | sparkDesk (must be exactly the same as example) | | -| model | general、generalv2、generalv3、generalv3.5、4.0Ultra | | -| api-key | xxxxxxxxxxx |https://console.xfyun.cn/services/cbm| -| api-secret | xxxxxxxxxxx |https://console.xfyun.cn/services/cbm| +| 参数名称 | 示例 | 链接 | +|------------|--------------------------------------------------|---------------------------------------| +| type | sparkDesk (must be exactly the same as example) | | +| model | 
general、generalv2、generalv3、generalv3.5、4.0Ultra | | +| api-key | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | +| api-secret | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | -| 模型版本 | 模型类型(application.yml的model参数) | +| 模型版本 | 模型类型(application.yml的model参数) | |-----------------|-------------------------------| -| Spark4.0 Ultra | 4.0Ultra | -| Spark Max | generalv3.5 | +| Spark4.0 Ultra | 4.0Ultra | +| Spark Max | generalv3.5 | | Spark Pro | generalv3 | -| Spark V2.0 | generalv2 | -| Spark Lite(免费版) | general | \ No newline at end of file +| Spark V2.0 | generalv2 | +| Spark Lite(免费版) | general | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md index 5323ede8110..52367155d89 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md @@ -9,33 +9,31 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8080 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| HTTPS | 是否启用HTTPS | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:airflow_health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | -------------------- | -| metadatabase | 无 
| metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|------------------| +| metadatabase | 无 | metadatabase健康情况 | +| scheduler | 无 | scheduler健康情况 | +| triggerer | 无 | triggerer健康情况 | #### 指标集合:airflow_version -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | --------------- | -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------| +| value | 无 | Airflow版本 | +| git_version | 无 | Airflow git版本 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_console.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_console.md index 68cf7339eae..5198b961b66 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_console.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_console.md @@ -6,13 +6,13 @@ sidebar_label: 告警模板登录台地址 > 阈值触发后发送告警信息,通过钉钉/企业微信/飞书机器人通知或者使用邮箱通知的时候,告警内容中有登录控制台的详情链接 - ### 自定义设置 在我们的启动配置文件application.yml中,找到下面的配置 + ```yml alerter: console-url: #这里就是我们的自定义控制台地址 ``` -默认值是赫兹跳动的官方控制台地址 \ No newline at end of file +默认值是赫兹跳动的官方控制台地址 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md index aec6342f7d3..9d0ee3b088f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md @@ -5,15 +5,15 @@ sidebar_label: 告警钉钉机器人通知 keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过钉钉机器人通知到接收人。 +> 阈值触发后发送告警信息,通过钉钉机器人通知到接收人。 -### 操作步骤 +### 操作步骤 1. 
**【钉钉桌面客户端】-> 【群设置】-> 【智能群助手】-> 【添加新建机器人-选自定义】-> 【设置机器人名称头像】-> 【注意⚠️设置自定义关键字: HertzBeat】 ->【添加成功后复制其WebHook地址】** -> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 +> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 -![email](/img/docs/help/alert-notice-8.png) +![email](/img/docs/help/alert-notice-8.png) 2. **【保存机器人的WebHook地址access_token值】** @@ -24,18 +24,18 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/alert-notice-9.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 钉钉机器人通知常见问题 -### 钉钉机器人通知常见问题 +1. 钉钉群未收到机器人告警通知 -1. 钉钉群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查钉钉机器人是否配置了安全自定义关键字:HertzBeat > 请排查是否配置正确机器人ACCESS_TOKEN,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md index d6c4879a2ba..9694126d0dd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md @@ -5,68 +5,66 @@ sidebar_label: 告警 Discord 机器人通知 keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Discord 机器人通知到接收人。 +> 阈值触发后发送告警信息,通过 Discord 机器人通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 -### 在 Discord 创建应用, 应用下创建机器人, 获取机器人 Token +### 在 Discord 创建应用, 应用下创建机器人, 获取机器人 Token -1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 +1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 ![bot](/img/docs/help/discord-bot-1.png) -2. 在应用下创建机器人,获取机器人 Token +2. 
在应用下创建机器人,获取机器人 Token ![bot](/img/docs/help/discord-bot-2.png) ![bot](/img/docs/help/discord-bot-3.png) -3. 授权机器人到聊天服务器 +3. 授权机器人到聊天服务器 -> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` +> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` ![bot](/img/docs/help/discord-bot-4.png) > 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 -4. 查看您的聊天服务器是否已经加入机器人成员 +4. 查看您的聊天服务器是否已经加入机器人成员 ![bot](/img/docs/help/discord-bot-5.png) -### 开启开发者模式,获取频道 Channel ID +### 开启开发者模式,获取频道 Channel ID -1. 个人设置 -> 高级设置 -> 开启开发者模式 +1. 个人设置 -> 高级设置 -> 开启开发者模式 ![bot](/img/docs/help/discord-bot-6.png) -2. 获取频道 Channel ID +2. 获取频道 Channel ID -> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID +> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID ![bot](/img/docs/help/discord-bot-7.png) - -### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot +### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot 1. **【告警通知】->【新增接收人】 ->【选择 Discord 机器人通知方式】->【设置机器人Token和ChannelId】-> 【确定】** ![email](/img/docs/help/discord-bot-8.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -![email](/img/docs/help/alert-notice-policy.png) +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +![email](/img/docs/help/alert-notice-policy.png) -### Discord 机器人通知常见问题 +### Discord 机器人通知常见问题 -1. Discord 未收到机器人告警通知 +1. Discord 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 > 请排查机器人是否被 Discord聊天服务器正确赋权 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md index 7033f6036d6..d4dc218c591 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md @@ -5,34 +5,35 @@ sidebar_label: 告警邮件通知 keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过邮件通知到接收人。 +> 阈值触发后发送告警信息,通过邮件通知到接收人。 -### 操作步骤 +### 操作步骤 -1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** +1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** -![email](/img/docs/help/alert-notice-1.png) +![email](/img/docs/help/alert-notice-1.png) 2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** -![email](/img/docs/help/alert-notice-2.png) + ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) +![email](/img/docs/help/alert-notice-3.png) -3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 邮件通知常见问题 -### 邮件通知常见问题 +1. 自己内网部署的HertzBeat无法接收到邮件通知 -1. 自己内网部署的HertzBeat无法接收到邮件通知 -> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 +> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 + +2. 云环境TanCloud无法接收到邮件通知 -2. 云环境TanCloud无法接收到邮件通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确邮箱,是否已配置告警策略关联 -> 请查询邮箱的垃圾箱里是否把告警邮件拦截 +> 请查询邮箱的垃圾箱里是否把告警邮件拦截 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md index 3f1c5a2b9c1..b70c8b10c40 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md @@ -5,28 +5,28 @@ sidebar_label: 企业微信应用告警通知 keywords: [开源告警系统, 开源监控告警系统, 企业微信应用告警通知] --- -> 阈值触发后发送告警信息,通过企业微信应用通知到接收人. +> 阈值触发后发送告警信息,通过企业微信应用通知到接收人. -### Operation steps +### Operation steps 1. **【企业微信后台管理】-> 【App管理】-> 【创建一个新的应用】-> 【设置应用信息】->【添加成功后复制应用的AgentId和Secret】** -![email](/img/docs/help/alert-wechat-1.jpg) +![email](/img/docs/help/alert-wechat-1.jpg) 2. **【告警通知】->【新增接收人】 ->【选择企业微信应用通知方式】->【设置企业ID,企业应用id和应用的secret 】-> 【确定】** ![email](/img/docs/help/alert-wechat-2.jpg) -3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** -![email](/img/docs/help/alert-wechat-3.jpg) +![email](/img/docs/help/alert-wechat-3.jpg) +### 企业微信应用通知常见问题 -### 企业微信应用通知常见问题 +1. 企业微信应用未收到告警通知. -1. 企业微信应用未收到告警通知. > 请检查用户是否具有应用程序权限. > 请检查企业应用程序回调地址设置是否正常. > 请检查服务器IP是否在企业应用程序白名单上. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md index 448e70de223..604eff34fdc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md @@ -5,30 +5,30 @@ sidebar_label: 告警飞书机器人通知 keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过飞书机器人通知到接收人。 +> 阈值触发后发送告警信息,通过飞书机器人通知到接收人。 -### 操作步骤 +### 操作步骤 1. **【飞书客户端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -2. 
**【保存机器人的WebHook地址的KEY值】** +2. **【保存机器人的WebHook地址的KEY值】** > 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 飞书机器人通知常见问题 -### 飞书机器人通知常见问题 +1. 飞书群未收到机器人告警通知 -1. 飞书群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 +> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md index e137d07e9e2..c81f5608674 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md @@ -5,34 +5,33 @@ sidebar_label: 告警 Slack Webhook 通知 keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Slack Webhook 通知到接收人。 +> 阈值触发后发送告警信息,通过 Slack Webhook 通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 -### 在 Slack 开启 Webhook, 获取 Webhook URL +### 在 Slack 开启 Webhook, 获取 Webhook URL -参考官网文档 [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) +参考官网文档 [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) -### 在 HertzBeat 新增告警通知人,通知方式为 Slack Webhook +### 在 HertzBeat 新增告警通知人,通知方式为 Slack Webhook 1. **【告警通知】->【新增接收人】 ->【选择 Slack Webhook 通知方式】->【设置 Webhook URL】-> 【确定】** ![email](/img/docs/help/slack-bot-1.png) -2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) +![email](/img/docs/help/alert-notice-policy.png) +### Slack 机器人通知常见问题 -### Slack 机器人通知常见问题 - -1. Slack 未收到机器人告警通知 +1. Slack 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 +> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md index 6730aa19dcb..df609e66b50 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md @@ -5,22 +5,22 @@ sidebar_label: 告警 Telegram 机器人通知 keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Telegram 机器人通知到接收人。 +> 阈值触发后发送告警信息,通过 Telegram 机器人通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 ### 在 Telegram 创建机器人, 获取 Bot Token 和 UserId -1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` +1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` ![telegram-bot](/img/docs/help/telegram-bot-1.png) -2. 获取接收人的 `User ID` +2. 获取接收人的 `User ID` -**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, -访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` +**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, +访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` ```json { @@ -42,27 +42,26 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] } ``` -3. 记录保存我们获得的 `Token` 和 `User Id` +3. 
记录保存我们获得的 `Token` 和 `User Id` -### 在 HertzBeat 新增告警通知人,通知方式为 Telegram Bot +### 在 HertzBeat 新增告警通知人,通知方式为 Telegram Bot 1. **【告警通知】->【新增接收人】 ->【选择 Telegram 机器人通知方式】->【设置机器人Token和UserId】-> 【确定】** ![email](/img/docs/help/telegram-bot-2.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) +![email](/img/docs/help/alert-notice-policy.png) +### Telegram 机器人通知常见问题 -### Telegram 机器人通知常见问题 - -1. Telegram 未收到机器人告警通知 +1. Telegram 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 -> UserId 应为消息接收对象的UserId +> UserId 应为消息接收对象的UserId -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold.md index 401a3d04d45..a41ae531e28 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold.md @@ -3,6 +3,7 @@ id: alert_threshold title: 阈值告警配置 sidebar_label: 阈值告警配置 --- + > 对监控指标配置告警阈值(警告告警,严重告警,紧急告警),系统根据阈值配置和采集指标数据计算触发告警。 ## 操作步骤 @@ -20,6 +21,7 @@ sidebar_label: 阈值告警配置 如上图所示,新增标签,这里我们设置标签为: linux:dev (开发环境使用Linux) #### 配置标签 + TODO 图片名称更新 ![threshold](/img/docs/help/alert-threshold-3.png) @@ -47,7 +49,6 @@ TODO 图片名称更新 - **恢复通知**:告警触发后是否发送恢复通知,默认不发送。 - **启用告警**:此告警阈值配置开启生效或关闭 - **阈值告警配置完毕,已经被成功触发的告警信息可以在【告警中心】看到。** **若需要将告警信息邮件,微信,钉钉飞书通知给相关人员,可以在【告警通知】配置。** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold_expr.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold_expr.md index 5924d8965f8..37a9fb29d1b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold_expr.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold_expr.md @@ -3,11 +3,11 @@ id: alert_threshold_expr title: 阈值触发表达式 sidebar_label: 阈值触发表达式 --- + > 在我们配置阈值告警时,需要配置阈值触发表达式,系统根据表达式和监控指标值计算触发是否告警,这里详细介绍下表达式使用。 #### 表达式支持的操作符 - | 运算符(可视化配置) | 运算符(表达式配置) | 支持类型 | 说明 | | | ------------------ | -------------------- | ----------------------- | ------------------------ | - | | 等于 | equals(str1,str2) | 字符型 | 判断字符串是否相等 | | @@ -27,22 +27,21 @@ sidebar_label: 阈值触发表达式 #### 表达式函数库列表 - -| 支持函数库 | 说明 | -| -------------------------------------------- | ------------------------------------------------------------------ | -| condition ? trueExpression : falseExpression | 三元运算符 | -| toDouble(str) | 将字符串转换为Double类型 | -| toBoolean(str) | 将字符串转换为Boolean类型 | -| toInteger(str) | 将字符串转换为Integer类型 | -| array[n] | 取数组第n个元素 | -| * | 算法乘 | -| / | 算法除 | -| % | 求余 | -| ( 和 ) | 括号用于控制运算的优先级,确保在逻辑或数学表达式中某些部分先被计算 | -| + | 加号用于表示数学中的加法运算、字符串拼接 | -| - | 减号用于表示数学中的减法运算 | -| && | 逻辑AND操作符 | -| \|\| | 逻辑OR操作符 | +| 支持函数库 | 说明 | +|----------------------------------------------|-----------------------------------| +| condition ? 
trueExpression : falseExpression | 三元运算符 | +| toDouble(str) | 将字符串转换为Double类型 | +| toBoolean(str) | 将字符串转换为Boolean类型 | +| toInteger(str) | 将字符串转换为Integer类型 | +| array[n] | 取数组第n个元素 | +| * | 算法乘 | +| / | 算法除 | +| % | 求余 | +| ( 和 ) | 括号用于控制运算的优先级,确保在逻辑或数学表达式中某些部分先被计算 | +| + | 加号用于表示数学中的加法运算、字符串拼接 | +| - | 减号用于表示数学中的减法运算 | +| && | 逻辑AND操作符 | +| \|\| | 逻辑OR操作符 | #### 支持的环境变量 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md index 61ed1dc99b5..022cd50f07e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md @@ -5,23 +5,24 @@ sidebar_label: 告警 Webhook 回调通知 keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过post请求方式调用WebHook接口通知到接收人。 +> 阈值触发后发送告警信息,通过post请求方式调用WebHook接口通知到接收人。 -## 操作步骤 +## 操作步骤 -1. **【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** +1. **【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** ![email](/img/docs/help/alert-notice-5.png) -2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. 
** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) -### WebHook回调POST请求体BODY内容 +### WebHook回调POST请求体BODY内容 + +内容格式:JSON -内容格式:JSON ```json { "alarmId": 76456, @@ -43,22 +44,23 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 } ``` -| | | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | -| tags | example: {key1:value1} | +| | | 
+|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | +| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | +| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | +| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | +| content | string title: The actual content of the alarm notification 告警通知实际内容 | +| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | +| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | +| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | +| tags | example: {key1:value1} | + +### webhook通知常见问题 -### webhook通知常见问题 +1. WebHook回调未生效 -1. WebHook回调未生效 > 请查看告警中心是否已经产生此条告警信息 > 请排查配置的WebHook回调地址是否正确 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md index ce73c131d00..e0dbabf1a70 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md @@ -5,34 +5,34 @@ sidebar_label: 告警企业微信通知 keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过企业微信机器人通知到接收人。 +> 阈值触发后发送告警信息,通过企业微信机器人通知到接收人。 -### 操作步骤 +### 操作步骤 -1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** +1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -![email](/img/docs/help/alert-notice-6.jpg) +![email](/img/docs/help/alert-notice-6.jpg) -2. **【保存机器人的WebHook地址的KEY值】** +2. **【保存机器人的WebHook地址的KEY值】** > 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** +3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** ![email](/img/docs/help/alert-notice-7.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 企业微信机器人通知常见问题 -### 企业微信机器人通知常见问题 +1. 企业微信群未收到机器人告警通知 -1. 企业微信群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 +> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md index 64f15c3777d..391005c080c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md @@ -4,114 +4,105 @@ title: 监控:AlmaLinux操作系统监控 sidebar_label: AlmaLinux操作系统 keywords: [开源监控系统, 开源操作系统监控, AlmaLinux操作系统监控] --- + > 对AlmaLinux操作系统的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- | ------------------------------------------------------------------------- | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -| 密钥 | 连接服务器所需密钥 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 密钥 | 连接服务器所需密钥 | ### 采集指标 #### 指标集合:系统基本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------ | -| Host Name | 无 | 主机名称 | -| System Version | 无 | 操作系统版本 | -| Uptime | 无 | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Host Name | 无 | 主机名称 | +| System Version | 无 | 操作系统版本 | +| Uptime | 无 | 启动时间 | #### 指标集合:CPU 信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | --------------------------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| 
interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:内存信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | ---------------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:磁盘信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------- | -------- | ------------------ | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:网卡信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:文件系统 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | -------------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem 
| 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | #### 指标集合:Top10 CPU进程 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| cpu_usage | % | CPU占用率 | -| mem_usage | % | 内存占用率 | -| command | 无 | 执行命令 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| cpu_usage | % | CPU占用率 | +| mem_usage | % | 内存占用率 | +| command | 无 | 执行命令 | #### 指标集合:Top10 内存进程 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| mem_usage | % | 内存占用率 | +| cpu_usage | % | CPU占用率 | +| command | 无 | 执行命令 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| mem_usage | % | 内存占用率 | -| cpu_usage | % | CPU占用率 | -| command | 无 | 执行命令 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md index 9b80fc35828..89f3cd701bc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md @@ -5,34 +5,33 @@ sidebar_label: HTTP API keywords: [开源监控系统, 开源网站监控, HTTP API监控] --- -> 调用HTTP API接口,查看接口是否可用,对其响应时间等指标进行监测 +> 调用HTTP API接口,查看接口是否可用,对其响应时间等指标进行监测 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 请求方式 | 设置接口调用的请求方式:GET,POST,PUT,DELETE。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 用户名 | 接口Basic认证或Digest认证时使用的用户名 | -| 密码 | 接口Basic认证或Digest认证时使用的密码 | -| 请求Headers | HTTP 请求头 | -| 查询Params | HTTP查询参数,支持[时间表达式](time_expression) | -| Content-Type | 设置携带BODY请求体数据请求时的资源类型 | -| 请求BODY | 
设置携带BODY请求体数据,PUT POST请求方式时有效 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 请求方式 | 设置接口调用的请求方式:GET,POST,PUT,DELETE。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 用户名 | 接口Basic认证或Digest认证时使用的用户名 | +| 密码 | 接口Basic认证或Digest认证时使用的密码 | +| 请求Headers | HTTP 请求头 | +| 查询Params | HTTP查询参数,支持[时间表达式](time_expression) | +| Content-Type | 设置携带BODY请求体数据请求时的资源类型 | +| 请求BODY | 设置携带BODY请求体数据,PUT POST请求方式时有效 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md index 03c514bc7b8..3d0654db3b5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, CentOS操作系统监 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | 
+|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 
分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md index 1c5adeeccc2..955c87b4e4f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md @@ -4,96 +4,93 @@ title: 监控:Clickhouse数据库监控 sidebar_label: Clickhouse数据库 keywords: [开源监控系统, 开源数据库监控, Clickhouse数据库监控] --- + > 对Clickhouse数据库的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------------------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为8123。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| 
+| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为8123。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:ping 可用性 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | ------------ | -| responseTime | 无 | 响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | 无 | 响应时间 | #### 指标集合:system.metrics表的数据 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | -------------------------------------- | -| Query | 无 | 正在执行的查询的数量 | -| Merge | 无 | 正在执行的后台合并的数量 | -| Move | 无 | 正在执行的后台移动的数量 | -| PartMutation | 无 | 表变更的次数 | -| ReplicatedFetch | 无 | 从副本收集的数据块数量 | -| ReplicatedSend | 无 | 发送到副本的数量块数量 | -| ReplicatedChecks | 无 | 检查一致性的数据块数量 | -| QueryPreempted | 无 | 停止或等待的查询数量 | -| TCPConnection | 无 | TCP连接数量 | -| HTTPConnection | 无 | HTTP连接数量 | -| OpenFileForRead | 无 | 打开的可读文件的数量 | -| OpenFileForWrite | 无 | 打开的可写文件的数量 | -| QueryThread | 无 | 查询处理的线程数量 | -| ReadonlyReplica | 无 | 处于只读状态的 Replicated table 的数量 | -| EphemeralNode | 无 | ZooKeeper 中保存的临时节点数 | -| ZooKeeperWatch | 无 | ZooKeeper事件订阅数 | -| StorageBufferBytes | Bytes | Buffer tables 中的字节数 | -| VersionInteger | 无 | ClickHouse 版本号 | -| RWLockWaitingReaders | 无 | 等待读表的读写锁的线程数量 | -| RWLockWaitingWriters | 无 | 等待写表的读写锁的线程数量 | -| RWLockActiveReaders | 无 | 在一个表的读写锁中持有读锁的线程数 | -| RWLockActiveWriters | 无 | 在一个表的读写锁中持有写锁的线程数 | -| GlobalThread | 无 | 全局线程池中的线程数 | -| GlobalThreadActive | 无 | 全局线程池中活跃的线程数 | -| LocalThread | 无 | 本地线程池中的线程数 | -| LocalThreadActive | 无 | 本地线程池中活跃的线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|-------|------------------------------| +| Query | 无 | 正在执行的查询的数量 | +| Merge | 无 | 正在执行的后台合并的数量 | +| Move | 无 | 正在执行的后台移动的数量 | +| PartMutation | 无 | 表变更的次数 | +| ReplicatedFetch | 无 | 从副本收集的数据块数量 | +| 
ReplicatedSend | 无 | 发送到副本的数量块数量 | +| ReplicatedChecks | 无 | 检查一致性的数据块数量 | +| QueryPreempted | 无 | 停止或等待的查询数量 | +| TCPConnection | 无 | TCP连接数量 | +| HTTPConnection | 无 | HTTP连接数量 | +| OpenFileForRead | 无 | 打开的可读文件的数量 | +| OpenFileForWrite | 无 | 打开的可写文件的数量 | +| QueryThread | 无 | 查询处理的线程数量 | +| ReadonlyReplica | 无 | 处于只读状态的 Replicated table 的数量 | +| EphemeralNode | 无 | ZooKeeper 中保存的临时节点数 | +| ZooKeeperWatch | 无 | ZooKeeper事件订阅数 | +| StorageBufferBytes | Bytes | Buffer tables 中的字节数 | +| VersionInteger | 无 | ClickHouse 版本号 | +| RWLockWaitingReaders | 无 | 等待读表的读写锁的线程数量 | +| RWLockWaitingWriters | 无 | 等待写表的读写锁的线程数量 | +| RWLockActiveReaders | 无 | 在一个表的读写锁中持有读锁的线程数 | +| RWLockActiveWriters | 无 | 在一个表的读写锁中持有写锁的线程数 | +| GlobalThread | 无 | 全局线程池中的线程数 | +| GlobalThreadActive | 无 | 全局线程池中活跃的线程数 | +| LocalThread | 无 | 本地线程池中的线程数 | +| LocalThreadActive | 无 | 本地线程池中活跃的线程数 | #### 指标集合:system.events表的数据 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------------------------- |-------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------------|-------|------------------------------------------------------------------------------------------------| | Query | 无 | 要解释和可能执行的查询数量。 不包括由于 AST 大小限制、配额限制或同时运行的查询数量限制而无法解析或被拒绝的查询。 可能包括 ClickHouse 本身发起的内部查询。 不计算子查询。 | -| SelectQuery | 无 | 可能执行的 Select 查询数 | -| InsertQuery | 无 | 可能执行的 Insert 查询数 | -| InsertedRows | 无 | 被插入到所有表中的行数 | -| InsertedBytes | Bytes | 被插入到所有表中的字节数 | -| FailedQuery | 无 | 执行失败的查询数量 | -| FailedSelectQuery | 无 | 执行失败的 Select 查询数量 | -| FileOpen | 无 | 文件打开数 | -| MergeTreeDataWriterRows | 无 | 写入 MergeTree 表的数据行数 | -| MergeTreeDataWriterCompressedBytes | Bytes | 压缩写入 MergeTree 表的数据字节数 | +| SelectQuery | 无 | 可能执行的 Select 查询数 | +| InsertQuery | 无 | 可能执行的 Insert 查询数 | +| InsertedRows | 无 | 被插入到所有表中的行数 | +| InsertedBytes | Bytes | 被插入到所有表中的字节数 | +| FailedQuery | 无 | 
执行失败的查询数量 | +| FailedSelectQuery | 无 | 执行失败的 Select 查询数量 | +| FileOpen | 无 | 文件打开数 | +| MergeTreeDataWriterRows | 无 | 写入 MergeTree 表的数据行数 | +| MergeTreeDataWriterCompressedBytes | Bytes | 压缩写入 MergeTree 表的数据字节数 | #### 指标集合:system.asynchronous_metrics表的数据 +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------------------|------|----------------------------------| +| AsynchronousMetricsCalculationTimeSpent | 无 | 异步指标计算花费的时间(秒) | +| jemalloc.arenas.all.muzzy_purged | 无 | 被清除的模糊(muzzy)页的数量 | +| jemalloc.arenas.all.dirty_purged | 无 | 被清除的脏 (dirty)页的数量 | +| BlockReadBytes_ram1 | 无 | ram1 块读取的字节数 | +| jemalloc.background_thread.run_intervals | 无 | jemalloc 后台线程的运行间隔数 | +| BlockQueueTime_nbd13 | 无 | nbd13 块队列等待时间 | +| jemalloc.background_thread.num_threads | 无 | jemalloc 后台线程的数量 | +| jemalloc.resident | 无 | jemalloc 分配器占用的物理内存大小(字节) | +| InterserverThreads | 无 | Interserver 线程数 | +| BlockWriteMerges_nbd7 | 无 | nbd7 块写合并数量 | +| MarkCacheBytes | 无 | StorageMergeTree 的 marks 的缓存大小 | +| MarkCacheFiles | 无 | StorageMergeTree 的 marks 的缓存文件数量 | +| MaxPartCountForPartition | 无 | partitions 中最大的活跃数据块的数量 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------------------------------- | ------ | ----------------------------------------- | -| AsynchronousMetricsCalculationTimeSpent | 无 | 异步指标计算花费的时间(秒) | -| jemalloc.arenas.all.muzzy_purged | 无 | 被清除的模糊(muzzy)页的数量 | -| jemalloc.arenas.all.dirty_purged | 无 | 被清除的脏 (dirty)页的数量 | -| BlockReadBytes_ram1 | 无 | ram1 块读取的字节数 | -| jemalloc.background_thread.run_intervals | 无 | jemalloc 后台线程的运行间隔数 | -| BlockQueueTime_nbd13 | 无 | nbd13 块队列等待时间 | -| jemalloc.background_thread.num_threads | 无 | jemalloc 后台线程的数量 | -| jemalloc.resident | 无 | jemalloc 分配器占用的物理内存大小(字节) | -| InterserverThreads | 无 | Interserver 线程数 | -| BlockWriteMerges_nbd7 | 无 | nbd7 块写合并数量 | -| MarkCacheBytes | 无 | StorageMergeTree 的 marks 的缓存大小 | -| MarkCacheFiles | 无 | StorageMergeTree 的 marks 的缓存文件数量 | -| MaxPartCountForPartition | 无 | partitions 中最大的活跃数据块的数量 | diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md index dcda89ee2b6..6b353bafd0b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md @@ -4,71 +4,67 @@ title: 监控:debian 系统监控 sidebar_label: Debian keywords: [开源监控系统, 操作系统监控, Debian监控] --- + > 对Debian系统的通用性能指标进行采集监控 ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- | ------------------------------------------------------------------------- | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Debian系统的ssh端口,默认: 22 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | -| 用户名 | 服务器用户名 | -| 密码 | 服务器密码 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -| 密钥 | 连接服务器所需密钥 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Debian系统的ssh端口,默认: 22 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | +| 用户名 | 服务器用户名 | +| 密码 | 服务器密码 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 密钥 | 连接服务器所需密钥 | ### 采集指标 #### 指标集合:系统基本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------ | -| Host Name | 无 | 主机名称 | -| System Version | 无 | 操作系统版本 | -| Uptime | 无 | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Host Name | 无 | 主机名称 | +| System Version | 无 | 操作系统版本 | +| Uptime | 无 | 启动时间 | #### 指标集合:CPU 信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------ | -| Info | 无 | 型号 | -| Cores | 无 | 核数 | -| Interrupt | 无 | 中断数 | -| Load | 无 | 负载 | -| Context Switch | 无 | 上下文切换 | -| Usage | % | 使用率 | +| 指标名称 | 
指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Info | 无 | 型号 | +| Cores | 无 | 核数 | +| Interrupt | 无 | 中断数 | +| Load | 无 | 负载 | +| Context Switch | 无 | 上下文切换 | +| Usage | % | 使用率 | #### 指标集合:内存信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------- | -------- | -------------- | -| Total Memory | Mb | 总内存容量 | -| User Program Memory | Mb | 用户程序内存量 | -| Free Memory | Mb | 空闲内存容量 | -| Buff Cache Memory | Mb | 缓存占用内存 | -| Available Memory | Mb | 剩余可用内存 | -| Memory Usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|---------| +| Total Memory | Mb | 总内存容量 | +| User Program Memory | Mb | 用户程序内存量 | +| Free Memory | Mb | 空闲内存容量 | +| Buff Cache Memory | Mb | 缓存占用内存 | +| Available Memory | Mb | 剩余可用内存 | +| Memory Usage | % | 内存使用率 | #### 指标集合:磁盘信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------- | -------- | ------------ | -| Disk Num | 无 | 磁盘总数 | -| Partition Num | 无 | 分区总数 | -| Block Write | 无 | 写磁盘块数 | -| Block Read | 无 | 读磁盘块数 | -| Write Rate | iops | 磁盘写速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|--------| +| Disk Num | 无 | 磁盘总数 | +| Partition Num | 无 | 分区总数 | +| Block Write | 无 | 写磁盘块数 | +| Block Read | 无 | 读磁盘块数 | +| Write Rate | iops | 磁盘写速率 | #### 指标集合:网卡信息 @@ -103,3 +99,4 @@ keywords: [开源监控系统, 操作系统监控, Debian监控] - 内存占用率:% - CPU占用率:% + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md index cdd9f6f6ee5..ea4a376c049 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md @@ -9,41 +9,41 @@ keywords: [开源监控系统, 开源数据库监控, 达梦数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5236。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 
数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5236。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | ------------------ | -| PORT_NUM | 无 | 数据库暴露服务端口 | -| CTL_PATH | 无 | 控制文件路径 | -| MAX_SESSIONS | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|-----------| +| PORT_NUM | 无 | 数据库暴露服务端口 | +| CTL_PATH | 无 | 控制文件路径 | +| MAX_SESSIONS | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------ | -| status$ | 无 | DM数据库的开闭状态 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|------------| +| status$ | 无 | DM数据库的开闭状态 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | ---------------------------------------------- | -| dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | -| dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | -| dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------------------------| +| dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | +| dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | +| dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md index 67ea72c177e..303ac47444f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md @@ -13,24 +13,24 @@ keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | -|-----------|------------------------------------------------| -| DNS服务器IP | 被监控的IPv4、IPv6。注意⚠️不包含协议头(例如:https://,http://)。 | -| 监控名称 | 标识此监控的名称,名称需要是唯一的。 | -| 端口 | DNS服务对外提供的端口,默认为53。 | -| 域名解析的地址 | 域名解析的地址。 | -| 连接超时时间 | 设置连接DNS服务器的超时时间,单位ms毫秒,默认6000毫秒。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------------------| +| DNS服务器IP | 被监控的IPv4、IPv6。注意⚠️不包含协议头(例如:https://,http://)。 | +| 监控名称 | 标识此监控的名称,名称需要是唯一的。 | +| 端口 | DNS服务对外提供的端口,默认为53。 | +| 域名解析的地址 | 域名解析的地址。 | +| 连接超时时间 | 设置连接DNS服务器的超时时间,单位ms毫秒,默认6000毫秒。 | | 查询类别 | DNS查询的类别. 可选的值包括 `IN`, `CHAOS`, `HESIOD`, `NONE`, 和 `ANY`。默认值:IN | -| 是否使用tcp协议 | 设置DNS查询是否使用tcp协议。 | -| 采集间隔 | 监控周期性数据采集的时间间隔,单位:秒,最小可设置为30秒。 | -| 绑定标签 | 用于对监控资源进行分类管理。 | -| 描述备注 | 用于更多关于标识和描述此监控的信息,用户可以在此处添加备注信息。 | +| 是否使用tcp协议 | 设置DNS查询是否使用tcp协议。 | +| 采集间隔 | 监控周期性数据采集的时间间隔,单位:秒,最小可设置为30秒。 | +| 绑定标签 | 用于对监控资源进行分类管理。 | +| 描述备注 | 用于更多关于标识和描述此监控的信息,用户可以在此处添加备注信息。 | ### 采集指标 #### 指标集:Header -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|-----------------| | 响应时间 | 毫秒 | DNS服务器响应请求所需的时间 | | 操作码 | 无 | 当前消息的类型 | @@ -43,27 +43,31 @@ keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] ### 指标集: Question -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|-----------------------------------| | Section | 无 | 请求记录信息,其中包含查询的域名,资源类型,资源记录类,附加信息。 | ### 指标集: Answer -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|----------------------------------------| | Section0 | 无 | 请求响应信息,其中包含查询的域名,TTL,资源记录类,资源类型,查询的结果。 | + > Answer 指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 ### 指标集: Authority -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|------------------------------------------------------------------| | Section0 | 无 | 域名的SOA(Start of Authority,权威区域起始)记录,其中包含查询的域名,TTL,资源类型,资源记录类等信息。 | + > Authority 
指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 ### 指标集: Additional -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|-------------| | Section0 | 无 | DNS查询的附加信息。 | + > Additional 指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md index 221776b2426..c546b46fd2c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md @@ -7,7 +7,6 @@ keywords: [开源监控系统, 开源容器监控, Docker容器监控] > 对Docker容器的通用性能指标进行采集监控。 - ## 监控前操作 如果想要监控 `Docker` 中的容器信息,则需要按照一下步骤打开端口,让采集请求获取到对应的信息。 @@ -44,63 +43,60 @@ firewall-cmd --zone=public --add-port=2375/tcp --permanent firewall-cmd --reload ``` - - - - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为2375。 | -| 查询超时时间 | 设置获取Docker服务器API接口时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 器名称 | 一般是监控所有运行中的容器信息。 | -| 用户名 | 连接用户名,可选 | -| 密码 | 连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为2375。 | +| 查询超时时间 | 设置获取Docker服务器API接口时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 器名称 | 一般是监控所有运行中的容器信息。 | +| 用户名 | 连接用户名,可选 | +| 密码 | 连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:system -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- | 
-------------------------------------- | -| Name | 无 | 服务器名称 | -| version | 无 | docker本版号 | -| os | 无 | 服务器版本 例如:linux x86_64 | -| root_dir | 无 | docker文件夹目录 例如:/var/lib/docker | -| containers | 无 | 容器总数(在运行+未运行) | -| containers_running | 无 | 运行中的容器数目 | -| containers_paused | 无 | 暂停中的容器数目 | -| images | 无 | 容器景象的总数目。 | -| ncpu | 无 | NCPU | -| mem_total | MB | 占用的内存总大小 | -| system_time | 无 | 系统时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------------------------------| +| Name | 无 | 服务器名称 | +| version | 无 | docker版本号 | +| os | 无 | 服务器版本 例如:linux x86_64 | +| root_dir | 无 | docker文件夹目录 例如:/var/lib/docker | +| containers | 无 | 容器总数(在运行+未运行) | +| containers_running | 无 | 运行中的容器数目 | +| containers_paused | 无 | 暂停中的容器数目 | +| images | 无 | 容器镜像的总数目。 | +| ncpu | 无 | NCPU | +| mem_total | MB | 占用的内存总大小 | +| system_time | 无 | 系统时间 | #### 指标集合:containers -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------- | -| id | 无 | Docker中容器的ID | -| name | 无 | Docker容器中的容器名称 | -| image | 无 | Docker容器使用的镜像 | -| command | 无 | Docker中的默认启动命令 | -| state | 无 | Docker中容器的运行状态 | -| status | 无 | Docker容器中的更新时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| id | 无 | Docker中容器的ID | +| name | 无 | Docker容器中的容器名称 | +| image | 无 | Docker容器使用的镜像 | +| command | 无 | Docker中的默认启动命令 | +| state | 无 | Docker中容器的运行状态 | +| status | 无 | Docker容器中的更新时间 | #### 指标集合:stats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- | ---------------------------- | -| name | 无 | Docker容器中的名字 | -| available_memory | MB | Docker容器可以利用的内存大小 | -| used_memory | MB | Docker容器已经使用的内存大小 | -| memory_usage | 无 | Docker容器的内存使用率 | -| cpu_delta | 无 | Docker容器已经使用的CPU数量 | -| number_cpus | 无 | Docker容器可以使用的CPU数量 | -| cpu_usage | 无 | Docker容器CPU使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|--------------------| +| name | 无 | Docker容器中的名字 | +| available_memory | MB | Docker容器可以利用的内存大小 | +| used_memory | MB | Docker容器已经使用的内存大小 | +| memory_usage | 无 | Docker容器的内存使用率 | 
+| cpu_delta | 无 | Docker容器已经使用的CPU数量 | +| number_cpus | 无 | Docker容器可以使用的CPU数量 | +| cpu_usage | 无 | Docker容器CPU使用率 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md index 0e4803826be..10a66aa6853 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md @@ -15,118 +15,118 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库FE监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |-----------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8030 , 取值自 `http_port` 配置项的值 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8030 , 取值自 `http_port` 配置项的值 | | 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认6000毫秒 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 绑定标签 | 您可以使用标签进行监控资源的分类管理, 例如给资源分别绑定生产环境、测试环境的标签。 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 绑定标签 | 您可以使用标签进行监控资源的分类管理, 例如给资源分别绑定生产环境、测试环境的标签。 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:doris_fe_connection_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------- | -| value | 无 | 当前FE的MySQL端口连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 当前FE的MySQL端口连接数 | #### 指标集合:doris_fe_edit_log_clean 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| success | 无 | 清理历史元数据日志成功的次数 | -| failed | 无 | 清理历史元数据日志失败的次数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| success | 无 | 清理历史元数据日志成功的次数 | +| failed | 无 | 清理历史元数据日志失败的次数 | #### 指标集合:doris_fe_edit_log -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------- | -------- | 
------------------------ | -| write | 无 | 元数据日志写入次数的计数 | -| read | 无 | 元数据日志读取次数的计数 | -| current | 无 | 元数据日志当前数量 | -| accumulated_bytes | 字节 | 元数据日志写入量的累计值 | -| current_bytes | 字节 | 元数据日志当前值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|--------------| +| write | 无 | 元数据日志写入次数的计数 | +| read | 无 | 元数据日志读取次数的计数 | +| current | 无 | 元数据日志当前数量 | +| accumulated_bytes | 字节 | 元数据日志写入量的累计值 | +| current_bytes | 字节 | 元数据日志当前值 | #### 指标集合:doris_fe_image_clean 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------- | -| success | 无 | 清理历史元数据镜像文件成功的次数 | -| failed | 无 | 清理历史元数据镜像文件失败的次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|------------------| +| success | 无 | 清理历史元数据镜像文件成功的次数 | +| failed | 无 | 清理历史元数据镜像文件失败的次数 | #### 指标集合:doris_fe_image_write 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| success | 无 | 生成元数据镜像文件成功的次数 | -| failed | 无 | 生成元数据镜像文件失败的次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| success | 无 | 生成元数据镜像文件成功的次数 | +| failed | 无 | 生成元数据镜像文件失败的次数 | #### 指标集合:doris_fe_query_err -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------- | -| value | 无 | 错误查询的累积值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------| +| value | 无 | 错误查询的累积值 | #### 指标集合:doris_fe_max_journal_id -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 当前FE节点最大元数据日志ID。如果是Master FE,则是当前写入的最大ID,如果是非Master FE,则代表当前回放的元数据日志最大ID。用于观察多个FE之间的 id 是否差距过大。过大则表示元数据同步出现问题 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------------------------------------------------------------------------------------| +| value | 无 | 当前FE节点最大元数据日志ID。如果是Master FE,则是当前写入的最大ID,如果是非Master FE,则代表当前回放的元数据日志最大ID。用于观察多个FE之间的 id 是否差距过大。过大则表示元数据同步出现问题 | #### 指标集合:doris_fe_max_tablet_compaction_score -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | 
------------------------------------------------------------ | -| value | 无 | 所有BE节点中最大的 compaction score 值。该值可以观测当前集群最大的 compaction score,以判断是否过高。如过高则可能出现查询或写入延迟 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------------------------------------------------| +| value | 无 | 所有BE节点中最大的 compaction score 值。该值可以观测当前集群最大的 compaction score,以判断是否过高。如过高则可能出现查询或写入延迟 | #### 指标集合:doris_fe_qps -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------ | -| value | 无 | 当前FE每秒查询数量(仅统计查询请求) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 当前FE每秒查询数量(仅统计查询请求) | #### 指标集合:doris_fe_query_err_rate -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------- | -| value | 无 | 每秒错误查询数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------| +| value | 无 | 每秒错误查询数 | #### 指标集合:doris_fe_report_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | BE的各种定期汇报任务在FE端的队列长度,该值反映了汇报任务在 Master FE 节点上的阻塞程度,数值越大,表示FE处理能力不足 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------------------------------| +| value | 无 | BE的各种定期汇报任务在FE端的队列长度,该值反映了汇报任务在 Master FE 节点上的阻塞程度,数值越大,表示FE处理能力不足 | #### 指标集合:doris_fe_rps -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------------------------- | -| value | 无 | 当前FE每秒请求数量(包含查询以及其他各类语句) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------| +| value | 无 | 当前FE每秒请求数量(包含查询以及其他各类语句) | #### 指标集合:doris_fe_scheduled_tablet_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | Master FE节点正在调度的 tablet 数量。包括正在修复的副本和正在均衡的副本,该数值可以反映当前集群,正在迁移的 tablet 数量。如果长时间有值,说明集群不稳定 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------------------------------------------------------| +| value | 无 | Master FE节点正在调度的 
tablet 数量。包括正在修复的副本和正在均衡的副本,该数值可以反映当前集群,正在迁移的 tablet 数量。如果长时间有值,说明集群不稳定 | #### 指标集合:doris_fe_txn_status 可以观测各个状态下导入事务的数量,来判断是否有堆积 -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------- | -| unknown | 无 | 未知 | -| prepare | 无 | 准备中 | -| committed | 无 | 已提交 | -| visible | 无 | 可见 | -| aborted | 无 | 已中止/已撤销 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|---------| +| unknown | 无 | 未知 | +| prepare | 无 | 准备中 | +| committed | 无 | 已提交 | +| visible | 无 | 可见 | +| aborted | 无 | 已中止/已撤销 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md index e3e143c17ed..8c2f1e290e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md @@ -7,15 +7,15 @@ keywords: [开源监控系统, 开源中间件监控, DynamicTp线程池监控] > 对DynamicTp actuator 暴露的线程池性能指标进行采集监控。 -### 前提 +### 前提 -1. 集成使用 `DynamicTp` +1. 集成使用 `DynamicTp` `DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 -集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) +集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) -2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 +2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 ```yaml management: @@ -24,7 +24,8 @@ management: exposure: include: '*' ``` -测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: + +测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: ```json [ @@ -58,45 +59,44 @@ management: ] ``` -3. 在HertzBeat中间件监控下添加DynamicTp监控即可 - +3. 
在HertzBeat中间件监控下添加DynamicTp监控即可 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 应用服务对外提供的端口,默认为8080。 | | 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | - | Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| Base Path | 暴露接口路径前缀,默认 /actuator | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:thread_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------------------| -| pool_name | 无 | 线程池名称 | -| core_pool_size | 无 | 核心线程数 | -| maximum_pool_size | 无 | 最大线程数 | -| queue_type | 无 | 任务队列类型 | -| queue_capacity | MB | 任务队列容量 | -| queue_size | 无 | 任务队列当前占用大小 | -| fair | 无 | 队列模式,SynchronousQueue会用到 | -| queue_remaining_capacity | MB | 任务队列剩余大小 | -| active_count | 无 | 活跃线程数 | -| task_count | 无 | 任务总数 | -| completed_task_count | 无 | 已完成任务数 | -| largest_pool_size | 无 | 历史最大线程数 | -| pool_size | 无 | 当前线程数 | -| wait_task_count | 无 | 等待执行任务数 | -| reject_count | 无 | 拒绝任务数 | -| reject_handler_name | 无 | 拒绝策略类型 | -| dynamic | 无 | 是否动态线程池 | -| run_timeout_count | 无 | 运行超时任务数 | -| queue_timeout_count | 无 | 等待超时任务数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|--------------------------| +| pool_name | 无 | 线程池名称 | +| core_pool_size | 无 | 核心线程数 | +| maximum_pool_size | 无 | 最大线程数 | +| queue_type | 无 | 任务队列类型 | +| queue_capacity | MB | 任务队列容量 | +| queue_size | 无 | 任务队列当前占用大小 | +| fair | 无 | 队列模式,SynchronousQueue会用到 | +| queue_remaining_capacity | MB | 任务队列剩余大小 | +| active_count 
| 无 | 活跃线程数 | +| task_count | 无 | 任务总数 | +| completed_task_count | 无 | 已完成任务数 | +| largest_pool_size | 无 | 历史最大线程数 | +| pool_size | 无 | 当前线程数 | +| wait_task_count | 无 | 等待执行任务数 | +| reject_count | 无 | 拒绝任务数 | +| reject_handler_name | 无 | 拒绝策略类型 | +| dynamic | 无 | 是否动态线程池 | +| run_timeout_count | 无 | 运行超时任务数 | +| queue_timeout_count | 无 | 等待超时任务数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md index e64d956bc77..a0b3082cc1a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |---------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -27,7 +27,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] #### 指标集合:health -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------------------|------|----------| | cluster_name | 无 | 集群名称 | | status | 无 | 集群状态 | @@ -41,7 +41,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|--------| | total | 无 | 节点数 | | successful | 无 | 在线节点数 | @@ -49,7 +49,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] #### 指标集合:nodes_detail -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------------|------|---------| | node_name | 无 | 节点名称 | | ip | 无 | IP地址 | @@ -61,3 +61,4 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] | disk_free | GB | 磁盘剩余容量 | | disk_total | GB | 磁盘总容量 | | disk_used_percent | % | 磁盘使用率 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md index 258f7bb36dd..6c894671cc6 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 无 | CPU内核数量 | @@ -47,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|----------| | total | Mb | 总内存容量 | | used | Mb | 用户程序内存量 | @@ -58,7 +58,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:磁盘信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|-----------| | disk_num | 无 | 磁盘总数 | | partition_num | 无 | 分区总数 | @@ -68,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:网卡信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | interface_name | 无 | 网卡名称 | | receive_bytes | Mb | 入站数据流量 | @@ -76,7 +76,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -88,7 +88,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -99,9 +99,10 @@ keywords: [ 
开源监控系统, 开源操作系统监控, EulerOS操作系统监 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md index ef260f4dc2c..177c41874fb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md @@ -9,7 +9,7 @@ keywords: [开源监控系统, 开源 Flink 监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -26,7 +26,7 @@ keywords: [开源监控系统, 开源 Flink 监控] #### 指标集合:overview -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|----------| | slots_total | 个 | 插槽总数 | | slots_used | 个 | 已用插槽数 | @@ -34,5 +34,3 @@ keywords: [开源监控系统, 开源 Flink 监控] | jobs_running | 个 | 正在运行的任务数 | | jobs_failed | 个 | 已经失败的任务数 | - - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md index da2ebd82cfb..a9baa1eeadd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md @@ -10,139 +10,135 @@ keywords: [开源监控系统, 开源 Flink On Yarn 监控] ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------------------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Yarn端口 | Yarn的端口,对应配置项:`yarn.resourcemanager.webapp.address`中的端口 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 启动SSL | 是否启用SSL | -| 用户名 | 连接用户名 | -| 密码 
| 连接密码 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理。 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息。 | +| 参数名称 | 参数帮助描述 | +|--------|---------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Yarn端口 | Yarn的端口,对应配置项:`yarn.resourcemanager.webapp.address`中的端口 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 启动SSL | 是否启用SSL | +| 用户名 | 连接用户名 | +| 密码 | 连接密码 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理。 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息。 | ### 采集指标 #### 指标集合:JobManager Metrics - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------------------------------------------- | -------- | ------------------ | -| Status.JVM.Memory.NonHeap.Committed | 字节 | 非堆内存的提交量 | -| Status.JVM.Memory.Mapped.TotalCapacity | 字节 | 映射内存的总容量 | -| Status.JVM.Memory.NonHeap.Used | 字节 | 非堆内存的使用量 | -| Status.JVM.Memory.Metaspace.Max | 字节 | 元空间的最大容量 | -| Status.JVM.GarbageCollector.G1_Old_Generation.Count | 字节 | 老年代垃圾收集次数 | -| Status.JVM.Memory.Direct.MemoryUsed | 字节 | 直接内存的使用量 | -| Status.JVM.Memory.Mapped.MemoryUsed | 字节 | 映射内存的使用量 | -| Status.JVM.GarbageCollector.G1_Young_Generation.Count | 字节 | 年轻代垃圾收集次数 | -| Status.JVM.Memory.Direct.TotalCapacity | 字节 | 直接内存的总容量 | -| Status.JVM.GarbageCollector.G1_Old_Generation.Time | 字节 | 老年代垃圾收集时间 | -| Status.JVM.Memory.Heap.Committed | 字节 | 堆内存的提交量 | -| Status.JVM.Memory.Mapped.Count | - | 映射内存的数量 | -| Status.JVM.Memory.Metaspace.Used | 字节 | 元空间的使用量 | -| Status.JVM.Memory.Direct.Count | - | 直接内存的数量 | -| Status.JVM.Memory.Heap.Used | 字节 | 堆内存的使用量 | -| Status.JVM.Memory.Heap.Max | 字节 | 堆内存的最大容量 | -| Status.JVM.GarbageCollector.G1_Young_Generation.Time | 字节 | 年轻代垃圾收集时间 | -| Status.JVM.Memory.NonHeap.Max | 字节 | 非堆内存的最大容量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------------------------------------|------|-----------| +| Status.JVM.Memory.NonHeap.Committed | 字节 | 非堆内存的提交量 | +| Status.JVM.Memory.Mapped.TotalCapacity | 字节 
| 映射内存的总容量 | +| Status.JVM.Memory.NonHeap.Used | 字节 | 非堆内存的使用量 | +| Status.JVM.Memory.Metaspace.Max | 字节 | 元空间的最大容量 | +| Status.JVM.GarbageCollector.G1_Old_Generation.Count | 字节 | 老年代垃圾收集次数 | +| Status.JVM.Memory.Direct.MemoryUsed | 字节 | 直接内存的使用量 | +| Status.JVM.Memory.Mapped.MemoryUsed | 字节 | 映射内存的使用量 | +| Status.JVM.GarbageCollector.G1_Young_Generation.Count | 字节 | 年轻代垃圾收集次数 | +| Status.JVM.Memory.Direct.TotalCapacity | 字节 | 直接内存的总容量 | +| Status.JVM.GarbageCollector.G1_Old_Generation.Time | 字节 | 老年代垃圾收集时间 | +| Status.JVM.Memory.Heap.Committed | 字节 | 堆内存的提交量 | +| Status.JVM.Memory.Mapped.Count | - | 映射内存的数量 | +| Status.JVM.Memory.Metaspace.Used | 字节 | 元空间的使用量 | +| Status.JVM.Memory.Direct.Count | - | 直接内存的数量 | +| Status.JVM.Memory.Heap.Used | 字节 | 堆内存的使用量 | +| Status.JVM.Memory.Heap.Max | 字节 | 堆内存的最大容量 | +| Status.JVM.GarbageCollector.G1_Young_Generation.Time | 字节 | 年轻代垃圾收集时间 | +| Status.JVM.Memory.NonHeap.Max | 字节 | 非堆内存的最大容量 | #### 指标集合:JobManager Config - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------------------------ | -------- | ---------------------------- | -| internal.jobgraph-path | - | 内部作业图路径 | -| env.java.home | - | Java 环境路径 | -| classloader.check-leaked-classloader | - | 是否检查类加载器 | -| env.java.opts | - | Java 选项 | -| high-availability.cluster-id | - | 高可用性集群 ID | -| jobmanager.rpc.address | - | JobManager 的 RPC 地址 | -| jobmanager.memory.jvm-overhead.min | 字节 | JobManager 的 JVM 开销最小值 | -| jobmanager.web.port | 端口号 | JobManager 的 Web 端口 | -| webclient.port | 端口号 | Web 客户端端口 | -| execution.savepoint.ignore-unclaimed-state | - | 是否忽略未声明的状态 | -| io.tmp.dirs | 路径 | 临时文件目录 | -| parallelism.default | - | 默认并行度 | -| taskmanager.memory.fraction | - | TaskManager 内存占比 | -| taskmanager.numberOfTaskSlots | - | TaskManager 的任务槽数量 | -| yarn.application.name | - | Yarn 应用名称 | -| taskmanager.heap.mb | MB | TaskManager 堆内存大小 | -| taskmanager.memory.process.size | GB | TaskManager 进程内存大小 | -| web.port | 端口号 | Web 端口 | -| classloader.resolve-order | - | 类加载器解析顺序 
| -| jobmanager.heap.mb | MB | JobManager 堆内存大小 | -| jobmanager.memory.off-heap.size | 字节 | JobManager 堆外内存大小 | -| state.backend.incremental | - | 状态后端是否增量 | -| execution.target | - | 执行目标 | -| jobmanager.memory.process.size | GB | JobManager 进程内存大小 | -| web.tmpdir | 路径 | Web 临时目录 | -| yarn.ship-files | 路径 | Yarn 传输文件 | -| jobmanager.rpc.port | 端口号 | JobManager 的 RPC 端口 | -| internal.io.tmpdirs.use-local-default | - | 是否使用本地默认临时目录 | -| execution.checkpointing.interval | 毫秒 | 检查点间隔 | -| execution.attached | - | 是否附加执行 | -| internal.cluster.execution-mode | - | 内部集群执行模式 | -| execution.shutdown-on-attached-exit | - | 是否在附加退出时关闭 | -| pipeline.jars | 路径 | 管道 JAR 文件 | -| rest.address | - | REST 地址 | -| state.backend | - | 状态后端类型 | -| jobmanager.memory.jvm-metaspace.size | 字节 | JobManager JVM 元空间大小 | -| $internal.deployment.config-dir | 路径 | 内部部署配置目录 | -| $internal.yarn.log-config-file | 路径 | 内部 Yarn 日志配置文件路径 | -| jobmanager.memory.heap.size | 字节 | JobManager 堆内存大小 | -| state.checkpoints.dir | 路径 | 状态检查点目录 | -| jobmanager.memory.jvm-overhead.max | 字节 | JobManager 的 JVM 开销最大值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------------------|------|------------------------| +| internal.jobgraph-path | - | 内部作业图路径 | +| env.java.home | - | Java 环境路径 | +| classloader.check-leaked-classloader | - | 是否检查类加载器 | +| env.java.opts | - | Java 选项 | +| high-availability.cluster-id | - | 高可用性集群 ID | +| jobmanager.rpc.address | - | JobManager 的 RPC 地址 | +| jobmanager.memory.jvm-overhead.min | 字节 | JobManager 的 JVM 开销最小值 | +| jobmanager.web.port | 端口号 | JobManager 的 Web 端口 | +| webclient.port | 端口号 | Web 客户端端口 | +| execution.savepoint.ignore-unclaimed-state | - | 是否忽略未声明的状态 | +| io.tmp.dirs | 路径 | 临时文件目录 | +| parallelism.default | - | 默认并行度 | +| taskmanager.memory.fraction | - | TaskManager 内存占比 | +| taskmanager.numberOfTaskSlots | - | TaskManager 的任务槽数量 | +| yarn.application.name | - | Yarn 应用名称 | +| taskmanager.heap.mb | MB | TaskManager 堆内存大小 | +| 
taskmanager.memory.process.size | GB | TaskManager 进程内存大小 | +| web.port | 端口号 | Web 端口 | +| classloader.resolve-order | - | 类加载器解析顺序 | +| jobmanager.heap.mb | MB | JobManager 堆内存大小 | +| jobmanager.memory.off-heap.size | 字节 | JobManager 堆外内存大小 | +| state.backend.incremental | - | 状态后端是否增量 | +| execution.target | - | 执行目标 | +| jobmanager.memory.process.size | GB | JobManager 进程内存大小 | +| web.tmpdir | 路径 | Web 临时目录 | +| yarn.ship-files | 路径 | Yarn 传输文件 | +| jobmanager.rpc.port | 端口号 | JobManager 的 RPC 端口 | +| internal.io.tmpdirs.use-local-default | - | 是否使用本地默认临时目录 | +| execution.checkpointing.interval | 毫秒 | 检查点间隔 | +| execution.attached | - | 是否附加执行 | +| internal.cluster.execution-mode | - | 内部集群执行模式 | +| execution.shutdown-on-attached-exit | - | 是否在附加退出时关闭 | +| pipeline.jars | 路径 | 管道 JAR 文件 | +| rest.address | - | REST 地址 | +| state.backend | - | 状态后端类型 | +| jobmanager.memory.jvm-metaspace.size | 字节 | JobManager JVM 元空间大小 | +| $internal.deployment.config-dir | 路径 | 内部部署配置目录 | +| $internal.yarn.log-config-file | 路径 | 内部 Yarn 日志配置文件路径 | +| jobmanager.memory.heap.size | 字节 | JobManager 堆内存大小 | +| state.checkpoints.dir | 路径 | 状态检查点目录 | +| jobmanager.memory.jvm-overhead.max | 字节 | JobManager 的 JVM 开销最大值 | #### TaskManager - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------------------- | -------- | ------------------------------------- | -| Container ID | - | 容器 ID,用于唯一标识一个容器 | -| Path | - | 容器路径 | -| Data Port | 端口号 | 数据传输端口 | -| JMX Port | 端口号 | JMX(Java Management Extensions)端口 | -| Last Heartbeat | 时间戳 | 最后一次心跳时间 | -| All Slots | 数量 | 容器中所有任务槽的数量 | -| Free Slots | 数量 | 容器中空闲任务槽的数量 | -| totalResourceCpuCores | 核心数 | 容器总的CPU核心数 | -| totalResourceTaskHeapMemory | MB | 容器总的任务堆内存大小 | -| totalResourceManagedMemory | MB | 容器总的托管内存大小 | -| totalResourceNetworkMemory | MB | 容器总的网络内存大小 | -| freeResourceCpuCores | 核心数 | 容器中空闲的CPU核心数 | -| freeResourceTaskHeapMemory | MB | 容器中空闲的任务堆内存大小 | -| freeResourceTaskOffHeapMemory | MB | 容器中空闲的任务堆外内存大小 | -| freeResourceManagedMemory | 
MB | 容器中空闲的托管内存大小 | -| freeResourceNetworkMemory | MB | 容器中空闲的网络内存大小 | -| CPU Cores | 核心数 | CPU核心数 | -| Physical MEM | MB | 物理内存大小 | -| JVM Heap Size | MB | JVM堆内存大小 | -| Flink Managed MEM | MB | Flink管理的内存大小 | -| Framework Heap | MB | 框架堆内存大小 | -| Task Heap | MB | 任务堆内存大小 | -| Framework Off-Heap | MB | 框架堆外内存大小 | -| memoryConfigurationTaskOffHeap | Byte | 任务堆外内存配置 | -| Network | MB | 网络内存配置 | -| Managed Memory | MB | 托管内存配置 | -| JVM Metaspace | MB | JVM元空间大小 | -| JVM Overhead | MB | JVM开销 | -| memoryConfigurationTotalFlinkMemory | Byte | Flink总内存配置 | -| memoryConfigurationTotalProcessMemory | Byte | 进程总内存配置 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------------|------|-----------------------------------| +| Container ID | - | 容器 ID,用于唯一标识一个容器 | +| Path | - | 容器路径 | +| Data Port | 端口号 | 数据传输端口 | +| JMX Port | 端口号 | JMX(Java Management Extensions)端口 | +| Last Heartbeat | 时间戳 | 最后一次心跳时间 | +| All Slots | 数量 | 容器中所有任务槽的数量 | +| Free Slots | 数量 | 容器中空闲任务槽的数量 | +| totalResourceCpuCores | 核心数 | 容器总的CPU核心数 | +| totalResourceTaskHeapMemory | MB | 容器总的任务堆内存大小 | +| totalResourceManagedMemory | MB | 容器总的托管内存大小 | +| totalResourceNetworkMemory | MB | 容器总的网络内存大小 | +| freeResourceCpuCores | 核心数 | 容器中空闲的CPU核心数 | +| freeResourceTaskHeapMemory | MB | 容器中空闲的任务堆内存大小 | +| freeResourceTaskOffHeapMemory | MB | 容器中空闲的任务堆外内存大小 | +| freeResourceManagedMemory | MB | 容器中空闲的托管内存大小 | +| freeResourceNetworkMemory | MB | 容器中空闲的网络内存大小 | +| CPU Cores | 核心数 | CPU核心数 | +| Physical MEM | MB | 物理内存大小 | +| JVM Heap Size | MB | JVM堆内存大小 | +| Flink Managed MEM | MB | Flink管理的内存大小 | +| Framework Heap | MB | 框架堆内存大小 | +| Task Heap | MB | 任务堆内存大小 | +| Framework Off-Heap | MB | 框架堆外内存大小 | +| memoryConfigurationTaskOffHeap | Byte | 任务堆外内存配置 | +| Network | MB | 网络内存配置 | +| Managed Memory | MB | 托管内存配置 | +| JVM Metaspace | MB | JVM元空间大小 | +| JVM Overhead | MB | JVM开销 | +| memoryConfigurationTotalFlinkMemory | Byte | Flink总内存配置 | +| memoryConfigurationTotalProcessMemory | Byte | 进程总内存配置 | #### 
TaskManager Metrics +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------------------|------|----------------------| +| Status.Shuffle.Netty.TotalMemory | MB | Netty Shuffle 使用的总内存 | +| Status.Flink.Memory.Managed.Used | MB | Flink 管理的已用内存 | +| Status.JVM.Memory.Metaspace.Used | MB | JVM 元空间已使用的内存 | +| Status.JVM.Memory.Metaspace.Max | MB | JVM 元空间的最大内存 | +| Status.JVM.Memory.Heap.Used | MB | JVM 堆内存已使用的内存 | +| Status.JVM.Memory.Heap.Max | MB | JVM 堆内存的最大容量 | +| Status.Flink.Memory.Managed.Total | MB | Flink 管理的内存总量 | +| Status.Shuffle.Netty.UsedMemory | MB | Netty Shuffle 使用的内存 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------------------- | -------- | -------------------------- | -| Status.Shuffle.Netty.TotalMemory | MB | Netty Shuffle 使用的总内存 | -| Status.Flink.Memory.Managed.Used | MB | Flink 管理的已用内存 | -| Status.JVM.Memory.Metaspace.Used | MB | JVM 元空间已使用的内存 | -| Status.JVM.Memory.Metaspace.Max | MB | JVM 元空间的最大内存 | -| Status.JVM.Memory.Heap.Used | MB | JVM 堆内存已使用的内存 | -| Status.JVM.Memory.Heap.Max | MB | JVM 堆内存的最大容量 | -| Status.Flink.Memory.Managed.Total | MB | Flink 管理的内存总量 | -| Status.Shuffle.Netty.UsedMemory | MB | Netty Shuffle 使用的内存 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md index 53f7eceb7c3..01313bd7ae6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md @@ -7,10 +7,9 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 > 对FreeBSD操作系统的通用性能指标 (系统信息、CPU、内存、磁盘、网卡、文件系统、TOP资源进程等) 进行采集监控。 - ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -29,7 +28,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
|----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -37,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 核数 | CPU内核数量 | @@ -48,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|---------| | physmem | Mb | 物理内存 | | usermem | Mb | 用户程序内存量 | @@ -57,7 +56,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -69,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -80,7 +79,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 内存占用率 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md index c86dfb14a7b..d421b6a78eb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md @@ -11,7 +11,7 @@ keywords: [ 开源监控系统, 开源FTP服务器监控工具, 监控FTP指标 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|--------------------------------------| | 目标Host | 被监控的IPv4、IPv6。注意⚠️不包含协议头(例如:ftp://)。 | | 监控名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,8 @@ keywords: [ 开源监控系统, 开源FTP服务器监控工具, 监控FTP指标 #### 指标集合:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------|------|------------------| | 活动状态 | 无 | 检查目录是否存在,且具有访问权限 | | 响应时间 | ms | 连接FTP响应时间 | 
+ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md index f7fc4c150b7..9d39da7c9e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md @@ -7,29 +7,29 @@ keywords: [开源监控系统, 开源网站监控, SiteMap监控] > 对网站的全部页面监测是否可用 > 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 -> 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 +> 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 网站地图 | 网站SiteMap地图地址的相对路径,例如:/sitemap.xml。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 网站地图 | 网站SiteMap地图地址的相对路径,例如:/sitemap.xml。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| url | 无 | 网页的URL路径 | -| statusCode | 无 | 请求此网页的响应HTTP状态码 | -| responseTime | ms毫秒 | 网站响应时间 | -| errorMsg | 无 | 请求此网站反馈的错误信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|-----------------| +| url | 无 | 网页的URL路径 | +| statusCode | 无 | 请求此网页的响应HTTP状态码 | +| responseTime | ms毫秒 | 网站响应时间 | +| errorMsg | 无 | 请求此网站反馈的错误信息 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md index 79c4ec85e52..48dc239b69a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md @@ -1,136 +1,134 @@ ---- -id: guide -title: 帮助中心 -sidebar_label: 帮助入门 ---- - -> 易用友好的实时监控工具,无需Agent,强大自定义监控能力。 -> 使用过程中的帮助文档,辅助信息。 - -## 🔬 监控服务 - -> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 -> 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 - -### 应用服务监控 - -  👉 [网站监测](website)
-  👉 [HTTP API](api)
-  👉 [PING连通性](ping)
-  👉 [端口可用性](port)
-  👉 [全站监控](fullsite)
-  👉 [SSL证书有效期](ssl_cert)
-  👉 [JVM虚拟机](jvm)
-  👉 [SpringBoot2.0](springboot2)
-  👉 [DNS服务器](dns)
-  👉 [FTP服务器](ftp)
-  👉 [Websocket](websocket)
- -### 应用程序监控 - - 👉 [Linux进程](process)
- 👉 [JVM虚拟机](jvm)
- 👉 [SpringBoot2.0](springboot2)
- 👉 [SpringBoot3.0](springboot3)
- 👉 [DynamicTp线程池](dynamic_tp)
- - -### 数据库监控 - -  👉 [MYSQL数据库监控](mysql)
-  👉 [MariaDB数据库监控](mariadb)
-  👉 [PostgreSQL数据库监控](postgresql)
-  👉 [SqlServer数据库监控](sqlserver)
-  👉 [Oracle数据库监控](oracle)
-  👉 [达梦数据库监控](dm)
-  👉 [OpenGauss数据库监控](opengauss)
-  👉 [IoTDB数据库监控](iotdb)
-  👉 [TiDB数据库监控](tidb)
-  👉 [MongoDB数据库监控](mongodb)
-  👉 [NebulaGraph集群监控](nebulagraph_cluster)
- -### 缓存监控 - - 👉 [Redis](redis)
- 👉 [Memcached](memcached)
- -### 操作系统监控 - -  👉 [Linux操作系统监控](linux)
-  👉 [Windows操作系统监控](windows)
-  👉 [Ubuntu操作系统监控](ubuntu)
-  👉 [Centos操作系统监控](centos)
-  👉 [FreeBSD操作系统监控](freebsd)
-  👉 [RedHat操作系统监控](redhat)
-  👉 [RockyLinux操作系统监控](rockylinux)
-  👉 [EulerOS操作系统监控](euleros)
- -### 中间件监控 - -  👉 [Zookeeper](zookeeper)
-  👉 [Kafka](kafka)
-  👉 [Tomcat](tomcat)
-  👉 [ShenYu](shenyu)
-  👉 [DynamicTp](dynamic_tp)
-  👉 [RabbitMQ](rabbitmq)
-  👉 [ActiveMQ](activemq)
-  👉 [Jetty](jetty)
-  👉 [Nacos](nacos)
- -### 云原生监控 - -  👉 [Docker](docker)
-  👉 [Kubernetes](kubernetes)
- -### 大数据监控 - -  👉 [Clickhouse](clickhouse)
-  👉 [ElasticSearch](elasticsearch)
-  👉 [Flink](flink)
- -### Ai大模型监控 - - 👉 [OpenAi](openai)
- -### 网络监控 - -  👉 [华为通用交换机](huawei_switch)
- -### 服务器监控 - -## 💡 告警服务 - -> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 -> 告警服务的定位是阈值准确及时触发,告警通知及时可达。 - -### 告警中心 - -> 已触发的告警信息中心,提供告警删除,告警处理,标记未处理,告警级别状态等查询过滤。 - -### 告警配置 - -> 指标阈值配置,提供表达式形式的指标阈值配置,可设置告警级别,触发次数,告警通知模版和是否启用,关联监控等功能。 - -详见 👉 [阈值告警](alert_threshold)
-   👉 [阈值表达式](alert_threshold_expr) - -### 告警通知 - -> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 -> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 -> 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。 - - - 👉 [配置邮箱通知](alert_email)
- 👉 [配置 Webhook 通知](alert_webhook)
- 👉 [配置 Telegram 通知](alert_telegram)
- 👉 [配置 Discord 通知](alert_discord)
- 👉 [配置 Slack 通知](alert_slack)
- 👉 [配置企业微信机器人通知](alert_wework)
- 👉 [配置钉钉机器人通知](alert_dingtalk)
- 👉 [配置飞书机器人通知](alert_feishu)
- 👉 [配置华为云SMN通知](alert_smn)
- -### 插件 - - 👉 [插件](plugin)
+--- +id: guide +title: 帮助中心 +sidebar_label: 帮助入门 +--- + +> 易用友好的实时监控工具,无需Agent,强大自定义监控能力。 +> 使用过程中的帮助文档,辅助信息。 + +## 🔬 监控服务 + +> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 +> 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 + +### 应用服务监控 + + 👉 [网站监测](website)
+ 👉 [HTTP API](api)
+ 👉 [PING连通性](ping)
+ 👉 [端口可用性](port)
+ 👉 [全站监控](fullsite)
+ 👉 [SSL证书有效期](ssl_cert)
+ 👉 [JVM虚拟机](jvm)
+ 👉 [SpringBoot2.0](springboot2)
+ 👉 [DNS服务器](dns)
+ 👉 [FTP服务器](ftp)
+ 👉 [Websocket](websocket)
+ +### 应用程序监控 + + 👉 [Linux进程](process)
+ 👉 [JVM虚拟机](jvm)
+ 👉 [SpringBoot2.0](springboot2)
+ 👉 [SpringBoot3.0](springboot3)
+ 👉 [DynamicTp线程池](dynamic_tp)
+ +### 数据库监控 + + 👉 [MYSQL数据库监控](mysql)
+ 👉 [MariaDB数据库监控](mariadb)
+ 👉 [PostgreSQL数据库监控](postgresql)
+ 👉 [SqlServer数据库监控](sqlserver)
+ 👉 [Oracle数据库监控](oracle)
+ 👉 [达梦数据库监控](dm)
+ 👉 [OpenGauss数据库监控](opengauss)
+ 👉 [IoTDB数据库监控](iotdb)
+ 👉 [TiDB数据库监控](tidb)
+ 👉 [MongoDB数据库监控](mongodb)
+ 👉 [NebulaGraph集群监控](nebulagraph_cluster)
+ +### 缓存监控 + + 👉 [Redis](redis)
+ 👉 [Memcached](memcached)
+ +### 操作系统监控 + + 👉 [Linux操作系统监控](linux)
+ 👉 [Windows操作系统监控](windows)
+ 👉 [Ubuntu操作系统监控](ubuntu)
+ 👉 [Centos操作系统监控](centos)
+ 👉 [FreeBSD操作系统监控](freebsd)
+ 👉 [RedHat操作系统监控](redhat)
+ 👉 [RockyLinux操作系统监控](rockylinux)
+ 👉 [EulerOS操作系统监控](euleros)
+ +### 中间件监控 + + 👉 [Zookeeper](zookeeper)
+ 👉 [Kafka](kafka)
+ 👉 [Tomcat](tomcat)
+ 👉 [ShenYu](shenyu)
+ 👉 [DynamicTp](dynamic_tp)
+ 👉 [RabbitMQ](rabbitmq)
+ 👉 [ActiveMQ](activemq)
+ 👉 [Jetty](jetty)
+ 👉 [Nacos](nacos)
+ +### 云原生监控 + + 👉 [Docker](docker)
+ 👉 [Kubernetes](kubernetes)
+ +### 大数据监控 + + 👉 [Clickhouse](clickhouse)
+ 👉 [ElasticSearch](elasticsearch)
+ 👉 [Flink](flink)
+ +### Ai大模型监控 + + 👉 [OpenAi](openai)
+ +### 网络监控 + + 👉 [华为通用交换机](huawei_switch)
+ +### 服务器监控 + +## 💡 告警服务 + +> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 +> 告警服务的定位是阈值准确及时触发,告警通知及时可达。 + +### 告警中心 + +> 已触发的告警信息中心,提供告警删除,告警处理,标记未处理,告警级别状态等查询过滤。 + +### 告警配置 + +> 指标阈值配置,提供表达式形式的指标阈值配置,可设置告警级别,触发次数,告警通知模版和是否启用,关联监控等功能。 + +详见 👉 [阈值告警](alert_threshold)
+   👉 [阈值表达式](alert_threshold_expr) + +### 告警通知 + +> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 +> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 +> 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。 + + 👉 [配置邮箱通知](alert_email)
+ 👉 [配置 Webhook 通知](alert_webhook)
+ 👉 [配置 Telegram 通知](alert_telegram)
+ 👉 [配置 Discord 通知](alert_discord)
+ 👉 [配置 Slack 通知](alert_slack)
+ 👉 [配置企业微信机器人通知](alert_wework)
+ 👉 [配置钉钉机器人通知](alert_dingtalk)
+ 👉 [配置飞书机器人通知](alert_feishu)
+ 👉 [配置华为云SMN通知](alert_smn)
+ +### 插件 + + 👉 [插件](plugin)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md index fec361e2366..bda83b006e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md @@ -37,57 +37,54 @@ export HADOOP_OPTS= "$HADOOP_OPTS ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache (限JDK8及以下) -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 
个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md index f75d5cc8e98..e732bf45fd6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md @@ -4,6 +4,7 @@ title: 监控:Hbase Master监控 sidebar_label: Apache Hbase Master keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] --- + > 对Hbase Master的通用性能指标进行采集监控 **使用协议:HTTP** @@ -14,49 +15,46 @@ keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | -------------------------------------------------------------------- | -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 端口 | hbase master的端口号,默认为16010。即:`hbase.master.info.port`参数值 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | 
+|--------|---------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 端口 | hbase master的端口号,默认为16010。即:`hbase.master.info.port`参数值 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- |----| ---------------------------- | -| numRegionServers | 无 | 当前存活的 RegionServer 个数 | -| numDeadRegionServers | 无 | 当前Dead的 RegionServer 个数 | -| averageLoad | 无 | 集群平均负载 | -| clusterRequests | 无 | 集群请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|-------------------------| +| numRegionServers | 无 | 当前存活的 RegionServer 个数 | +| numDeadRegionServers | 无 | 当前Dead的 RegionServer 个数 | +| averageLoad | 无 | 集群平均负载 | +| clusterRequests | 无 | 集群请求数量 | #### 指标集合:Rit - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------- | ------ | ------------------- | -| ritCount | 无 | 当前的 RIT 数量 | -| ritCountOverThreshold | 无 | 超过阈值的 RIT 数量 | -| ritOldestAge | ms | 最老的RIT的持续时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|--------------| +| ritCount | 无 | 当前的 RIT 数量 | +| ritCountOverThreshold | 无 | 超过阈值的 RIT 数量 | +| ritOldestAge | ms | 最老的RIT的持续时间 | #### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------------- | ----- | ------------------------ | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|--------------------| | liveRegionServers | 无 | 当前活跃RegionServer列表 | | deadRegionServers | 无 | 当前离线RegionServer列表 | -| zookeeperQuorum | 无 | Zookeeper列表 | -| masterHostName | 无 | Master节点 | -| BalancerCluster_num_ops | 无 | 集群负载均衡次数 | -| numActiveHandler | 无 | RPC句柄数 | -| receivedBytes | MB | 集群接收数据量 | -| sentBytes | MB | 集群发送数据量(MB) | -| clusterRequests | 无 | 集群总请求数量 | +| zookeeperQuorum | 无 | Zookeeper列表 | +| masterHostName | 无 | Master节点 | +| BalancerCluster_num_ops | 无 | 
集群负载均衡次数 | +| numActiveHandler | 无 | RPC句柄数 | +| receivedBytes | MB | 集群接收数据量 | +| sentBytes | MB | 集群发送数据量(MB) | +| clusterRequests | 无 | 集群总请求数量 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md index 44d5b533932..1c1cfdf1802 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md @@ -4,6 +4,7 @@ title: 监控 Hbase RegionServer监控 sidebar_label: Apache Hbase RegionServer keywords: [开源监控系统, 开源数据库监控, RegionServer监控] --- + > 对Hbase RegionServer的通用性能指标进行采集监控 **使用协议:HTTP** @@ -14,16 +15,15 @@ keywords: [开源监控系统, 开源数据库监控, RegionServer监控] ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ |----------------------------------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 端口 | hbase regionserver的端口号,默认为16030。即:`hbase.regionserver.info.port`参数值 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|---------------------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 端口 | hbase regionserver的端口号,默认为16030。即:`hbase.regionserver.info.port`参数值 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 @@ -31,67 +31,64 @@ keywords: [开源监控系统, 开源数据库监控, RegionServer监控] #### 指标集合:server - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- |-------|------------------------------------------| -| regionCount | 无 | Region数量 | -| readRequestCount | 无 | 重启集群后的读请求数量 | -| writeRequestCount | 无 | 
重启集群后的写请求数量 | -| averageRegionSize | MB | 平均Region大小 | -| totalRequestCount | 无 | 全部请求数量 | -| ScanTime_num_ops | 无 | Scan 请求总量 | -| Append_num_ops | 无 | Append 请求量 | -| Increment_num_ops | 无 | Increment请求量 | -| Get_num_ops | 无 | Get 请求量 | -| Delete_num_ops | 无 | Delete 请求量 | -| Put_num_ops | 无 | Put 请求量 | -| ScanTime_mean | 无 | 平均 Scan 请求时间 | -| ScanTime_min | 无 | 最小 Scan 请求时间 | -| ScanTime_max | 无 | 最大 Scan 请求时间 | -| ScanSize_mean | bytes | 平均 Scan 请求大小 | -| ScanSize_min | 无 | 最小 Scan 请求大小 | -| ScanSize_max | 无 | 最大 Scan 请求大小 | -| slowPutCount | 无 | 慢操作次数/Put | -| slowGetCount | 无 | 慢操作次数/Get | -| slowAppendCount | 无 | 慢操作次数/Append | -| slowIncrementCount | 无 | 慢操作次数/Increment | -| slowDeleteCount | 无 | 慢操作次数/Delete | -| blockCacheSize | 无 | 缓存块内存占用大小 | -| blockCacheCount | 无 | 缓存块数量_Block Cache 中的 Block 数量 | -| blockCacheExpressHitPercent | 无 | 读缓存命中率 | -| memStoreSize | 无 | Memstore 大小 | -| FlushTime_num_ops | 无 | RS写磁盘次数/Memstore flush 写磁盘次数 | -| flushQueueLength | 无 | Region Flush 队列长度 | -| flushedCellsSize | 无 | flush到磁盘大小 | -| storeFileCount | 无 | Storefile 个数 | -| storeCount | 无 | Store 个数 | -| storeFileSize | 无 | Storefile 大小 | -| compactionQueueLength | 无 | Compaction 队列长度 | -| percentFilesLocal | 无 | Region 的 HFile 位于本地 HDFS Data Node的比例 | -| percentFilesLocalSecondaryRegions | 无 | Region 副本的 HFile 位于本地 HDFS Data Node的比例 | -| hlogFileCount | 无 | WAL 文件数量 | -| hlogFileSize | 无 | WAL 文件大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------------------|-------|-----------------------------------------| +| regionCount | 无 | Region数量 | +| readRequestCount | 无 | 重启集群后的读请求数量 | +| writeRequestCount | 无 | 重启集群后的写请求数量 | +| averageRegionSize | MB | 平均Region大小 | +| totalRequestCount | 无 | 全部请求数量 | +| ScanTime_num_ops | 无 | Scan 请求总量 | +| Append_num_ops | 无 | Append 请求量 | +| Increment_num_ops | 无 | Increment请求量 | +| Get_num_ops | 无 | Get 请求量 | +| Delete_num_ops | 无 | Delete 请求量 | +| Put_num_ops | 无 | Put 请求量 | +| ScanTime_mean | 无 | 平均 Scan 请求时间 | +| ScanTime_min | 
无 | 最小 Scan 请求时间 | +| ScanTime_max | 无 | 最大 Scan 请求时间 | +| ScanSize_mean | bytes | 平均 Scan 请求大小 | +| ScanSize_min | 无 | 最小 Scan 请求大小 | +| ScanSize_max | 无 | 最大 Scan 请求大小 | +| slowPutCount | 无 | 慢操作次数/Put | +| slowGetCount | 无 | 慢操作次数/Get | +| slowAppendCount | 无 | 慢操作次数/Append | +| slowIncrementCount | 无 | 慢操作次数/Increment | +| slowDeleteCount | 无 | 慢操作次数/Delete | +| blockCacheSize | 无 | 缓存块内存占用大小 | +| blockCacheCount | 无 | 缓存块数量_Block Cache 中的 Block 数量 | +| blockCacheExpressHitPercent | 无 | 读缓存命中率 | +| memStoreSize | 无 | Memstore 大小 | +| FlushTime_num_ops | 无 | RS写磁盘次数/Memstore flush 写磁盘次数 | +| flushQueueLength | 无 | Region Flush 队列长度 | +| flushedCellsSize | 无 | flush到磁盘大小 | +| storeFileCount | 无 | Storefile 个数 | +| storeCount | 无 | Store 个数 | +| storeFileSize | 无 | Storefile 大小 | +| compactionQueueLength | 无 | Compaction 队列长度 | +| percentFilesLocal | 无 | Region 的 HFile 位于本地 HDFS Data Node的比例 | +| percentFilesLocalSecondaryRegions | 无 | Region 副本的 HFile 位于本地 HDFS Data Node的比例 | +| hlogFileCount | 无 | WAL 文件数量 | +| hlogFileSize | 无 | WAL 文件大小 | #### 指标集合:IPC - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------- | ------ | ------------------- | -| numActiveHandler | 无 | 当前的 RIT 数量 | -| NotServingRegionException | 无 | 超过阈值的 RIT 数量 | -| RegionMovedException | ms | 最老的RIT的持续时间 | -| RegionTooBusyException | ms | 最老的RIT的持续时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|--------------| +| numActiveHandler | 无 | 当前的 RIT 数量 | +| NotServingRegionException | 无 | 超过阈值的 RIT 数量 | +| RegionMovedException | ms | 最老的RIT的持续时间 | +| RegionTooBusyException | ms | 最老的RIT的持续时间 | #### 指标集合:JVM - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------------- | ----- | ------------------------ | -| MemNonHeapUsedM | 无 | 当前活跃RegionServer列表 | -| MemNonHeapCommittedM | 无 | 当前离线RegionServer列表 | -| MemHeapUsedM | 无 | Zookeeper列表 | -| MemHeapCommittedM | 无 | Master节点 | -| MemHeapMaxM | 无 | 集群负载均衡次数 | -| MemMaxM | 无 | RPC句柄数 | -| GcCount | MB | 集群接收数据量 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|----------------------|------|--------------------| +| MemNonHeapUsedM | 无 | 当前活跃RegionServer列表 | +| MemNonHeapCommittedM | 无 | 当前离线RegionServer列表 | +| MemHeapUsedM | 无 | Zookeeper列表 | +| MemHeapCommittedM | 无 | Master节点 | +| MemHeapMaxM | 无 | 集群负载均衡次数 | +| MemMaxM | 无 | RPC句柄数 | +| GcCount | MB | 集群接收数据量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md index efb05494290..db494acbb8e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md @@ -15,42 +15,43 @@ keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS DataN ## 配置参数 -| 参数名称 | 参数帮助描述 | -| ---------------- |---------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | -| 端口 | Apache HDFS DataNode 的监控端口号,默认为50075。 | -| 查询超时时间 | 查询 Apache HDFS DataNode 的超时时间,单位毫秒,默认6000毫秒。 | -| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | -| 是否探测 | 新增监控前是否先探测检查监控可用性。 | -| 描述备注 | 此监控的更多描述和备注信息。 | +| 参数名称 | 参数帮助描述 | +|--------|----------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | Apache HDFS DataNode 的监控端口号,默认为50075。 | +| 查询超时时间 | 查询 Apache HDFS DataNode 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 是否探测 | 新增监控前是否先探测检查监控可用性。 | +| 描述备注 | 此监控的更多描述和备注信息。 | ### 采集指标 #### 指标集合:FSDatasetState -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------------- | -------- | ------------------------------------ | -| DfsUsed | GB | DataNode HDFS使用量 | -| Remaining | GB | DataNode HDFS剩余空间 | -| Capacity | GB | DataNode HDFS空间总量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|-------------------| +| DfsUsed | GB | DataNode HDFS使用量 | +| Remaining | GB | DataNode HDFS剩余空间 | +| Capacity | GB | DataNode HDFS空间总量 | #### 指标集合:JvmMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------ | -------- | ------------------------------------ | -| MemNonHeapUsedM | 
MB | JVM 当前已经使用的 NonHeapMemory 的大小 | -| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | -| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | -| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | -| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | -| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | -| ThreadsRunnable | 个 | 处于 RUNNABLE 状态的线程数量 | -| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | -| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | -| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|-------------------------------| +| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | +| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | +| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | +| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | +| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | +| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | +| ThreadsRunnable | 个 | 处于 RUNNABLE 状态的线程数量 | +| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | +| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | +| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | #### 指标集合:runtime -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------| -------- | ----------------- | -| StartTime | | 启动时间 | \ No newline at end of file +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| StartTime | | 启动时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md index 26fd5e985af..66343c11cd2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md @@ -15,79 +15,79 @@ keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS NameN ## 配置参数 -| 参数名称 | 参数帮助描述 | -| ---------------- |---------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | -| 端口 | HDFS NameNode 的监控端口号,默认为50070。 | -| 查询超时时间 | 查询 HDFS NameNode 的超时时间,单位毫秒,默认6000毫秒。 | -| 指标采集间隔 | 
监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | -| 是否探测 | 新增监控前是否先探测检查监控可用性。 | -| 描述备注 | 此监控的更多描述和备注信息。 | +| 参数名称 | 参数帮助描述 | +|--------|---------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | HDFS NameNode 的监控端口号,默认为50070。 | +| 查询超时时间 | 查询 HDFS NameNode 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 是否探测 | 新增监控前是否先探测检查监控可用性。 | +| 描述备注 | 此监控的更多描述和备注信息。 | ### 采集指标 #### 指标集合:FSNamesystem -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------------- | -------- | ------------------------------------ | -| CapacityTotal | | 集群存储总容量 | -| CapacityTotalGB | GB | 集群存储总容量 | -| CapacityUsed | | 集群存储已使用容量 | -| CapacityUsedGB | GB | 集群存储已使用容量 | -| CapacityRemaining | | 集群存储剩余容量 | -| CapacityRemainingGB | GB | 集群存储剩余容量 | -| CapacityUsedNonDFS | | 集群非 HDFS 使用容量 | -| TotalLoad | | 整个集群的客户端连接数 | -| FilesTotal | | 集群文件总数量 | -| BlocksTotal | | 总 BLOCK 数量 | -| PendingReplicationBlocks | | 等待被备份的块数量 | -| UnderReplicatedBlocks | | 副本数不够的块数量 | -| CorruptBlocks | | 坏块数量 | -| ScheduledReplicationBlocks | | 安排要备份的块数量 | -| PendingDeletionBlocks | | 等待被删除的块数量 | -| ExcessBlocks | | 多余的块数量 | -| PostponedMisreplicatedBlocks | | 被推迟处理的异常块数量 | -| NumLiveDataNodes | | 活的数据节点数量 | -| NumDeadDataNodes | | 已经标记为 Dead 状态的数据节点数量 | -| NumDecomLiveDataNodes | | 下线且 Live 的节点数量 | -| NumDecomDeadDataNodes | | 下线且 Dead 的节点数量 | -| NumDecommissioningDataNodes | | 正在下线的节点数量 | -| TransactionsSinceLastCheckpoint | | 从上次Checkpoint之后的事务数量 | -| LastCheckpointTime | | 上一次Checkpoint时间 | -| PendingDataNodeMessageCount | | DATANODE 的请求被 QUEUE 在 standby namenode 中的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|---------------------------------------------| +| CapacityTotal | | 集群存储总容量 | +| CapacityTotalGB | GB | 集群存储总容量 | +| CapacityUsed | | 集群存储已使用容量 | +| CapacityUsedGB | GB | 集群存储已使用容量 | +| CapacityRemaining | | 集群存储剩余容量 | +| CapacityRemainingGB | GB | 集群存储剩余容量 | +| CapacityUsedNonDFS | | 集群非 HDFS 使用容量 | +| TotalLoad | | 整个集群的客户端连接数 | +| FilesTotal | | 集群文件总数量 | +| 
BlocksTotal | | 总 BLOCK 数量 | +| PendingReplicationBlocks | | 等待被备份的块数量 | +| UnderReplicatedBlocks | | 副本数不够的块数量 | +| CorruptBlocks | | 坏块数量 | +| ScheduledReplicationBlocks | | 安排要备份的块数量 | +| PendingDeletionBlocks | | 等待被删除的块数量 | +| ExcessBlocks | | 多余的块数量 | +| PostponedMisreplicatedBlocks | | 被推迟处理的异常块数量 | +| NumLiveDataNodes | | 活的数据节点数量 | +| NumDeadDataNodes | | 已经标记为 Dead 状态的数据节点数量 | +| NumDecomLiveDataNodes | | 下线且 Live 的节点数量 | +| NumDecomDeadDataNodes | | 下线且 Dead 的节点数量 | +| NumDecommissioningDataNodes | | 正在下线的节点数量 | +| TransactionsSinceLastCheckpoint | | 从上次Checkpoint之后的事务数量 | +| LastCheckpointTime | | 上一次Checkpoint时间 | +| PendingDataNodeMessageCount | | DATANODE 的请求被 QUEUE 在 standby namenode 中的个数 | #### 指标集合:RPC -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------- | -------- | ---------------------- | -| ReceivedBytes | | 接收数据速率 | -| SentBytes | | 发送数据速率 | -| RpcQueueTimeNumOps | | RPC 调用速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|----------| +| ReceivedBytes | | 接收数据速率 | +| SentBytes | | 发送数据速率 | +| RpcQueueTimeNumOps | | RPC 调用速率 | #### 指标集合:runtime -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------| -------- | ----------------- | -| StartTime | | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| StartTime | | 启动时间 | #### 指标集合:JvmMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------ | -------- | ---------------- | -| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | -| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | -| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | -| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | -| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | -| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | -| GcCountParNew | 次 | 新生代GC消耗时间 | -| GcTimeMillisParNew | 毫秒 | 新生代GC消耗时间 | -| GcCountConcurrentMarkSweep | 毫秒 | 老年代GC次数 | -| GcTimeMillisConcurrentMarkSweep | 个 | 老年代GC消耗时间 | -| GcCount | 个 | GC次数 | -| GcTimeMillis | 个 | GC消耗时间 | -| ThreadsRunnable | 个 | 处于 BLOCKED 状态的线程数量 | -| ThreadsBlocked | 个 | 处于 
BLOCKED 状态的线程数量 | -| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | -| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|-------------------------------| +| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | +| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | +| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | +| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | +| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | +| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | +| GcCountParNew | 次 | 新生代GC消耗时间 | +| GcTimeMillisParNew | 毫秒 | 新生代GC消耗时间 | +| GcCountConcurrentMarkSweep | 毫秒 | 老年代GC次数 | +| GcTimeMillisConcurrentMarkSweep | 个 | 老年代GC消耗时间 | +| GcCount | 个 | GC次数 | +| GcTimeMillis | 个 | GC消耗时间 | +| ThreadsRunnable | 个 | 处于 BLOCKED 状态的线程数量 | +| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | +| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | +| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md index 2c2136f91cf..3b41d3979c6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md @@ -16,6 +16,7 @@ keywords: [开源监控工具, 开源 Apache Hive 监控工具, 监控 Apache Hi ```shell hive --service metastore & ``` + **2. 
启用 Hive Server2:** ```shell @@ -24,54 +25,53 @@ hive --service hiveserver2 & ### 配置参数 -| 参数名称 | 参数描述 | -| ---------- |--------------------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 10002。 | -| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | +| 参数名称 | 参数描述 | +|----------|--------------------------------------------------------| +| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | +| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | +| 端口 | 数据库提供的默认端口为 10002。 | +| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | ### 采集指标 #### 指标收集: 基本信息 -| 指标名称 | 指标单位 | 指标描述 | -|--------|-------|-----------------------------| -| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | -| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | -| 虚拟机版本 | 无 | 虚拟机的版本。 | -| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | +| 指标名称 | 指标单位 | 指标描述 | +|--------|------|-----------------------------| +| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | +| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | +| 虚拟机版本 | 无 | 虚拟机的版本。 | +| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | #### 指标收集: 环境信息 -| 指标名称 | 指标单位 | 指标描述 | -|------------|-------|--------------------------------| -| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | -| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | -| 操作系统版本 | 无 | 操作系统的版本。 | -| 操作系统架构 | 无 | 操作系统的架构。 | -| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | -| java运行环境版本 | 无 | Java 运行时环境的版本。 | +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|--------------------------------| +| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | +| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | +| 操作系统版本 | 无 | 操作系统的版本。 | +| 操作系统架构 | 无 | 操作系统的架构。 | +| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | +| java运行环境版本 | 无 | Java 运行时环境的版本。 | #### 指标收集: 线程信息 -| 指标名称 | 
指标单位 | 指标描述 | +| 指标名称 | 指标单位 | 指标描述 | |--------|------|------------------------------| | 线程数量 | None | HiveServer2 当前正在使用的线程数。 | -| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | +| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | | 最高线程数 | None | HiveServer2 在任何给定时间使用的最高线程数。 | | 守护线程数 | None | HiveServer2 当前活动的守护线程数。 | #### 指标收集: 代码缓存 -| 指标名称 | 指标单位 | 指标描述 | -|------------|-------------|---------------| -| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | -| 内存池初始内存 | MB | 内存池请求的初始内存量。 | -| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | -| 内存池内存使用量 | MB | 内存池已使用内存量 | - +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|---------------| +| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | +| 内存池初始内存 | MB | 内存池请求的初始内存量。 | +| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | +| 内存池内存使用量 | MB | 内存池已使用内存量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md index c30c4186661..6bd76f639e8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md @@ -1,53 +1,54 @@ ---- -id: huawei_switch -title: 监控:华为通用交换机 -sidebar_label: 华为通用交换机 -keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] ---- - -> 对 华为交换机 的通用指标(可用性,系统信息,端口流量等)进行采集监控。 - -**使用协议:SNMP** - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|---------------------|-------------------------------------------------------------------------------------------------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | SNMP端口号,默认为161 | -| SNMP 版本 | 所使用的SNMP版本 | -| SNMP 团体字 | 用于SNMP v1 和 SNMP v2c,用于在SNMP Agent完成认证,字符串形式。团体名包括“read”和“write”两种,执行SNMP查询操作时,采用“read”团体名进行认证;执行SNMP设置操作时,则采用“write”团体名进行认证。 | -| SNMP username | 用于SNMP v3,MSG 用户名 | -| SNMP contextName | 用于SNMP v3,用于确定Context EngineID对被管理设备的MIB视图。 | -| SNMP authPassword | 用于SNMP v3,SNMP 认证密码 | -| authPassword 加密方式 | 用于SNMP v3,SNMP 认证算法 | -| SNMP privPassphrase | 
用于SNMP v3,SNMP 加密密码 | -| privPassword 加密方式 | 用于SNMP v3,SNMP 加密算法 | -| 查询超时时间 | 设置查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | - -### 采集指标 - -由于华为通用交换机可查询的指标过多,详细的指标可于[华为MIB查询平台](https://info.support.huawei.com/info-finder/tool/zh/enterprise/mib) -进行查询。 - -此文档仅介绍监控模板中查询的监控指标。 - -#### 指标集合:huawei_core - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ifIndex | 无 | 接口索引 该值大于零且全局唯一。 | -| ifDescr | 无 | 描述接口的字符串,应该包含制造商、产品名和接口软硬件的版本。 | -| ifMtu | octets | 最大传输单元。接口上可以传送的最大报文的大小。对于传输网络数据报的接口,这是接口可以传输的最大数据报的大小。 | -| ifSpeed | 比特/秒 | 估计的接口当前带宽。对于带宽无法改变或者无法准确估计的接口,该项为额定带宽值。 如果接口的带宽比该表项的值大,则该表项的值是其最大值(4,294,967,295),并且ifHighSpeed的值是接口的速率。对于没有速率概念的子层接口,该表项的值为零。 | -| ifInOctets | octets | 该接口入方向通过的总字节数,包括分桢的数据。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | -| ifInDiscards | 无 | 入方向的被丢弃的报文个数,即使没有错误发生。也将阻止这些报文送往上层协议。 一个可能的原因是释放buffer的空间。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | -| ifInErrors | 无 | 出错而不会被送往上层协议的报文/传输单元个数。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | -| ifOutOctets | octets | 该接口出方向通过的总字节数,包括分桢的数据。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | -| ifOutDiscards | 无 | 出方向的被丢弃的报文个数,即使没有错误发生。也将阻止这些报文发送。丢弃此类报文的一个可能原因是为了释放缓冲区空间。 在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | -| ifOutErrors | 无 | 对于面向数据包的接口,该节点表示由于错误而无法发送的数据包数量。对于面向字符或固定长度接口,该节点表示由于错误而无法传输的传输单元的数量。这种计数器的值可能在管理系统的重新初始化时会不连续,其他时间如ifCounterDiscontinuityTime的值。 | -| ifAdminStatus | 无 | 接口的理想状态。 testing(3)状态表示没有可操作的数据包通过。 当受管系统初始化时,全部接口开始于ifAdminStatus在down(2)状态。由于明确的管理动作或被管理的系统保留的每个配置信息,ifAdminStatus然后被更改为Up(1)或testing(3)状态(或保留在down(2)状态)。 | -| ifOperStatus | 无 | 
当前接口的操作状态。testing(3)状态表示没有可操作的数据包可以通过。如果ifAdminStatus是down(2),则ifOperStatus应该是down(2)。 如果ifAdminStatus是改为up(1),则ifOperStatus应该更改为up(1)。如果接口准备好传输,接收网络流量; 它应该改为dormant(5)。如果接口正在等待外部动作(如串行线路等待传入连接); 它应该保持在down(2)状态,并且只有当有故障阻止它变成up(1)状态。 它应该留在notPresent(6)状态如果接口缺少(通常为硬件)组件。 | +--- +id: huawei_switch +title: 监控:华为通用交换机 +sidebar_label: 华为通用交换机 +keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] +--- + +> 对 华为交换机 的通用指标(可用性,系统信息,端口流量等)进行采集监控。 + +**使用协议:SNMP** + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|---------------------|-------------------------------------------------------------------------------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | SNMP端口号,默认为161 | +| SNMP 版本 | 所使用的SNMP版本 | +| SNMP 团体字 | 用于SNMP v1 和 SNMP v2c,用于在SNMP Agent完成认证,字符串形式。团体名包括“read”和“write”两种,执行SNMP查询操作时,采用“read”团体名进行认证;执行SNMP设置操作时,则采用“write”团体名进行认证。 | +| SNMP username | 用于SNMP v3,MSG 用户名 | +| SNMP contextName | 用于SNMP v3,用于确定Context EngineID对被管理设备的MIB视图。 | +| SNMP authPassword | 用于SNMP v3,SNMP 认证密码 | +| authPassword 加密方式 | 用于SNMP v3,SNMP 认证算法 | +| SNMP privPassphrase | 用于SNMP v3,SNMP 加密密码 | +| privPassword 加密方式 | 用于SNMP v3,SNMP 加密算法 | +| 查询超时时间 | 设置查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | + +### 采集指标 + +由于华为通用交换机可查询的指标过多,详细的指标可于[华为MIB查询平台](https://info.support.huawei.com/info-finder/tool/zh/enterprise/mib) +进行查询。 + +此文档仅介绍监控模板中查询的监控指标。 + +#### 指标集合:huawei_core + +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ifIndex | 无 | 接口索引 该值大于零且全局唯一。 | +| ifDescr | 无 | 描述接口的字符串,应该包含制造商、产品名和接口软硬件的版本。 | +| ifMtu | octets | 最大传输单元。接口上可以传送的最大报文的大小。对于传输网络数据报的接口,这是接口可以传输的最大数据报的大小。 | 
+| ifSpeed | 比特/秒 | 估计的接口当前带宽。对于带宽无法改变或者无法准确估计的接口,该项为额定带宽值。 如果接口的带宽比该表项的值大,则该表项的值是其最大值(4,294,967,295),并且ifHighSpeed的值是接口的速率。对于没有速率概念的子层接口,该表项的值为零。 | +| ifInOctets | octets | 该接口入方向通过的总字节数,包括分桢的数据。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | +| ifInDiscards | 无 | 入方向的被丢弃的报文个数,即使没有错误发生。也将阻止这些报文送往上层协议。 一个可能的原因是释放buffer的空间。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | +| ifInErrors | 无 | 出错而不会被送往上层协议的报文/传输单元个数。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | +| ifOutOctets | octets | 该接口出方向通过的总字节数,包括分桢的数据。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | +| ifOutDiscards | 无 | 出方向的被丢弃的报文个数,即使没有错误发生。也将阻止这些报文发送。丢弃此类报文的一个可能原因是为了释放缓冲区空间。 在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | +| ifOutErrors | 无 | 对于面向数据包的接口,该节点表示由于错误而无法发送的数据包数量。对于面向字符或固定长度接口,该节点表示由于错误而无法传输的传输单元的数量。这种计数器的值可能在管理系统的重新初始化时会不连续,其他时间如ifCounterDiscontinuityTime的值。 | +| ifAdminStatus | 无 | 接口的理想状态。 testing(3)状态表示没有可操作的数据包通过。 当受管系统初始化时,全部接口开始于ifAdminStatus在down(2)状态。由于明确的管理动作或被管理的系统保留的每个配置信息,ifAdminStatus然后被更改为Up(1)或testing(3)状态(或保留在down(2)状态)。 | +| ifOperStatus | 无 | 当前接口的操作状态。testing(3)状态表示没有可操作的数据包可以通过。如果ifAdminStatus是down(2),则ifOperStatus应该是down(2)。 如果ifAdminStatus是改为up(1),则ifOperStatus应该更改为up(1)。如果接口准备好传输,接收网络流量; 它应该改为dormant(5)。如果接口正在等待外部动作(如串行线路等待传入连接); 它应该保持在down(2)状态,并且只有当有故障阻止它变成up(1)状态。 它应该留在notPresent(6)状态如果接口缺少(通常为硬件)组件。 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md index 8770ababdcd..bb802791dda 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md @@ -4,6 +4,7 @@ title: 监控:HugeGraph监控 sidebar_label: Apache HugeGraph keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] --- + > 对HugeGraph的通用性能指标进行采集监控 **使用协议:HTTP** @@ -14,8 +15,7 @@ keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] ## 配置参数 - -| 参数名称 | 参数帮助描述 
| +| 参数名称 | 参数帮助描述 | |-----------|---------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 端口 | HugeGraph restserver的端口号,默认为8080。即:`restserver_port`参数值 | @@ -30,117 +30,112 @@ keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] #### 指标集合:gauges -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------------------ | -------- | --------------------------------- | -| edge-hugegraph-capacity | 无 | 表示当前图中边的容量上限 | -| edge-hugegraph-expire | 无 | 表示边数据的过期时间 | -| edge-hugegraph-hits | 无 | 表示边数据缓存的命中次数 | -| edge-hugegraph-miss | 无 | 表示边数据缓存的未命中次数 | -| edge-hugegraph-size | 无 | 表示当前图中边的数量 | -| instances | 无 | 表示当前运行的HugeGraph实例数量| -| schema-id-hugegraph-capacity | 无 | 表示图中schema ID的容量上限 | -| schema-id-hugegraph-expire | 无 | 表示schema ID数据的过期时间 | -| schema-id-hugegraph-hits | 无 | 表示schema ID数据缓存的命中次数| -| schema-id-hugegraph-miss | 无 | 表示schema ID数据缓存的未命中次数| -| schema-id-hugegraph-size | 无 | 表示当前图中schema ID的数量 | -| schema-name-hugegraph-capacity | 无 | 表示图中schema名称的容量上限 | -| schema-name-hugegraph-expire | 无 | 表示schema名称数据的过期时间 | -| schema-name-hugegraph-hits | 无 | 表示schema名称数据缓存的命中次数| -| schema-name-hugegraph-miss | 无 | 表示schema名称数据缓存的未命中次数| -| schema-name-hugegraph-size | 无 | 表示当前图中schema名称的数量 | -| token-hugegraph-capacity | 无 | 表示图中token的容量上限 | -| token-hugegraph-expire | 无 | 表示token数据的过期时间 | -| token-hugegraph-hits | 无 | 表示token数据缓存的命中次数 | -| token-hugegraph-miss | 无 | 表示token数据缓存的未命中次数 | -| token-hugegraph-size | 无 | 表示当前图中token的数量 | -| users-hugegraph-capacity | 无 | 表示图中用户的容量上限 | -| users-hugegraph-expire | 无 | 表示用户数据的过期时间 | -| users-hugegraph-hits | 无 | 表示用户数据缓存的命中次数 | -| users-hugegraph-miss | 无 | 表示用户数据缓存的未命中次数 | -| users-hugegraph-size | 无 | 表示当前图中用户的数量 | -| users_pwd-hugegraph-capacity | 无 | 表示users_pwd的容量上限 | -| users_pwd-hugegraph-expire | 无 | 表示users_pwd数据的过期时间 | -| users_pwd-hugegraph-hits | 无 | 表示users_pwd数据缓存的命中次数 | -| users_pwd-hugegraph-miss | 无 | 表示users_pwd数据缓存的未命中次数| -| users_pwd-hugegraph-size | 无 | 
表示当前图中users_pwd的数量 | -| vertex-hugegraph-capacity | 无 | 表示图中顶点的容量上限 | -| vertex-hugegraph-expire | 无 | 表示顶点数据的过期时间 | -| vertex-hugegraph-hits | 无 | 表示顶点数据缓存的命中次数 | -| vertex-hugegraph-miss | 无 | 表示顶点数据缓存的未命中次数 | -| vertex-hugegraph-size | 无 | 表示当前图中顶点的数量 | -| batch-write-threads | 无 | 表示批量写入操作时的线程数 | -| max-write-threads | 无 | 表示最大写入操作的线程数 | -| pending-tasks | 无 | 表示待处理的任务数 | -| workers | 无 | 表示当前工作线程的数量 | -| average-load-penalty | 无 | 表示平均加载延迟 | -| estimated-size | 无 | 表示估计的数据大小 | -| eviction-count | 无 | 表示被驱逐的数据条数 | -| eviction-weight | 无 | 表示被驱逐数据的权重 | -| hit-count | 无 | 表示缓存命中总数 | -| hit-rate | 无 | 表示缓存命中率 | -| load-count | 无 | 表示数据加载次数 | -| load-failure-count | 无 | 表示数据加载失败次数 | -| load-failure-rate | 无 | 表示数据加载失败率 | -| load-success-count | 无 | 表示数据加载成功次数 | -| long-run-compilation-count | 无 | 表示长时间运行的编译次数 | -| miss-count | 无 | 表示缓存未命中总数 | -| miss-rate | 无 | 表示缓存未命中率 | -| request-count | 无 | 表示总的请求次数 | -| total-load-time | 无 | 表示总的数据加载时间 | -| sessions | 无 | 表示当前的活动会话数量 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-----------------------| +| edge-hugegraph-capacity | 无 | 表示当前图中边的容量上限 | +| edge-hugegraph-expire | 无 | 表示边数据的过期时间 | +| edge-hugegraph-hits | 无 | 表示边数据缓存的命中次数 | +| edge-hugegraph-miss | 无 | 表示边数据缓存的未命中次数 | +| edge-hugegraph-size | 无 | 表示当前图中边的数量 | +| instances | 无 | 表示当前运行的HugeGraph实例数量 | +| schema-id-hugegraph-capacity | 无 | 表示图中schema ID的容量上限 | +| schema-id-hugegraph-expire | 无 | 表示schema ID数据的过期时间 | +| schema-id-hugegraph-hits | 无 | 表示schema ID数据缓存的命中次数 | +| schema-id-hugegraph-miss | 无 | 表示schema ID数据缓存的未命中次数 | +| schema-id-hugegraph-size | 无 | 表示当前图中schema ID的数量 | +| schema-name-hugegraph-capacity | 无 | 表示图中schema名称的容量上限 | +| schema-name-hugegraph-expire | 无 | 表示schema名称数据的过期时间 | +| schema-name-hugegraph-hits | 无 | 表示schema名称数据缓存的命中次数 | +| schema-name-hugegraph-miss | 无 | 表示schema名称数据缓存的未命中次数 | +| schema-name-hugegraph-size | 无 | 表示当前图中schema名称的数量 | +| token-hugegraph-capacity | 无 | 表示图中token的容量上限 | +| 
token-hugegraph-expire | 无 | 表示token数据的过期时间 | +| token-hugegraph-hits | 无 | 表示token数据缓存的命中次数 | +| token-hugegraph-miss | 无 | 表示token数据缓存的未命中次数 | +| token-hugegraph-size | 无 | 表示当前图中token的数量 | +| users-hugegraph-capacity | 无 | 表示图中用户的容量上限 | +| users-hugegraph-expire | 无 | 表示用户数据的过期时间 | +| users-hugegraph-hits | 无 | 表示用户数据缓存的命中次数 | +| users-hugegraph-miss | 无 | 表示用户数据缓存的未命中次数 | +| users-hugegraph-size | 无 | 表示当前图中用户的数量 | +| users_pwd-hugegraph-capacity | 无 | 表示users_pwd的容量上限 | +| users_pwd-hugegraph-expire | 无 | 表示users_pwd数据的过期时间 | +| users_pwd-hugegraph-hits | 无 | 表示users_pwd数据缓存的命中次数 | +| users_pwd-hugegraph-miss | 无 | 表示users_pwd数据缓存的未命中次数 | +| users_pwd-hugegraph-size | 无 | 表示当前图中users_pwd的数量 | +| vertex-hugegraph-capacity | 无 | 表示图中顶点的容量上限 | +| vertex-hugegraph-expire | 无 | 表示顶点数据的过期时间 | +| vertex-hugegraph-hits | 无 | 表示顶点数据缓存的命中次数 | +| vertex-hugegraph-miss | 无 | 表示顶点数据缓存的未命中次数 | +| vertex-hugegraph-size | 无 | 表示当前图中顶点的数量 | +| batch-write-threads | 无 | 表示批量写入操作时的线程数 | +| max-write-threads | 无 | 表示最大写入操作的线程数 | +| pending-tasks | 无 | 表示待处理的任务数 | +| workers | 无 | 表示当前工作线程的数量 | +| average-load-penalty | 无 | 表示平均加载延迟 | +| estimated-size | 无 | 表示估计的数据大小 | +| eviction-count | 无 | 表示被驱逐的数据条数 | +| eviction-weight | 无 | 表示被驱逐数据的权重 | +| hit-count | 无 | 表示缓存命中总数 | +| hit-rate | 无 | 表示缓存命中率 | +| load-count | 无 | 表示数据加载次数 | +| load-failure-count | 无 | 表示数据加载失败次数 | +| load-failure-rate | 无 | 表示数据加载失败率 | +| load-success-count | 无 | 表示数据加载成功次数 | +| long-run-compilation-count | 无 | 表示长时间运行的编译次数 | +| miss-count | 无 | 表示缓存未命中总数 | +| miss-rate | 无 | 表示缓存未命中率 | +| request-count | 无 | 表示总的请求次数 | +| total-load-time | 无 | 表示总的数据加载时间 | +| sessions | 无 | 表示当前的活动会话数量 | #### 指标集合:counters - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------------------------------- | -------- | ---------------------------------------- | -| GET-SUCCESS_COUNTER | 无 | 记录GET请求成功的次数 | -| GET-TOTAL_COUNTER | 无 | 记录GET请求的总次数 | -| favicon-ico-GET-FAILED_COUNTER | 无 | 记录获取favicon.ico失败的GET请求次数 | -| 
favicon-ico-GET-TOTAL_COUNTER | 无 | 记录获取favicon.ico的GET请求总次数 | -| graphs-HEAD-FAILED_COUNTER | 无 | 记录graphs资源的HEAD请求失败的次数 | -| graphs-HEAD-SUCCESS_COUNTER | 无 | 记录graphs资源的HEAD请求成功的次数 | -| graphs-HEAD-TOTAL_COUNTER | 无 | 记录graphs资源的HEAD请求的总次数 | -| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求成功的次数 | -| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求的总次数 | -| metircs-GET-FAILED_COUNTER | 无 | 记录获取metrics失败的GET请求次数 | -| metircs-GET-TOTAL_COUNTER | 无 | 记录获取metrics的GET请求总次数 | -| metrics-GET-SUCCESS_COUNTER | 无 | 记录获取metrics成功的GET请求次数 | -| metrics-GET-TOTAL_COUNTER | 无 | 记录获取metrics的GET请求总次数 | -| metrics-gauges-GET-SUCCESS_COUNTER | 无 | 记录获取metrics gauges成功的GET请求次数 | -| metrics-gauges-GET-TOTAL_COUNTER | 无 | 记录获取metrics gauges的GET请求总次数 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------------------------------------|------|------------------------------| +| GET-SUCCESS_COUNTER | 无 | 记录GET请求成功的次数 | +| GET-TOTAL_COUNTER | 无 | 记录GET请求的总次数 | +| favicon-ico-GET-FAILED_COUNTER | 无 | 记录获取favicon.ico失败的GET请求次数 | +| favicon-ico-GET-TOTAL_COUNTER | 无 | 记录获取favicon.ico的GET请求总次数 | +| graphs-HEAD-FAILED_COUNTER | 无 | 记录graphs资源的HEAD请求失败的次数 | +| graphs-HEAD-SUCCESS_COUNTER | 无 | 记录graphs资源的HEAD请求成功的次数 | +| graphs-HEAD-TOTAL_COUNTER | 无 | 记录graphs资源的HEAD请求的总次数 | +| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求成功的次数 | +| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求的总次数 | +| metircs-GET-FAILED_COUNTER | 无 | 记录获取metrics失败的GET请求次数 | +| metircs-GET-TOTAL_COUNTER | 无 | 记录获取metrics的GET请求总次数 | +| metrics-GET-SUCCESS_COUNTER | 无 | 记录获取metrics成功的GET请求次数 | +| metrics-GET-TOTAL_COUNTER | 无 | 记录获取metrics的GET请求总次数 | +| metrics-gauges-GET-SUCCESS_COUNTER | 无 | 记录获取metrics gauges成功的GET请求次数 | +| metrics-gauges-GET-TOTAL_COUNTER | 无 | 记录获取metrics gauges的GET请求总次数 | #### 指标集合:system +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|---------------------------------------------|------|--------------------------------| +| mem | 无 | 表示系统的总内存量 | +| mem_total | 无 | 表示系统的总内存量(与mem相同) | +| mem_used | 无 | 表示系统当前使用的内存量 | +| mem_free | 无 | 表示系统空闲的内存量 | +| mem_unit | 无 | 表示内存量的单位(如字节、千字节、兆字节等) | +| processors | 无 | 表示系统的处理器数量 | +| uptime | 无 | 表示系统运行时间,即从启动到现在的时间 | +| systemload_average | 无 | 表示系统的平均负载,反映了系统的繁忙程度 | +| heap_committed | 无 | 表示JVM堆内存的承诺大小,即保证可供JVM使用的堆内存大小 | +| heap_init | 无 | 表示JVM堆内存的初始大小 | +| heap_used | 无 | 表示JVM当前使用的堆内存大小 | +| heap_max | 无 | 表示JVM堆内存的最大可使用大小 | +| nonheap_committed | 无 | 表示JVM非堆内存的承诺大小 | +| nonheap_init | 无 | 表示JVM非堆内存的初始大小 | +| nonheap_used | 无 | 表示JVM当前使用的非堆内存大小 | +| nonheap_max | 无 | 表示JVM非堆内存的最大可使用大小 | +| thread_peak | 无 | 表示自JVM启动以来峰值线程数 | +| thread_daemon | 无 | 表示当前活跃的守护线程数 | +| thread_total_started | 无 | 表示自JVM启动以来总共启动过的线程数 | +| thread_count | 无 | 表示当前活跃的线程数 | +| garbage_collector_g1_young_generation_count | 无 | 表示G1垃圾收集器年轻代垃圾收集的次数 | +| garbage_collector_g1_young_generation_time | 无 | 表示G1垃圾收集器年轻代垃圾收集的总时间 | +| garbage_collector_g1_old_generation_count | 无 | 表示G1垃圾收集器老年代垃圾收集的次数 | +| garbage_collector_g1_old_generation_time | 无 | 表示G1垃圾收集器老年代垃圾收集的总时间 | +| garbage_collector_time_unit | 无 | 表示垃圾收集时间的单位(如毫秒、秒等) | -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------------------------------- | -------- | -------------------------------------------------- | -| mem | 无 | 表示系统的总内存量 | -| mem_total | 无 | 表示系统的总内存量(与mem相同) | -| mem_used | 无 | 表示系统当前使用的内存量 | -| mem_free | 无 | 表示系统空闲的内存量 | -| mem_unit | 无 | 表示内存量的单位(如字节、千字节、兆字节等) | -| processors | 无 | 表示系统的处理器数量 | -| uptime | 无 | 表示系统运行时间,即从启动到现在的时间 | -| systemload_average | 无 | 表示系统的平均负载,反映了系统的繁忙程度 | -| heap_committed | 无 | 表示JVM堆内存的承诺大小,即保证可供JVM使用的堆内存大小 | -| heap_init | 无 | 表示JVM堆内存的初始大小 | -| heap_used | 无 | 表示JVM当前使用的堆内存大小 | -| heap_max | 无 | 表示JVM堆内存的最大可使用大小 | -| nonheap_committed | 无 | 表示JVM非堆内存的承诺大小 | -| nonheap_init | 无 | 表示JVM非堆内存的初始大小 | -| nonheap_used | 无 | 表示JVM当前使用的非堆内存大小 | -| nonheap_max | 无 | 表示JVM非堆内存的最大可使用大小 | -| 
thread_peak | 无 | 表示自JVM启动以来峰值线程数 | -| thread_daemon | 无 | 表示当前活跃的守护线程数 | -| thread_total_started | 无 | 表示自JVM启动以来总共启动过的线程数 | -| thread_count | 无 | 表示当前活跃的线程数 | -| garbage_collector_g1_young_generation_count | 无 | 表示G1垃圾收集器年轻代垃圾收集的次数 | -| garbage_collector_g1_young_generation_time | 无 | 表示G1垃圾收集器年轻代垃圾收集的总时间 | -| garbage_collector_g1_old_generation_count | 无 | 表示G1垃圾收集器老年代垃圾收集的次数 | -| garbage_collector_g1_old_generation_time | 无 | 表示G1垃圾收集器老年代垃圾收集的总时间 | -| garbage_collector_time_unit | 无 | 表示垃圾收集时间的单位(如毫秒、秒等) | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md index 4b04a003ed6..d913d0ff9f9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md @@ -5,7 +5,7 @@ sidebar_label: 邮箱信息监控 keywords: [开源监控系统, 开源网络监控, 邮箱信息监控] --- -> IMAP,即Internet Message Access Protocol(互联网邮件访问协议),您可以通过这种协议从邮件服务器上获取邮箱的详细信息 +> IMAP,即Internet Message Access Protocol(互联网邮件访问协议),您可以通过这种协议从邮件服务器上获取邮箱的详细信息 > 您可以点击`新建 QQ 邮箱监控`或`新建网易邮箱监控`并进行配置,或者选择`更多操作`,导入已有配置。 ### 启用 IMAP 服务 @@ -21,7 +21,7 @@ keywords: [开源监控系统, 开源网络监控, 邮箱信息监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |:----------|--------------------------------------------------| | 监控Host | IMAP 邮件服务器域名。注意⚠️不带协议头 (例如: https://, http://) 。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -40,8 +40,9 @@ keywords: [开源监控系统, 开源网络监控, 邮箱信息监控] #### 指标集合:(邮箱中文件夹名称) -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|---------------| | 邮件总数 | | 该文件夹下所有邮件数量 | | 最近收到邮件总数 | | 该文件夹下最近收到邮件数量 | | 未读邮件总数 | | 该文件夹下未读邮件数量 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md index 1881d79f4ee..00ff0b7f679 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md @@ -5,64 +5,62 @@ sidebar_label: InfluxDB 数据库 keywords: [开源监控系统, 开源数据库监控, InfluxDB 数据库监控] --- - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为8086。 | -| URL | 数据库连接URL,一般是由host拼接,不需要添加 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为8086。 | +| URL | 数据库连接URL,一般是由host拼接,不需要添加 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:influxdb 基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------| ----------- |--------| -| build_date | 无 | 创建日期 | -| os | 无 | 操作系统 | -| cpus | 无 | cpus | -| version | 无 | 版本号 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|--------| +| build_date | 无 | 创建日期 | +| os | 无 | 操作系统 | +| cpus | 无 | cpus | +| version | 无 | 版本号 | #### 指标集合:http 响应时间 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|---------| | handler | 无 | handler | | path | 无 | 路径 | | response_code | 无 | 返回code | | method | 无 | 请求方法 | | user_agent | 无 | 用户代理 | -| status | 无 | 状态 | +| status | 无 | 状态 | #### 指标集合:正在排队的 TSM 数 -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------------|------|------------| -| bucket | 无 | 存储桶 | -| engine | 无 | 引擎类型 | -| id | 无 | 标识符 | -| level | 无 | 级别 | -| path | 无 | 数据文件路径 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| bucket | 无 | 存储桶 | +| engine | 无 | 引擎类型 | +| id | 无 | 标识符 | +| level | 无 | 级别 | +| path | 无 | 数据文件路径 | #### 指标集合:HTTP写入请求的字节数量 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- 
|------|--------| -| endpoint | 无 | 终点 | -| org_id | 无 | 组织标识符 | -| status | 无 | 状态 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| endpoint | 无 | 终点 | +| org_id | 无 | 组织标识符 | +| status | 无 | 状态 | #### 指标集合:质量控制请求总数 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------| -| result | 无 | 结果 | -| org | 无 | 组织标识符 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| result | 无 | 结果 | +| org | 无 | 组织标识符 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md index ac56245fee4..97469a71932 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合:basic_influxdb_memstats_alloc -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|---------| | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合: influxdb_database_numMeasurements -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|---------| | job | 无 | 指标名称 | | instance | 无 | 指标所属实例 | @@ -46,7 +46,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合: influxdb_query_rate_seconds -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|---------| | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | @@ -54,10 +54,9 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合: influxdb_queryExecutor_queriesFinished_10s -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
|-----------|------|---------| | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | | value | 无 | 指标值 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md index 0f0dc0e0ecb..fceb485f05b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md @@ -5,15 +5,15 @@ sidebar_label: IoTDB数据库 keywords: [开源监控系统, 开源数据库监控, IoTDB数据库监控] --- -> 对 Apache IoTDB 物联网时序数据库的运行状态(JVM相关),内存任务集群等相关指标进行监测。 +> 对 Apache IoTDB 物联网时序数据库的运行状态(JVM相关),内存任务集群等相关指标进行监测。 -## 监控前操作 +## 监控前操作 -您需要在 IoTDB 开启`metrics`功能,他将提供 prometheus metrics 形式的接口数据。 +您需要在 IoTDB 开启`metrics`功能,他将提供 prometheus metrics 形式的接口数据。 -开启`metrics`功能, 参考 [官方文档](https://iotdb.apache.org/zh/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) +开启`metrics`功能, 参考 [官方文档](https://iotdb.apache.org/zh/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) -主要如下步骤: +主要如下步骤: 1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server @@ -41,13 +41,13 @@ predefinedMetrics: - FILE ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 3. 
在 HertzBeat 添加对应 IoTDB 监控即可。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -57,37 +57,37 @@ predefinedMetrics: | 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | | 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:cluster_node_status +#### 指标集合:cluster_node_status -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- |------|-------------------------| +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|-------------------------| | name | 无 | 节点名称IP | | status | 无 | 节点状态,1=online 2=offline | #### 指标集合:jvm_memory_committed_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|------------------| | area | 无 | heap内存或nonheap内存 | | id | 无 | 内存区块 | -| value | MB | 当前向JVM申请的内存大小 | +| value | MB | 当前向JVM申请的内存大小 | #### 指标集合:jvm_memory_used_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| | area | 无 | heap内存或nonheap内存 | | id | 无 | 内存区块 | -| value | MB | JVM已使用内存大小 | +| value | MB | JVM已使用内存大小 | #### 指标集合:jvm_threads_states_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------| -| state | 无 | 线程状态 | -| count | 无 | 线程状态对应线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------| +| state | 无 | 线程状态 | +| count | 无 | 线程状态对应线程数量 | #### 指标集合:quantity 业务数据 @@ -99,22 +99,23 @@ predefinedMetrics: #### 指标集合:cache_hit 缓存 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------------------------------------------------| -| name | 无 | 缓存名称 chunk/timeSeriesMeta/bloomFilter | -| value | % | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------| +| name | 无 | 缓存名称 chunk/timeSeriesMeta/bloomFilter | +| value | % | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | #### 指标集合:queue 任务队列 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- 
|------|---------------------------------------------------| -| name | 无 | 队列名称 compaction_inner/compaction_cross/flush | -| status | 无 | 状态 running/waiting | -| value | 无 | 当前时间任务数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|----------------------------------------------| +| name | 无 | 队列名称 compaction_inner/compaction_cross/flush | +| status | 无 | 状态 running/waiting | +| value | 无 | 当前时间任务数 | #### 指标集合:thrift_connections -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-------------| -| name | 无 | 名称 | -| connection | 无 | thrift当前连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|-------------| +| name | 无 | 名称 | +| connection | 无 | thrift当前连接数 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md index b7414f878b1..745a4f70a88 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md @@ -1,59 +1,66 @@ --- id: issue title: 常见问题 -sidebar_label: 常见问题 +sidebar_label: 常见问题 --- -### 监控常见问题 +### 监控常见问题 -1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** -> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http +1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** -2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** -> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) +> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http + +2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** + +> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 
安装包部署的hertzbeat下ping连通性监控异常 -安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 > docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address 4. 配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 > 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 -> 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine +> 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine 5. 配置http api监控,用于进行业务接口探测,确保业务可以用,另外接口有进行token鉴权校验,"Authorization:Bearer eyJhbGciOiJIUzI1....",配置后测试,提示“StatusCode 401”。服务端应用收到的token为"Authorization:Bearer%20eyJhbGciOiJIUzI1....",hertzbeat对空格进行转义为“%20”,服务器没有转义导致鉴权失败,建议转义功能作为可选项。 - -### Docker部署常见问题 +### Docker部署常见问题 1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** -此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` +> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. **按照流程部署,访问 http://ip:1157/ 无界面** -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 -3. **日志报错TDengine连接或插入SQL失败** +3. 
**日志报错TDengine连接或插入SQL失败** + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 1. **按照流程部署,访问 http://ip:1157/ 无界面** 请参考下面几点排查问题: + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md index 04a15823529..b60a5882b9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md @@ -15,17 +15,18 @@ keywords: [开源监控系统, 开源中间件监控, Jetty应用服务器监控 #### Jetty应用服务器开启JMX协议步骤 -[参考官方文档](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) +[参考官方文档](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) -1. 在 Jetty 启动 JMX JMX-REMOTE 模块 +1. 在 Jetty 启动 JMX JMX-REMOTE 模块 ```shell java -jar $JETTY_HOME/start.jar --add-module=jmx java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ``` -命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 -2. 编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 +命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 + +2. 
编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 **`localhost` 需修改为对外暴露 IP** @@ -50,49 +51,45 @@ java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | 
-| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md index c97cc73b003..f046b3ef6a0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md @@ -28,57 +28,54 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache (限JDK8及以下) -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| committed | kb | 总量 | -| 
init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md index 2b4ed0514b7..3cb4d74132c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md @@ -7,7 +7,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] > 对Kafka的通用性能指标进行采集监控 -**使用协议:JMX** +**使用协议:JMX** ### 监控前操作 @@ -18,7 +18,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 
修改 Kafka 启动脚本 修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` -在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 +在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 ```shell export JMX_PORT=9999; @@ -32,71 +32,65 @@ export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management. ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:server_info -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Version | 无 | Kafka版本 | -| StartTimeMs | ms | 运行时间 | -| CommitId | 无 | 版本提交ID | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------| +| Version | 无 | Kafka版本 | +| StartTimeMs | ms | 运行时间 | +| CommitId | 无 | 版本提交ID | #### 指标集合:code_cache -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:active_controller_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Value | 个 | 活跃监控器数量 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------| +| Value | 个 | 活跃监控器数量 | #### 指标集合:broker_partition_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | 
----------- | ----------- | -| Value | 个 | 分区数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------| +| Value | 个 | 分区数量 | #### 指标集合:broker_leader_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Value | 个 | 领导者数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------| +| Value | 个 | 领导者数量 | #### 指标集合:broker_handler_avg_percent 请求处理器空闲率 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| EventType | 无 | 类型 | -| RateUnit | 具体情况具体分析 | 单位 | -| Count | 个 | 数量 | -| OneMinuteRate | % | 一分钟处理率 | -| FiveMinuteRate | % | 五分钟处理率 | -| MeanRate | 无 | 平均处理率 | -| FifteenMinuteRate | 无 | 十五分钟处理率 | - - -> 其他指标见文知意,欢迎贡献一起优化文档。 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|----------|---------| +| EventType | 无 | 类型 | +| RateUnit | 具体情况具体分析 | 单位 | +| Count | 个 | 数量 | +| OneMinuteRate | % | 一分钟处理率 | +| FiveMinuteRate | % | 五分钟处理率 | +| MeanRate | 无 | 平均处理率 | +| FifteenMinuteRate | 无 | 十五分钟处理率 | + +> 其他指标见文知意,欢迎贡献一起优化文档。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md index a0b0b625485..e0e5ecf7e50 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md @@ -15,7 +15,7 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -34,7 +34,7 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL #### 指标集合:kafka_brokers -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | \_\_name__ | 无 | 指标名称 | | instance | 无 | 指标所属实例 | @@ -43,7 +43,7 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL #### 指标集合: kafka_topic_partitions -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 
指标帮助描述 | |------------|------|---------| | \_\_name__ | 无 | 指标名称 | | instance | 无 | 指标所属实例 | @@ -54,3 +54,4 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL 1. kafka启用了JMX监控,可以使用 [Kafka](kafka) 监控; 2. kafka集群部署kafka_exporter暴露的监控指标,可以参考 [Prometheus任务](prometheus) 配置Prometheus采集任务监控kafka。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md index f45da8d9b27..aa242d93a6b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md @@ -7,14 +7,13 @@ keywords: [开源监控系统, 开源Kubernetes监控] > 对kubernetes的通用性能指标进行采集监控。 - ## 监控前操作 如果想要监控 `Kubernetes` 中的信息,则需要获取到可访问Api Server的授权TOKEN,让采集请求获取到对应的信息。 -参考获取token步骤 +参考获取token步骤 -#### 方式一: +#### 方式一: 1. 创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +26,9 @@ kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin -- kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` + #### 方式二: + ``` kubectl create serviceaccount cluster-admin @@ -36,13 +37,14 @@ kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-adm kubectl create token --duration=1000h cluster-admin ``` + ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-------------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| APiServer端口 | K8s APiServer端口,默认6443 | +| APiServer端口 | K8s APiServer端口,默认6443 | | token | 授权Access Token | | URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | | 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | @@ -53,44 +55,45 @@ kubectl create token --duration=1000h cluster-admin #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- |--------| -| node_name | 无 | 节点名称 | -| is_ready | 无 | 节点状态 | -| 
capacity_cpu | 无 | CPU容量 | -| allocatable_cpu | 无 | 已分配CPU | -| capacity_memory | 无 | 内存容量 | -| allocatable_memory | 无 | 已分配内存 | -| creation_time | 无 | 节点创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------| +| node_name | 无 | 节点名称 | +| is_ready | 无 | 节点状态 | +| capacity_cpu | 无 | CPU容量 | +| allocatable_cpu | 无 | 已分配CPU | +| capacity_memory | 无 | 内存容量 | +| allocatable_memory | 无 | 已分配内存 | +| creation_time | 无 | 节点创建时间 | #### 指标集合:namespaces -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- |-------------| -| namespace | 无 | namespace名称 | -| status | 无 | 状态 | -| creation_time | 无 | 创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-------------| +| namespace | 无 | namespace名称 | +| status | 无 | 状态 | +| creation_time | 无 | 创建时间 | #### 指标集合:pods -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- |----------------| -| pod | 无 | pod名称 | -| namespace | 无 | pod所属namespace | -| status | 无 | pod状态 | -| restart | 无 | 重启次数 | -| host_ip | 无 | 所在主机IP | -| pod_ip | 无 | pod ip | -| creation_time | 无 | pod创建时间 | -| start_time | 无 | pod启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|----------------| +| pod | 无 | pod名称 | +| namespace | 无 | pod所属namespace | +| status | 无 | pod状态 | +| restart | 无 | 重启次数 | +| host_ip | 无 | 所在主机IP | +| pod_ip | 无 | pod ip | +| creation_time | 无 | pod创建时间 | +| start_time | 无 | pod启动时间 | #### 指标集合:services -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- |------|--------------------------------------------------------| -| service | 无 | service名称 | -| namespace | 无 | service所属namespace | -| type | 无 | service类型 ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | 无 | cluster ip | -| selector | 无 | tag selector匹配 | -| creation_time | 无 | 创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|--------------------------------------------------------| +| service | 无 | service名称 | +| namespace | 无 | service所属namespace | +| type | 无 | service类型 ClusterIP NodePort LoadBalancer ExternalName | +| cluster_ip | 无 | 
cluster ip | +| selector | 无 | tag selector匹配 | +| creation_time | 无 | 创建时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md index 853be6610b4..4a69c04495e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, Linux操作系统监控 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| 
context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md index 5986e109227..291c8eb8bf6 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md @@ -15,49 +15,46 @@ keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为3306。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|------------| +| version | 无 | 数据库版本 | +| port | 无 | 数据库暴露服务端口 | +| datadir | 无 | 数据库存储数据盘地址 | +| max_connections | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| threads_created | 无 | MariaDB已经创建的总连接数 | -| threads_connected | 无 | MariaDB已经连接的连接数 | -| threads_cached | 无 | MariaDB当前缓存的连接数 | -| threads_running | 无 | MariaDB当前活跃的连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|------------------| +| threads_created | 无 | 
MariaDB已经创建的总连接数 | +| threads_connected | 无 | MariaDB已经连接的连接数 | +| threads_cached | 无 | MariaDB当前缓存的连接数 | +| threads_running | 无 | MariaDB当前活跃的连接数 | #### 指标集合:innodb -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|-------------------------| +| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | +| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | +| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | +| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md index 0debad01ce9..db88c1ac5fc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md @@ -34,7 +34,7 @@ STAT version 1.4.15 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |------|---------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不带协议头(例如:https://,http://) | | 监控名称 | 标识此监控的名称。名称需要唯一 | @@ -47,7 +47,7 @@ STAT version 1.4.15 #### 指标集:server_info -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------|------|-------------------| | pid | 无 | Memcache 服务器进程 ID | | uptime | s | 服务器已运行的秒数 | @@ -65,3 +65,4 @@ STAT version 1.4.15 | cmd_flush | 无 | Flush 命令请求数 | | get_misses | 无 | Get 命令未命中次数 | | delete_misses | 无 | Delete 命令未命中次数 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md index 23c4a866809..8c54174b54a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -27,7 +27,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:构建信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------|------|-------------------------| | version | 无 | MongoDB版本信息 | | gitVersion | 无 | 源代码git版本 | @@ -39,7 +39,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:服务器文档 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|--------| | deleted | 无 | 已删除数 | | inserted | 无 | 已插入数 | @@ -48,21 +48,21 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:服务器操作 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|-------------------| | scanAndOrder | 无 | 执行查询时需要扫描并进行排序的次数 | | writeConflicts | 无 | 写冲突的次数 | #### 指标集合: 服务器_ttl -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------|------|-------------------------------| | deletedDocuments | 无 | 删除的过期文档数量 | | passes | 无 | TTL清理过程的总传递次数,每次传递会检查并删除过期的文档 | #### 指标集合:系统信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-----------------------| | currentTime | 无 | 当前时间 | | hostname | 无 | 主机名 | @@ -75,7 +75,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:操作系统信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|--------| | type | 无 | 操作系统类型 | | name | 无 | 操作系统名称 | @@ -83,7 +83,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:额外信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------------|------|----------------------| | versionString | 无 | 版本 | | libcVersion | 无 | 标准库版本 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md index c2d24ed8c22..01167c2fc7b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md @@ -4,79 +4,75 @@ title: 监控:MongoDB Atlas 数据库 sidebar_label: MongoDB Atlas 数据库 keywords: [ 开源监控系统, 开源数据库监控, MongoDB Atlas 数据库监控 ] --- + > 对MongoDB Atlas 数据库的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------------------- | -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 用户名 | MongoDB用户名,可选。 | -| 密码 | MongoDB密码,可选。 | -| 数据库 | 数据库名称 | -| 认证数据库 | 存储用户凭据的数据库名称。 | -| 连接超时时间 | 设置连接MongoDB未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| 集群模式 | MongoDB Atlas集群取值为:mongodb-atlas | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒。 | -| 绑定标签 | 用于对监控资源进行分类管理。 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息。 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 用户名 | MongoDB用户名,可选。 | +| 密码 | MongoDB密码,可选。 | +| 数据库 | 数据库名称 | +| 认证数据库 | 存储用户凭据的数据库名称。 | +| 连接超时时间 | 设置连接MongoDB未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| 集群模式 | MongoDB Atlas集群取值为:mongodb-atlas | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒。 | +| 绑定标签 | 用于对监控资源进行分类管理。 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息。 | ### 采集指标 #### 指标集合:构建信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- | ----------------------------- | -| version | 无 | MongoDB版本信息 | -| gitVersion | 无 | 源代码git版本 | -| sysInfo | 无 | 系统信息 | -| allocator | 无 | MongoDB所使用的内存分配器 | -| javascriptEngine | 无 | MongoDB所使用的JavaScript引擎 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|-------------------------| +| version | 无 | MongoDB版本信息 | +| gitVersion | 无 | 源代码git版本 | +| sysInfo | 无 | 系统信息 | +| allocator | 无 | MongoDB所使用的内存分配器 | +| javascriptEngine | 无 | 
MongoDB所使用的JavaScript引擎 | #### 指标集合:服务器文档 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------- | -| delete | 无 | 已删除数 | -| insert | 无 | 已插入数 | -| update | 无 | 更新数 | -| query | 无 | 查询数 | -| getmore | 无 | 光标中剩余文档的请求数 | -| command | 无 | 执行命令操作的总数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|-------------| +| delete | 无 | 已删除数 | +| insert | 无 | 已插入数 | +| update | 无 | 更新数 | +| query | 无 | 查询数 | +| getmore | 无 | 光标中剩余文档的请求数 | +| command | 无 | 执行命令操作的总数 | #### 指标集合:网络操作 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | ---------------------------------- | -| Bytes In | 无 | 执行查询时需要扫描并进行排序的次数 | -| Bytes Out | 无 | 写冲突的次数 | -| Request Num | 无 | 请求数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|-------------------| +| Bytes In | 无 | 执行查询时需要扫描并进行排序的次数 | +| Bytes Out | 无 | 写冲突的次数 | +| Request Num | 无 | 请求数 | #### 指标集合: 连接信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------- | -------- | ------------------ | -| Current Connections | 无 | 当前正在进行连接数 | -| Available Connections | 无 | 可用连接数 | -| Total Created Connections | 无 | 创建的连接总数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|-----------| +| Current Connections | 无 | 当前正在进行连接数 | +| Available Connections | 无 | 可用连接数 | +| Total Created Connections | 无 | 创建的连接总数 | #### 指标集合:数据库统计 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|-------|----------| +| Database Name | 无 | 数据库名称 | +| Collections | 无 | 集合数 | +| Views | 无 | 视图数 | +| Objects | 无 | 文档数 | +| Document Avg Size | Bytes | 文档平均大小 | +| Document Size | Bytes | 文档大小 | +| Storage Size | Bytes | 使用存储空间大小 | +| Indexes | 无 | 索引数 | +| Index Size | Bytes | 索引大小 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------- | -------- | ---------------- | -| Database Name | 无 | 数据库名称 | -| Collections | 无 | 集合数 | -| Views | 无 | 视图数 | -| Objects | 无 | 文档数 | -| Document Avg Size | Bytes | 文档平均大小 | -| Document Size | Bytes | 文档大小 | -| Storage Size | Bytes | 使用存储空间大小 | -| Indexes | 无 | 索引数 | -| Index Size | Bytes | 索引大小 | diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md index 811cc2df135..4d47823d43b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md @@ -15,49 +15,46 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为3306。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|------------| +| version | 无 | 数据库版本 | +| port | 无 | 数据库暴露服务端口 | +| datadir | 无 | 数据库存储数据盘地址 | +| max_connections | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| threads_created | 无 | MySql已经创建的总连接数 | -| threads_connected | 无 | MySql已经连接的连接数 | -| threads_cached | 无 | 
MySql当前缓存的连接数 | -| threads_running | 无 | MySql当前活跃的连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|----------------| +| threads_created | 无 | MySql已经创建的总连接数 | +| threads_connected | 无 | MySql已经连接的连接数 | +| threads_cached | 无 | MySql当前缓存的连接数 | +| threads_running | 无 | MySql当前活跃的连接数 | #### 指标集合:innodb -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|-------------------------| +| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | +| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | +| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | +| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md index ef643cf2fe3..84b432f4651 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md @@ -1,93 +1,95 @@ ---- -id: nacos -title: 监控:Nacos分布式监控 -sidebar_label: Nacos分布式 -keywords: [开源监控系统, 中间件监控, Nacos分布式监控] ---- - -> 通过调用 Nacos Metrics 接口对 Nacos 注册配置中心服务的通用指标进行采集监控。 - -### 监控前操作 - -#### 搭建Nacos集群暴露metrics数据 - -1. 按照[部署文档](https://nacos.io/zh-cn/docs/deployment.html)搭建好Nacos集群。 -2. 配置application.properties文件,暴露metrics数据。 -``` -management.endpoints.web.exposure.include=* -``` -3. 
访问```{ip}:8848/nacos/actuator/prometheus```,查看是否能访问到metrics数据。 - -更多信息请参考[Nacos 监控手册](https://nacos.io/zh-cn/docs/monitor-guide.html)。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 服务器Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Nacos服务端口 | Nacos服务对外提供的端口,默认为8848。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:jvm - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| system_cpu_usage | 无 | CPU使用率 | -| system_load_average_1m | 无 | load | -| jvm_memory_used_bytes | 字节 | 内存使用字节,包含各种内存区 | -| jvm_memory_max_bytes | 字节 | 内存最大字节,包含各种内存区 | -| jvm_gc_pause_seconds_count | 无 | gc次数,包含各种gc | -| jvm_gc_pause_seconds_sum | 秒 | gc耗时,包含各种gc | -| jvm_threads_daemon | 无 | 线程数 | - -#### 指标集合:Nacos - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| http_server_requests_seconds_count | 秒 | http请求次数,包括多种(url,方法,code) | -| http_server_requests_seconds_sum | 秒 | http请求总耗时,包括多种(url,方法,code) | -| nacos_timer_seconds_sum | 秒 | Nacos config水平通知耗时 | -| nacos_timer_seconds_count | 无 | Nacos config水平通知次数 | -| nacos_monitor{name='longPolling'} | 无 | Nacos config长连接数 | -| nacos_monitor{name='configCount'} | 无 | Nacos config配置个数 | -| nacos_monitor{name='dumpTask'} | 无 | Nacos config配置落盘任务堆积数 | -| nacos_monitor{name='notifyTask'} | 无 | Nacos config配置水平通知任务堆积数 | -| nacos_monitor{name='getConfig'} | 无 | Nacos config读配置统计数 | -| nacos_monitor{name='publish'} | 无 | Nacos config写配置统计数 | -| nacos_monitor{name='ipCount'} | 无 | Nacos naming ip个数 | -| nacos_monitor{name='domCount'} | 无 | Nacos naming域名个数(1.x 版本) | -| nacos_monitor{name='serviceCount'} | 无 | Nacos naming域名个数(2.x 版本) | -| nacos_monitor{name='failedPush'} | 无 | Nacos naming推送失败数 | -| nacos_monitor{name='avgPushCost'} | 秒 | Nacos naming平均推送耗时 | -| nacos_monitor{name='leaderStatus'} | 无 | Nacos naming角色状态 | -| nacos_monitor{name='maxPushCost'} | 秒 | Nacos 
naming最大推送耗时 | -| nacos_monitor{name='mysqlhealthCheck'} | 无 | Nacos naming mysql健康检查次数 | -| nacos_monitor{name='httpHealthCheck'} | 无 | Nacos naming http健康检查次数 | -| nacos_monitor{name='tcpHealthCheck'} | 无 | Nacos naming tcp健康检查次数 | - -#### 指标集合:Nacos 异常 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| nacos_exception_total{name='db'} | 无 | 数据库异常 | -| nacos_exception_total{name='configNotify'} | 无 | Nacos config水平通知失败 | -| nacos_exception_total{name='unhealth'} | 无 | Nacos config server之间健康检查异常 | -| nacos_exception_total{name='disk'} | 无 | Nacos naming写磁盘异常 | -| nacos_exception_total{name='leaderSendBeatFailed'} | 无 | Nacos naming leader发送心跳异常 | -| nacos_exception_total{name='illegalArgument'} | 无 | 请求参数不合法 | -| nacos_exception_total{name='nacos'} | 无 | Nacos请求响应内部错误异常(读写失败,没权限,参数错误) | - -#### 指标集合:client - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| nacos_monitor{name='subServiceCount'} | 无 | 订阅的服务数 | -| nacos_monitor{name='pubServiceCount'} | 无 | 发布的服务数 | -| nacos_monitor{name='configListenSize'} | 无 | 监听的配置数 | -| nacos_client_request_seconds_count | 无 | 请求的次数,包括多种(url,方法,code) | -| nacos_client_request_seconds_sum | 秒 | 请求的总耗时,包括多种(url,方法,code) | - \ No newline at end of file +--- +id: nacos +title: 监控:Nacos分布式监控 +sidebar_label: Nacos分布式 +keywords: [开源监控系统, 中间件监控, Nacos分布式监控] +--- + +> 通过调用 Nacos Metrics 接口对 Nacos 注册配置中心服务的通用指标进行采集监控。 + +### 监控前操作 + +#### 搭建Nacos集群暴露metrics数据 + +1. 按照[部署文档](https://nacos.io/zh-cn/docs/deployment.html)搭建好Nacos集群。 +2. 配置application.properties文件,暴露metrics数据。 + +``` +management.endpoints.web.exposure.include=* +``` + +3. 
访问```{ip}:8848/nacos/actuator/prometheus```,查看是否能访问到metrics数据。 + +更多信息请参考[Nacos 监控手册](https://nacos.io/zh-cn/docs/monitor-guide.html)。 + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 服务器Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Nacos服务端口 | Nacos服务对外提供的端口,默认为8848。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | + +### 采集指标 + +#### 指标集合:jvm + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------------|------|----------------| +| system_cpu_usage | 无 | CPU使用率 | +| system_load_average_1m | 无 | load | +| jvm_memory_used_bytes | 字节 | 内存使用字节,包含各种内存区 | +| jvm_memory_max_bytes | 字节 | 内存最大字节,包含各种内存区 | +| jvm_gc_pause_seconds_count | 无 | gc次数,包含各种gc | +| jvm_gc_pause_seconds_sum | 秒 | gc耗时,包含各种gc | +| jvm_threads_daemon | 无 | 线程数 | + +#### 指标集合:Nacos + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------------------------|------|-----------------------------| +| http_server_requests_seconds_count | 秒 | http请求次数,包括多种(url,方法,code) | +| http_server_requests_seconds_sum | 秒 | http请求总耗时,包括多种(url,方法,code) | +| nacos_timer_seconds_sum | 秒 | Nacos config水平通知耗时 | +| nacos_timer_seconds_count | 无 | Nacos config水平通知次数 | +| nacos_monitor{name='longPolling'} | 无 | Nacos config长连接数 | +| nacos_monitor{name='configCount'} | 无 | Nacos config配置个数 | +| nacos_monitor{name='dumpTask'} | 无 | Nacos config配置落盘任务堆积数 | +| nacos_monitor{name='notifyTask'} | 无 | Nacos config配置水平通知任务堆积数 | +| nacos_monitor{name='getConfig'} | 无 | Nacos config读配置统计数 | +| nacos_monitor{name='publish'} | 无 | Nacos config写配置统计数 | +| nacos_monitor{name='ipCount'} | 无 | Nacos naming ip个数 | +| nacos_monitor{name='domCount'} | 无 | Nacos naming域名个数(1.x 版本) | +| nacos_monitor{name='serviceCount'} | 无 | Nacos naming域名个数(2.x 版本) | +| nacos_monitor{name='failedPush'} | 无 | Nacos naming推送失败数 | +| nacos_monitor{name='avgPushCost'} | 秒 | Nacos naming平均推送耗时 | +| nacos_monitor{name='leaderStatus'} 
| 无 | Nacos naming角色状态 | +| nacos_monitor{name='maxPushCost'} | 秒 | Nacos naming最大推送耗时 | +| nacos_monitor{name='mysqlhealthCheck'} | 无 | Nacos naming mysql健康检查次数 | +| nacos_monitor{name='httpHealthCheck'} | 无 | Nacos naming http健康检查次数 | +| nacos_monitor{name='tcpHealthCheck'} | 无 | Nacos naming tcp健康检查次数 | + +#### 指标集合:Nacos 异常 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------------------------------------|------|--------------------------------| +| nacos_exception_total{name='db'} | 无 | 数据库异常 | +| nacos_exception_total{name='configNotify'} | 无 | Nacos config水平通知失败 | +| nacos_exception_total{name='unhealth'} | 无 | Nacos config server之间健康检查异常 | +| nacos_exception_total{name='disk'} | 无 | Nacos naming写磁盘异常 | +| nacos_exception_total{name='leaderSendBeatFailed'} | 无 | Nacos naming leader发送心跳异常 | +| nacos_exception_total{name='illegalArgument'} | 无 | 请求参数不合法 | +| nacos_exception_total{name='nacos'} | 无 | Nacos请求响应内部错误异常(读写失败,没权限,参数错误) | + +#### 指标集合:client + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------------------------|------|--------------------------| +| nacos_monitor{name='subServiceCount'} | 无 | 订阅的服务数 | +| nacos_monitor{name='pubServiceCount'} | 无 | 发布的服务数 | +| nacos_monitor{name='configListenSize'} | 无 | 监听的配置数 | +| nacos_client_request_seconds_count | 无 | 请求的次数,包括多种(url,方法,code) | +| nacos_client_request_seconds_sum | 秒 | 请求的总耗时,包括多种(url,方法,code) | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md index d070101da8f..ded4a06ad2f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md @@ -14,7 +14,7 @@ keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGr nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 ``` -### +### **1、通过 stats 和 rocksdb stats 接口获取可用参数。** @@ -34,7 +34,7 @@ nebulaGraph_stats 是 NebulaGraph 
的统计信息,rocksdb_stats 是 RocksDB ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-------------|--------------------------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️没有协议头(例如:https://、http://) | | 监控名称 | 识别此监控的名称。名称需要唯一 | @@ -53,7 +53,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 指标太多,相关链接如下 **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------------------------------------------|------|--------| | 达到内存水位线的语句的数量(rate) | | | | 达到内存水位线的语句的数量(sum) | | | @@ -116,8 +116,9 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 指标太多,相关链接如下 **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------|------|------------------------| | rocksdb.backup.read.bytes | | 备份 RocksDB 数据库期间读取的字节数 | | rocksdb.backup.write.bytes | | 指标名称 | | ... | | ... 
| + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md index 851f6dd7946..252f5f47d8a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md @@ -11,7 +11,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |---------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -27,7 +27,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:基础信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|---------| | responseTime | 无 | 响应时间 | | charset | 无 | 字符集 | @@ -35,21 +35,21 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Session -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------------|------|------------| | session | 无 | session的数量 | | running_query_count | 无 | 正在执行的查询的数量 | #### 指标集合:后台任务 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------------| | queue_jobs | 无 | 等待中的后台任务 | | running_jobs | 无 | 正在执行的后台任务的数量 | #### 指标集合:节点信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------|------|-----------------| | total_storage_node | 无 | storage节点的数量 | | offline_storage_node | 无 | 离线的storage节点的数量 | @@ -60,7 +60,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Storage节点 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------------------|------|---------------------| | host | 无 | 节点地址 | | port | 无 | 端口 | @@ -72,7 +72,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Meta节点 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|---------------------| | host | 无 | 节点地址 | | port | 无 | 端口 | @@ -81,7 +81,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Graph节点 
-| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|---------------------| | host | 无 | 节点地址 | | port | 无 | 端口 | @@ -89,3 +89,4 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, | version | 无 | 版本 | > 如果需要自定义监控模板采集NebulaGraph集群的数据,请参考: [NGQL自定义监控](../advanced/extend-ngql.md) + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md index a509ff7da2a..82908df358b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md @@ -13,18 +13,19 @@ keywords: [开源监控工具, 开源Java监控工具, 监控Nginx指标] 如果你想使用这种监控方式监控 'Nginx' 的信息,你需要修改你的 Nginx 配置文件以启用监控模块。 -### 启用 ngx_http_stub_status_module +### 启用 ngx_http_stub_status_module 1. 检查是否已添加 `ngx_http_stub_status_module` ```shell nginx -V ``` + 查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 2. 编译安装 Nginx, 添加 `ngx_http_stub_status_module` 模块 -下载 Nginx 并解压,在目录下执行 +下载 Nginx 并解压,在目录下执行 ```shell ./configure --prefix=/usr/local/nginx --with-http_stub_status_module @@ -58,7 +59,7 @@ nginx -s reload 5. 在浏览器访问 `http://localhost/nginx-status` 即可查看 Nginx 监控状态信息。 -### 启用 `ngx_http_reqstat_module` +### 启用 `ngx_http_reqstat_module` 1. 安装 `ngx_http_reqstat_module` 模块 @@ -107,49 +108,47 @@ nginx -s reload 4. 
在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 - **参考文档: https://blog.csdn.net/weixin_55985097/article/details/116722309** **⚠️注意监控模块的端点路径为 `/nginx-status` `/req-status`** ### 配置参数 -| 参数名 | 参数描述 | -|-------------------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | Nginx 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | +| 参数名 | 参数描述 | +|--------|-----------------------------------------------------| +| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | +| 监控名称 | 标识此监控的名称。名称需要唯一 | +| 端口 | Nginx 提供的端口 | +| 超时时间 | 允许收集响应时间 | +| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | +| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | +| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | ### 收集指标 #### 指标收集:nginx_status -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------------|------------| -| 接收连接数 | | 已接受的连接 | -| 处理连接数 | | 成功处理的连接 | -| 活动连接数 | | 当前活动连接 | -| 丢弃连接数 | | 丢弃的连接 | -| 请求连接数 | | 客户端请求 | -| 读连接数 | | 正在执行读操作的连接 | -| 写连接数 | | 正在执行写操作的连接 | -| 等待连接数 | | 等待连接 | +| 指标名称 | 指标单位 | 指标描述 | +|-------|------|------------| +| 接收连接数 | | 已接受的连接 | +| 处理连接数 | | 成功处理的连接 | +| 活动连接数 | | 当前活动连接 | +| 丢弃连接数 | | 丢弃的连接 | +| 请求连接数 | | 客户端请求 | +| 读连接数 | | 正在执行读操作的连接 | +| 写连接数 | | 正在执行写操作的连接 | +| 等待连接数 | | 等待连接 | #### 指标集:req_status -| 指标名称 | 指标单位 | 指标描述 | -|---------|-------|---------| -| 分组类别 | | 分组类别 | -| 分组名称 | | 分组名称 | -| 最大并发连接数 | | 最大并发连接数 | -| 最大带宽 | kb | 最大带宽 | -| 总流量 | kb | 总流量 | -| 总请求数 | | 总请求数 | -| 当前并发连接数 | | 当前并发连接数 | -| 当前带宽 | kb | 当前带宽 | - +| 指标名称 | 指标单位 | 指标描述 | +|---------|------|---------| +| 分组类别 | | 分组类别 | +| 分组名称 | | 分组名称 | +| 最大并发连接数 | | 最大并发连接数 | +| 最大带宽 | kb | 最大带宽 | +| 总流量 | kb | 总流量 | +| 总请求数 | | 总请求数 | +| 当前并发连接数 | | 当前并发连接数 | +| 当前带宽 | kb | 当前带宽 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md index 5760321922f..735ab741b4d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md @@ -15,7 +15,7 @@ NTP监控的中文文档如下: ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |------|--------------------------------------------------| | 监控主机 | 被监控的IPv4、IPv6或域名。注意⚠️不包含协议头(例如:https://,http://) | | 监控名称 | 标识此监控的名称。名称需要是唯一的 | @@ -27,7 +27,7 @@ NTP监控的中文文档如下: #### 指标集:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------|------|--------------------------| | 响应时间 | 毫秒 | NTP服务器响应请求所需的时间。 | | 时间 | 毫秒 | NTP服务器报告的当前时间。 | @@ -39,3 +39,4 @@ NTP监控的中文文档如下: | 层级 | | NTP服务器的层级,表示其与参考时钟的距离。 | | 参考ID | | 指示NTP服务器使用的参考时钟或时间源的标识符。 | | 精度 | | NTP服务器时钟的精度,表示其准确性。 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md index d22b1238855..0af3ca3d17b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md @@ -8,12 +8,13 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] ### 准备工作 #### 获取会话密钥 -> 1. 打开 Chrome 浏览器的网络请求界面 + +> 1. 打开 Chrome 浏览器的网络请求界面 > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` > 2. 访问 https://platform.openai.com/usage > 3. 找到 https://api.openai.com/dashboard/billing/usage 请求 -> 4. 找到请求头中 Authorization 字段,并复制 `Bearer ` 之后的内容。例如: `sess-123456` +> 4. 
找到请求头中 Authorization 字段,并复制 `Bearer ` 之后的内容。例如: `sess-123456` ### 注意事项 @@ -22,11 +23,11 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -|:-------|---------------------------------| +| 参数名称 | 参数帮助描述 | +|:-------|---------------------------------|---| | 监控Host | 此处填写 api.openai.com 。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | | -| 会话密钥 | 即准备工作中获取的会话密钥。 | | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | | +| 会话密钥 | 即准备工作中获取的会话密钥。 | | | 采集器 | 配置此监控使用哪台采集器调度采集。 | | 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒。 | | 绑定标签 | 对监控资源的分类管理标签。 | @@ -36,7 +37,7 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] #### 指标集合:信用额度授予 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|--------|---------| | 总授予额度 | 美元 ($) | 总授予额度 | | 总使用额度 | 美元 ($) | 总使用额度 | @@ -45,14 +46,14 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] #### 指标集合:模型花费 -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|--------|---------| -| 模型名称 | 无 | 模型名称 | -| 花费 | 美元 ($) | 花费 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------|--------|--------| +| 模型名称 | 无 | 模型名称 | +| 花费 | 美元 ($) | 花费 | #### 指标集合:订阅计费 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|--------|--------------| | 是否有支付方式 | 无 | 是否有支付方式 | | 订阅是否已取消 | 无 | 订阅是否已取消 | @@ -80,3 +81,4 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] | 税务ID | 无 | 税务ID | | 结算地址 | 无 | 结算地址 | | 业务地址 | 无 | 业务地址 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md index 632a7f41b2d..8bf21d7debb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md @@ -9,50 +9,48 @@ keywords: [开源监控系统, 开源数据库监控, OpenGauss数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 
密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | #### 指标集合:state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------------------------------------------------------------| +| name | 无 | 数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | 
+| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | #### 指标集合:activity -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| running | 连接数 | 当前客户端连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md index 6c3b2e9ac7e..f32e2b070ae 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md @@ -4,114 +4,105 @@ title: 监控:OpenSUSE操作系统监控 sidebar_label: OpenSUSE操作系统 keywords: [开源监控系统, 开源操作系统监控, OpenSUSE操作系统监控] --- + > 对OpenSUSE操作系统的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- |------------------------------------------------------| +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | | 端口 | Linux SSH对外提供的端口,默认为22。 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次获取信息都会创建一个连接 | -| 用户名 | SSH连接用户名,可选 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次获取信息都会创建一个连接 | +| 用户名 | SSH连接用户名,可选 | | 密码 | SSH连接密码,可选 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | | 密钥 | 连接服务器所需密钥 | ### 采集指标 #### 指标集合:系统基本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------ | -| Host Name | 无 | 
主机名称 | -| System Version | 无 | 操作系统版本 | -| Uptime | 无 | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Host Name | 无 | 主机名称 | +| System Version | 无 | 操作系统版本 | +| Uptime | 无 | 启动时间 | #### 指标集合:CPU 信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | --------------------------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:内存信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | ---------------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:磁盘信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------- | -------- | ------------------ | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:网卡信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- |------|-------------| -| interface_name | 无 | 网卡名称 | -| receive_bytes | Mb | 入站数据流量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | Mb | 入站数据流量 | | transmit_bytes | Mb | 出站数据流量 | #### 指标集合:文件系统 - -| 
指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | -------------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | #### 指标集合:Top10 CPU进程 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| cpu_usage | % | CPU占用率 | -| mem_usage | % | 内存占用率 | -| command | 无 | 执行命令 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| cpu_usage | % | CPU占用率 | +| mem_usage | % | 内存占用率 | +| command | 无 | 执行命令 | #### 指标集合:Top10 内存进程 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| mem_usage | % | 内存占用率 | +| cpu_usage | % | CPU占用率 | +| command | 无 | 执行命令 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| mem_usage | % | 内存占用率 | -| cpu_usage | % | CPU占用率 | -| command | 无 | 执行命令 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md index 2edf7bf6ff1..71f0db0bf95 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md @@ -15,55 +15,56 @@ keywords: [开源监控系统, 开源数据库监控, Oracle数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1521。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 
| 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为1521。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| database_version | 无 | 数据库版本 | -| database_type | 无 | 数据库类型 | -| hostname | 无 | 主机名称 | -| instance_name | 无 | 数据库实例名称 | -| startup_time | 无 | 数据库启动时间 | -| status | 无 | 数据库状态 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|---------| +| database_version | 无 | 数据库版本 | +| database_type | 无 | 数据库类型 | +| hostname | 无 | 主机名称 | +| instance_name | 无 | 数据库实例名称 | +| startup_time | 无 | 数据库启动时间 | +| status | 无 | 数据库状态 | #### 指标集合:tablespace -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| file_id | 无 | 文件ID | -| file_name | 无 | 文件名称 | -| tablespace_name | 无 | 所属表空间名称 | -| status | 无 | 状态 | -| bytes | MB | 大小 | -| blocks | 无 | 区块数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------| +| file_id | 无 | 文件ID | +| file_name | 无 | 文件名称 | +| tablespace_name | 无 | 所属表空间名称 | +| status | 无 | 状态 | +| bytes | MB | 大小 | +| blocks | 无 | 区块数量 | #### 指标集合:user_connect -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| username | 无 | 用户名 | -| counts | 个数 | 当前连接数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| username | 无 | 用户名 | +| counts | 个数 | 当前连接数量 | #### 指标集合:performance -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| qps | QPS | I/O Requests per Second 每秒IO请求数量 | -| tps | TPS | User Transaction 
Per Sec 每秒用户事物处理数量 | -| mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------|------|---------------------------------------| +| qps | QPS | I/O Requests per Second 每秒IO请求数量 | +| tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | +| mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md index 7b6613f25bd..401e86f9382 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md @@ -5,32 +5,33 @@ sidebar_label: PING连通性 keywords: [开源监控系统, 开源网络监控, 网络PING监控] --- -> 对对端HOST地址进行PING操作,判断其连通性 +> 对对端HOST地址进行PING操作,判断其连通性 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Ping超时时间 | 设置PING未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|----------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Ping超时时间 | 设置PING未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | - -### 常见问题 +### 常见问题 1. 
安装包部署的hertzbeat下ping连通性监控异常 - 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 > docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md index 2278530b47c..c4bf36a4cfb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md @@ -1,10 +1,11 @@ --- id: plugin title: 自定义插件 -sidebar_label: 自定义插件 +sidebar_label: 自定义插件 --- ## 自定义插件 + ### 简介 当前`Hertzbeat`在使用时,主要依赖`alert`模块对用户进行通知,然后用户采取一些措施如发送请求、执行`sql`、执行`shell`脚本等。 @@ -13,15 +14,19 @@ sidebar_label: 自定义插件 目前,`HertzBeat`只在告警后设置了触发`alert`方法,如需在采集、启动程序等时机设置触发方法,请在`https://github.com/apache/hertzbeat/issues/new/choose` 提`Task`。 ### 具体使用 + 1. 拉取主分支代码 `git clone https://github.com/apache/hertzbeat.git` ,定位到`plugin`模块的 `Plugin`接口。 - ![plugin-1.png](/img/docs/help/plugin-1.png) + ![plugin-1.png](/img/docs/help/plugin-1.png) 2. 在`org.apache.hertzbeat.plugin.impl`目录下, 新建一个接口实现类,如`org.apache.hertzbeat.plugin.impl.DemoPluginImpl`,在实现类中接收`Alert`类作为参数,实现`alert`方法,逻辑由用户自定义,这里我们简单打印一下对象。 - ![plugin-2.png](/img/docs/help/plugin-2.png) + ![plugin-2.png](/img/docs/help/plugin-2.png) 3. 在 `META-INF/services/org.apache.hertzbeat.plugin.Plugin` 文件中增加接口实现类的全限定名,每个实现类全限定名单独成行。 4. 打包`hertzbeat-plugin`模块。 - ![plugin-3.png](/img/docs/help/plugin-3.png) + ![plugin-3.png](/img/docs/help/plugin-3.png) + 5. 将打包后的`jar`包,拷贝到安装目录下的`ext-lib`目录下(若为`docker`安装则先将`ext-lib`目录挂载出来,再拷贝到该目录下) - ![plugin-4.png](/img/docs/help/plugin-4.png) + ![plugin-4.png](/img/docs/help/plugin-4.png) + 6. 
然后重启`HertzBeat`,即可实现自定义告警后处理策略。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md index 8d6c2eb5548..4c58cc4a308 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md @@ -24,26 +24,24 @@ keywords: [开源监控工具,开源Java监控工具,监控POP3指标] 5. 通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 ``` - ### 配置参数 -| 参数名 | 参数描述 | -|-------------------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | POP3 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | +| 参数名 | 参数描述 | +|--------|-----------------------------------------------------| +| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | +| 监控名称 | 标识此监控的名称。名称需要唯一 | +| 端口 | POP3 提供的端口 | +| 超时时间 | 允许收集响应时间 | +| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | +| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | +| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | ### 采集指标 #### 指标集:email_status -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------| -| 邮件数量 | | 邮件数量 | -| 邮箱总大小 | kb | 邮箱中邮件的总大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------| +| 邮件数量 | | 邮件数量 | +| 邮箱总大小 | kb | 邮箱中邮件的总大小 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md index f27314e8e40..dd0b19aac82 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md @@ -9,22 +9,21 @@ keywords: [开源监控系统, 开源网络监控, TCP 端口可用性监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 
标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 连接超时时间 | 端口连接的等待超时时间,单位毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 连接超时时间 | 端口连接的等待超时时间,单位毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md index 4716d0e2e64..59adae7da81 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md @@ -9,50 +9,48 @@ keywords: [开源监控系统, 开源数据库监控, PostgreSQL数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | 
+| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | #### 指标集合:state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------------------------------------------------------------| +| name | 无 | 数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | #### 指标集合:activity -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| running | 连接数 | 当前客户端连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md index 31ed6d64692..592e840b463 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md @@ -4,74 +4,72 @@ title: 监控:PrestoDB 数据库 sidebar_label: PrestoDB 数据库 keywords: [ 开源监控系统, 开源数据库监控, Presto数据库监控 ] --- + > 对PrestoDB Atlas 的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|---------------------------------------------| +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 被监控的平台端口。 | -| 连接超时时间 | 设置连接PrestoDB未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒。 | -| 绑定标签 | 用于对监控资源进行分类管理。 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 被监控的平台端口。 | +| 连接超时时间 | 设置连接PrestoDB未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒。 | +| 绑定标签 | 用于对监控资源进行分类管理。 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息。 | ### 采集指标 #### 指标集合:集群状态 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- | ----------------------------- | -| activeWorkers | 无 | 活跃节点数 | -| runningQueries | 无 | 运行中的查询数 | -| queuedQueries | 无 | 队列中的查询数 | -| blockedQueries | 无 | 阻塞的查询数 | -| runningDrivers | 无 | 运行中的驱动数 | -| runningTasks | 无 | 运行中的任务数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------| +| activeWorkers | 无 | 活跃节点数 | +| runningQueries | 无 | 运行中的查询数 | +| queuedQueries | 无 | 队列中的查询数 | +| blockedQueries | 无 | 阻塞的查询数 | +| runningDrivers | 无 | 运行中的驱动数 | +| runningTasks | 无 | 运行中的任务数 | #### 指标集合:节点信息 -| 指标名称 | 指标单位 | 指标帮助描述 | -|------|------| ---------------------------------- | -| uri | 无 | 节点链接 | -| recentRequests | 无 | 最近一段时间内的请求数量 | -| recentFailures | 无 | 
最近一段时间内的失败请求数量 | -| recentSuccesses | 无 | 最近一段时间内的成功请求数量 | -| lastRequestTime | 无 | 最近一次请求的时间 | -| lastResponseTime | 无 | 最近一次响应的时间 | -| age | 无 | 持续时间 | -| recentFailureRatio | 无 | 最近一段时间内的失败 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|----------------| +| uri | 无 | 节点链接 | +| recentRequests | 无 | 最近一段时间内的请求数量 | +| recentFailures | 无 | 最近一段时间内的失败请求数量 | +| recentSuccesses | 无 | 最近一段时间内的成功请求数量 | +| lastRequestTime | 无 | 最近一次请求的时间 | +| lastResponseTime | 无 | 最近一次响应的时间 | +| age | 无 | 持续时间 | +| recentFailureRatio | 无 | 最近一段时间内的失败 | #### 指标集合:节点状态 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------| ---------------------------------- | -| nodeId | 无 | 节点ID | -| nodeVersion | 无 | 节点版本 | -| environment | 无 | 环境 | -| coordinator | 无 | 是否为协调节点 | -| uptime | 无 | 正常运行时间 | -| externalAddress | 无 | 外部地址 | -| internalAddress | 无 | 内部地址 | -| processors | 无 | 处理器数量 | -| processCpuLoad | 无 | 进程CPU负载 | -| systemCpuLoad | 无 | 系统CPU负载 | -| heapUsed | MB | 已使用堆内存 | -| heapAvailable | MB | 可用堆内存 | -| nonHeapUsed | MB | 请已使用非堆内存 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|----------| +| nodeId | 无 | 节点ID | +| nodeVersion | 无 | 节点版本 | +| environment | 无 | 环境 | +| coordinator | 无 | 是否为协调节点 | +| uptime | 无 | 正常运行时间 | +| externalAddress | 无 | 外部地址 | +| internalAddress | 无 | 内部地址 | +| processors | 无 | 处理器数量 | +| processCpuLoad | 无 | 进程CPU负载 | +| systemCpuLoad | 无 | 系统CPU负载 | +| heapUsed | MB | 已使用堆内存 | +| heapAvailable | MB | 可用堆内存 | +| nonHeapUsed | MB | 请已使用非堆内存 | #### 指标集合: 任务查询 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------- | -------- | ------------------ | -| taskId | 无 | 任务ID | -| version | 无 | 版本 | -| state | 无 | 状态 | -| self | 无 | 自身 | -| lastHeartbeat | 无 | 最后心跳时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|--------| +| taskId | 无 | 任务ID | +| version | 无 | 版本 | +| state | 无 | 状态 | +| self | 无 | 自身 | +| lastHeartbeat | 无 | 最后心跳时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md index 91d7718e419..2eda0726d27 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md @@ -4,48 +4,46 @@ title: 监控:Linux 进程监控 sidebar_label: Process keywords: [开源监控系统, 操作系统进程监控, 进程监控] --- + > 对Linux系统进程基础信息进行采集监控,包括进程的 CPU使用率、内存使用率、物理内存、IO 等监控 ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- | ------------------------------------------------------------------------- | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux系统的ssh端口,默认: 22 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | -| 用户名 | 服务器用户名 | -| 密码 | 服务器密码 | -| 进程名称 | 需要监控的进程名称或进程部分名称 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -| 私钥 | 连接服务器所需私钥 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux系统的ssh端口,默认: 22 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | +| 用户名 | 服务器用户名 | +| 密码 | 服务器密码 | +| 进程名称 | 需要监控的进程名称或进程部分名称 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 私钥 | 连接服务器所需私钥 | ### 采集指标 #### 指标集合:进程基本信息 - | 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------ | -| PID | 无 | 进程ID | -| User | 无 | 用户 | -| CPU | 无 | CPU使用率 | -| MEM | 无 | 内存使用率 | -| rss | 无 | 物理内存 | -| cmd | 无 | 运行命令 | +|------|------|--------| +| PID | 无 | 进程ID | +| User | 无 | 用户 | +| CPU | 无 | CPU使用率 | +| MEM | 无 | 内存使用率 | +| rss | 无 | 物理内存 | +| cmd | 无 | 运行命令 | #### 指标集合:内存使用信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------ | -| PID | 无 | 进程ID | -| detail | 无 | 详细监控指标 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| 
PID | 无 | 进程ID | +| detail | 无 | 详细监控指标 | 包含的指标: @@ -63,22 +61,20 @@ keywords: [开源监控系统, 操作系统进程监控, 进程监控] #### 指标集合:其他监控信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------ | -| PID | 无 | 进程ID | -| path | 无 | 执行路径 | -| date | 无 | 启动时间 | -| fd_count | 无 | 打开文件描述符数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|-----------| +| PID | 无 | 进程ID | +| path | 无 | 执行路径 | +| date | 无 | 启动时间 | +| fd_count | 无 | 打开文件描述符数量 | #### 指标集合:IO - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------ | -| PID | 无 | 进程ID | -| metric | 无 | 监控指标名称 | -| value | 无 | 监控指标值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| PID | 无 | 进程ID | +| metric | 无 | 监控指标名称 | +| value | 无 | 监控指标值 | 包含的指标: @@ -89,3 +85,4 @@ keywords: [开源监控系统, 操作系统进程监控, 进程监控] - read_bytes(进程从磁盘实际读取的字节数) - write_bytes(进程写入到磁盘的实际字节数) - cancelled_write_bytes(进程写入到磁盘的实际字节数) + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md index 50d148f72a9..571da45aac0 100755 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, Prometheus协议监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(例如: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -40,5 +40,3 @@ keywords: [ 开源监控系统, Prometheus协议监控 ] 其余设置保持默认。 - - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md index a59178686b3..1c12244997b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md @@ -4,52 +4,48 @@ title: 监控:Pulsar监控 sidebar_label: Apache Pulsar keywords: [开源监控系统, 
开源数据库监控, HbaseMaster监控] --- + > 对Pulsar的通用性能指标进行采集监控 **使用协议:HTTP** ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------------------- | -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 端口 | Pulsar的webServiceProt值,默认为8080。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 端口 | Pulsar的webServiceProt值,默认为8080。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:版本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | ------------ | -| Version Info | 无 | 版本信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| Version Info | 无 | 版本信息 | #### 指标集合:process_start_time_seconds - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- | ------------ | -| Process Start Time | 无 | 进程启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------| +| Process Start Time | 无 | 进程启动时间 | #### 指标集合:process_open_fds - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------- | -------- | ---------------- | -| Open File Descriptors | 无 | 打开的文件描述符 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| Open File Descriptors | 无 | 打开的文件描述符 | #### 指标集合:process_max_fds - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | -------------- | -| Max File Descriptors | 无 | 最大文件描述符 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|---------| +| Max File Descriptors | 无 | 最大文件描述符 | #### 指标集合: jvm_memory_pool_allocated_bytes diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md index 8cb91eeb3e6..89c728162c9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md @@ -5,42 +5,42 @@ sidebar_label: RabbitMQ消息中间件 keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间件监控] --- -> 对 RabbitMQ 消息中间件的运行状态,节点,队列等相关指标进行监测。 +> 对 RabbitMQ 消息中间件的运行状态,节点,队列等相关指标进行监测。 -### 监控前操作 +### 监控前操作 > HertzBeat 使用 RabbitMQ Management 的 Rest Api 对 RabbitMQ 进行指标数据采集。 -> 故需要您的 RabbitMQ 环境开启 Management 插件 +> 故需要您的 RabbitMQ 环境开启 Management 插件 -1. 开启 Management 插件,或使用自开启版本 +1. 开启 Management 插件,或使用自开启版本 ```shell rabbitmq-plugins enable rabbitmq_management ``` -2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 +2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 3. 在 HertzBeat 添加对应 RabbitMQ 监控即可,参数使用 Management 的 IP 端口,默认账户密码。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -|----------|---------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | RabbitMQ Management 对外提供的HTTP端口,默认为15672。 | -| 用户名 | 接口Basic认证时使用的用户名 | -| 密码 | 接口Basic认证时使用的密码 | -| 超时时间 | HTTP请求查询超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | RabbitMQ Management 对外提供的HTTP端口,默认为15672。 | +| 用户名 | 接口Basic认证时使用的用户名 | +| 密码 | 接口Basic认证时使用的密码 | +| 超时时间 | HTTP请求查询超时时间 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:overview +#### 指标集合:overview -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------------|------|--------------------------| | 
product_version | 无 | 产品版本 | | product_name | 无 | 产品名称 | @@ -52,7 +52,7 @@ rabbitmq-plugins enable rabbitmq_management #### 指标集合:object_totals -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-----------------| | channels | 无 | channels的总数量 | | connections | 无 | connections的总数量 | @@ -62,65 +62,65 @@ rabbitmq-plugins enable rabbitmq_management #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|--------------------------------| -| name | 无 | The node name | -| type | 无 | The node type | -| running | 无 | Running state | -| os_pid | 无 | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | -| fd_total | 无 | File descriptors available | -| fd_used | 无 | File descriptors used | -| sockets_total | 无 | Sockets available | -| sockets_used | 无 | Sockets used | -| proc_total | 无 | Erlang process limit | -| proc_used | 无 | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | -| disk_free | GB | Free disk space | -| gc_num | 无 | GC runs | -| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | -| context_switches | 无 | Context_switches num | -| io_read_count | 无 | 总共读操作的数量 | -| io_read_bytes | KB | 总共读入磁盘数据大小 | -| io_read_avg_time | ms | 读操作平均时间,毫秒为单位 | -| io_write_count | 无 | 磁盘写操作总量 | -| io_write_bytes | KB | 写入磁盘数据总量 | -| io_write_avg_time | ms | 每个磁盘写操作的平均时间,毫秒为单位 | -| io_seek_count | 无 | seek操作总量 | -| io_seek_avg_time | ms | seek操作的平均时间,毫秒单位 | -| io_sync_count | 无 | fsync操作的总量 | -| io_sync_avg_time | ms | fsync操作的平均时间,毫秒为单位 | -| connection_created | 无 | connection created num | -| connection_closed | 无 | connection closed num | -| channel_created | 无 | channel created num | -| channel_closed | 无 | channel closed num | -| queue_declared | 无 | queue declared num | -| queue_created | 无 | queue created num | -| queue_deleted | 无 | queue deleted num | -| connection_closed | 无 | connection closed num | - +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|--------------------|------|-------------------------------| +| name | 无 | The node name | +| type | 无 | The node type | +| running | 无 | Running state | +| os_pid | 无 | Pid in OS | +| mem_limit | MB | Memory usage high watermark | +| mem_used | MB | Total amount of memory used | +| fd_total | 无 | File descriptors available | +| fd_used | 无 | File descriptors used | +| sockets_total | 无 | Sockets available | +| sockets_used | 无 | Sockets used | +| proc_total | 无 | Erlang process limit | +| proc_used | 无 | Erlang processes used | +| disk_free_limit | GB | Free disk space low watermark | +| disk_free | GB | Free disk space | +| gc_num | 无 | GC runs | +| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | +| context_switches | 无 | Context_switches num | +| io_read_count | 无 | 总共读操作的数量 | +| io_read_bytes | KB | 总共读入磁盘数据大小 | +| io_read_avg_time | ms | 读操作平均时间,毫秒为单位 | +| io_write_count | 无 | 磁盘写操作总量 | +| io_write_bytes | KB | 写入磁盘数据总量 | +| io_write_avg_time | ms | 每个磁盘写操作的平均时间,毫秒为单位 | +| io_seek_count | 无 | seek操作总量 | +| io_seek_avg_time | ms | seek操作的平均时间,毫秒单位 | +| io_sync_count | 无 | fsync操作的总量 | +| io_sync_avg_time | ms | fsync操作的平均时间,毫秒为单位 | +| connection_created | 无 | connection created num | +| connection_closed | 无 | connection closed num | +| channel_created | 无 | channel created num | +| channel_closed | 无 | channel closed num | +| queue_declared | 无 | queue declared num | +| queue_created | 无 | queue created num | +| queue_deleted | 无 | queue deleted num | +| connection_closed | 无 | connection closed num | #### 指标集合:queues -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------------------|------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | 无 | The name of the queue with non-ASCII characters escaped as in C. | +| name | 无 | The name of the queue with non-ASCII characters escaped as in C. 
| | node | 无 | The queue on the node name | -| state | 无 | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | +| state | 无 | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | | type | 无 | Queue type, one of: quorum, stream, classic. | -| vhost | 无 | vhost path | +| vhost | 无 | vhost path | | auto_delete | 无 | Whether the queue will be deleted automatically when no longer used | -| policy | 无 | Effective policy name for the queue. | +| policy | 无 | Effective policy name for the queue. | | consumers | 无 | Number of consumers. | | memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. | | messages_ready | 无 | Number of messages ready to be delivered to clients | -| messages_unacknowledged | 无 | Number of messages delivered to clients but not yet acknowledged | +| messages_unacknowledged | 无 | Number of messages delivered to clients but not yet acknowledged | | messages | 无 | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | 无 | Number of messages from messages_ready which are resident in ram | +| messages_ready_ram | 无 | Number of messages from messages_ready which are resident in ram | | messages_persistent | 无 | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | +| message_bytes | B | Sum of the size of all message bodies in the queue. 
This does not include the message properties (including headers) or any overhead | | message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | +| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md index 5ceb911c8d3..e0b8ae48cf4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 无 | CPU内核数量 | @@ -47,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|----------| | total | Mb | 总内存容量 | | used | Mb | 用户程序内存量 | @@ -58,7 +58,7 @@ keywords: [ 开源监控系统, 
开源操作系统监控, RedHat操作系统监 #### 指标集合:磁盘信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|-----------| | disk_num | 无 | 磁盘总数 | | partition_num | 无 | 分区总数 | @@ -68,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:网卡信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | interface_name | 无 | 网卡名称 | | receive_bytes | Mb | 入站数据流量 | @@ -76,7 +76,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -88,7 +88,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -99,9 +99,10 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md index dd9b304e1ce..58248fb0b45 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md @@ -2,244 +2,239 @@ id: redis title: 监控:REDIS数据库监控 sidebar_label: REDIS数据库 -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] +keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] --- > 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 
数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | +| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制| -| atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本| -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间| -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理| -| executable | 无 | 服务器可执行文件的路径 | -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志| -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|-----------------------------------------------| +| redis_version | 无 | Redis 服务器版本 | +| redis_git_sha1 | 无 | Git SHA1 | +| redis_git_dirty | 无 | Git dirty flag | +| 
redis_build_id | 无 | redis 构建的id | +| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | +| os | 无 | Redis 服务器的宿主操作系统 | +| arch_bits | 无 | 架构(32 或 64 位) | +| multiplexing_api | 无 | Redis使用的事件循环机制 | +| atomicvar_api | 无 | Redis使用的原子 API | +| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本 | +| process_id | 无 | 服务器进程的PID | +| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | +| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | +| tcp_port | 无 | TCP/IP侦听端口 | +| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | +| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | +| uptime_in_days | 无 | 自Redis服务器启动后的天数 | +| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | +| configured_hz | 无 | 服务器配置的频率设置 | +| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | +| executable | 无 | 服务器可执行文件的路径 | +| config_file | 无 | 配置文件的路径 | +| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | +| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | #### 指标集合:clients -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值| -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。| -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING)| -| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|--------------------------------------------------------------------------------| +| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | +| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | +| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | +| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | +| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 
| +| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | +| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | +| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | #### 指标集合:memory -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字| -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值| -| used_memory_peak | byte | Redis消耗的峰值内存(字节)| -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | -| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和| -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节)| -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte| 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同| -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片| -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| number_of_cached_scripts | 无 |缓存的lua脚本数量 | -| maxmemory | byte | maxmemory配置指令的值| -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| 
rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | -| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。| -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | -| mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL)| -| lazyfreed_objects | 无 | 已延迟释放的对象数。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|----------|-----------------------------------------------------------------------------------------------| +| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | +| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | +| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | +| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak | byte | Redis消耗的峰值内存(字节) | +| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | +| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和 | +| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | +| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | +| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | +| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | +| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | +| allocator_resident | 
byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | +| total_system_memory | byte | Redis主机的内存总量 | +| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_lua | byte | Lua引擎使用的字节数 | +| used_memory_lua_human | KB | 上一个值的人类可读值 | +| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | +| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | +| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | +| maxmemory | byte | maxmemory配置指令的值 | +| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | +| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | +| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | +| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | +| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | +| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | +| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | +| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | +| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | +| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | +| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | +| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | +| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | +| mem_clients_normal | 无 | 普通客户端使用的内存 | +| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | +| mem_allocator | 无 | 内存分配器,在编译时选择。 | +| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | +| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | +| lazyfreed_objects | 无 | 已延迟释放的对象数。 | #### 指标集合:persistence -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是| -| current_cow_size | byte | 
运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比| -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功| -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite| -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------|--------|-----------------------------------------------------------------------------------------------------| +| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | +| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | +| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | +| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | +| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | +| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 
| +| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | +| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | +| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | +| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | +| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | +| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | +| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | +| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | +| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite | +| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | +| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | +| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | +| aof_last_write_status | 无 | 上次aof写入状态 | +| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | +| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | #### 指标集合:stats -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数| -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 
中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数| -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数| -| tracking_total_keys | 无 | key 查询的总数| -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数| -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|----------------------------------------------------| +| total_connections_received | 无 | 服务器接受的连接总数 | +| total_commands_processed | 无 | 服务器处理的命令总数 | +| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | +| total_net_input_bytes | byte | 从网络读取的字节总数 | +| total_net_output_bytes | byte | 写入网络的总字节数 | +| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | +| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | +| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | +| sync_full | 无 | 具有副本的完整重新同步数 | +| sync_partial_ok | 无 | 接受的部分重新同步请求数 | +| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | +| expired_keys | 无 | 过期的key总数 | +| expired_stale_perc | 无 | 可能过期key的百分比 | +| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | +| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | +| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | +| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | +| keyspace_misses | 无 
| 在主dict 中未查到key的次数 | +| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | +| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | +| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | +| total_forks | 无 | 自服务器启动以来的fork操作总数 | +| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | +| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | +| active_defrag_hits | 无 | 主动碎片整理命中次数 | +| active_defrag_misses | 无 | 主动碎片整理未命中次数 | +| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | +| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | +| tracking_total_keys | 无 | key 查询的总数 | +| tracking_total_items | 无 | item查询的总数 | +| tracking_total_prefixes | 无 | 前缀查询的总数 | +| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | +| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | +| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | +| total_reads_processed | 无 | 正在读取的请求数 | +| total_writes_processed | 无 | 正在写入的请求数 | +| io_threaded_reads_processed | 无 | 正在读取的线程数 | +| io_threaded_writes_processed | 无 | 正在写入的线程数 | #### 指标集合:replication -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串| -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID| -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量| -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-------------------------------------------------------------------------------------| +| role | 无 | 节点角色 master 主节点 slave 从节点 | +| connected_slaves | 无 | 连接的从节点数 | +| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | +| master_replid | 无 | 实例启动的随机字符串 | +| 
master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | +| master_repl_offset | 无 | 主从同步偏移量 | +| second_repl_offset | 无 | 接受从服务ID的最大偏移量 | +| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | +| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | +| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | +| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和| -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和| -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和| -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU| -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|------------------------| +| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | +| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | +| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | +| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | #### 指标集合:errorstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|-----------| +| errorstat_ERR | 无 | 错误累计出现的次数 | +| errorstat_MISCONF | 无 | | #### 指标集合:cluster -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|--------------------| +| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | #### 指标集合:commandstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数| -| cmdstat_get 
| 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|---------------------------------------------------------------------------------------------------------------------------| +| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | +| cmdstat_get | 无 | get命令的统计信息 | +| cmdstat_setnx | 无 | setnx命令的统计信息 | +| cmdstat_hset | 无 | hset命令的统计信息 | +| cmdstat_hget | 无 | hget命令的统计信息 | +| cmdstat_lpush | 无 | lpush命令的统计信息 | +| cmdstat_rpush | 无 | rpush命令的统计信息 | +| cmdstat_lpop | 无 | lpop命令的统计信息 | +| cmdstat_rpop | 无 | rpop命令的统计信息 | +| cmdstat_llen | 无 | llen命令的统计信息 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md index 48858be3e86..ed684ef1a35 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md @@ -19,10 +19,10 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 appendonly yes bind 0.0.0.0 protected-mode no - + ``` - *docker-compose.yml* + *docker-compose.yml* ```yml services: @@ -34,7 +34,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "1000:6379" - + redis-master-2: image: redis:latest container_name: redis-master-2 @@ -43,7 +43,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "2000:6379" - + redis-master-3: image: redis:latest container_name: redis-master-3 @@ -52,7 +52,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 - 
./redis.conf:/usr/local/etc/redis/redis.conf ports: - "3000:6379" - + redis-slave-1: image: redis:latest container_name: redis-slave-1 @@ -61,7 +61,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "1001:6379" - + redis-slave-2: image: redis:latest container_name: redis-slave-2 @@ -70,7 +70,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "2001:6379" - + redis-slave-3: image: redis:latest container_name: redis-slave-3 @@ -79,60 +79,59 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "3001:6379" - + networks: default: external: name: hertzbeat-redis-cluster ``` - 2. 查看所有容器的 IP 地址,搭建 Redis 集群时需要用到这些. - ```bash - docker-compose up -d - docker network inspect hertzbeat-redis-cluste - ``` - - ``` - "Containers": { - "187b879f73c473b3cbb82ff95f668e65af46115ddaa27f3ff1a712332b981531": { - ... - "Name": "redis-slave-2", - "IPv4Address": "192.168.117.6/24", - ... - }, - "45e22b64c82e51857fc104436cdd6cc0c5776ad10a2e4b9d8e52e36cfb87217e": { - ... - "Name": "redis-master-3", - "IPv4Address": "192.168.117.3/24 - ... - }, - "57838ae37956f8af181f9a131eb011efec332b9ed3d49480f59d8962ececf288": { - ... - "Name": "redis-master-2", - "IPv4Address": "192.168.117.7/24", - ... - }, - "94478d14bd950bcde533134870beb89b392515843027a0595af56dd1e3305a76": { - ... - "Name": "redis-master-1", - "IPv4Address": "192.168.117.4/24", - ... - }, - "ad055720747e7fc430ba794d5321723740eeb345c280073e4292ed4302ff657c": { - ... - "Name": "redis-slave-3", - "IPv4Address": "192.168.117.2/24", - ... - }, - "eddded1ac4c7528640ba0c6befbdaa48faa7cb13905b934ca1f5c69ab364c725": { - ... - "Name": "redis-slave-1", - "IPv4Address": "192.168.117.5/24", - ... 
- } - }, - ``` + ```bash + docker-compose up -d + docker network inspect hertzbeat-redis-cluste + ``` + + ``` + "Containers": { + "187b879f73c473b3cbb82ff95f668e65af46115ddaa27f3ff1a712332b981531": { + ... + "Name": "redis-slave-2", + "IPv4Address": "192.168.117.6/24", + ... + }, + "45e22b64c82e51857fc104436cdd6cc0c5776ad10a2e4b9d8e52e36cfb87217e": { + ... + "Name": "redis-master-3", + "IPv4Address": "192.168.117.3/24 + ... + }, + "57838ae37956f8af181f9a131eb011efec332b9ed3d49480f59d8962ececf288": { + ... + "Name": "redis-master-2", + "IPv4Address": "192.168.117.7/24", + ... + }, + "94478d14bd950bcde533134870beb89b392515843027a0595af56dd1e3305a76": { + ... + "Name": "redis-master-1", + "IPv4Address": "192.168.117.4/24", + ... + }, + "ad055720747e7fc430ba794d5321723740eeb345c280073e4292ed4302ff657c": { + ... + "Name": "redis-slave-3", + "IPv4Address": "192.168.117.2/24", + ... + }, + "eddded1ac4c7528640ba0c6befbdaa48faa7cb13905b934ca1f5c69ab364c725": { + ... + "Name": "redis-slave-1", + "IPv4Address": "192.168.117.5/24", + ... + } + }, + ``` 3. 进入容器, 然后构建集群. ```bash @@ -162,3 +161,4 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 ### Configuration Parameters 查看 [REDIS](https://hertzbeat.apache.org/docs/help/redis) 文档. 
+ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md index f12a43628ac..84cc24fc976 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-----------|------------------------------------------------| | 注册中心Host | RocketMQ注册中心的IPV4,IPV6(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -24,7 +24,7 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 #### 指标集合:集群 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------------------|------|------------| | BrokerId | 无 | Broker唯一ID | | Address | 无 | Broker地址 | @@ -38,7 +38,7 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 #### 指标集合:消费者 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------------|------|--------| | Consumer_group | 无 | 消费者组 | | Client_quantity | 无 | 客户端数量 | @@ -46,3 +46,4 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 | Consume_type | 无 | 消费类型 | | Consume_tps | 无 | 消费TPS | | Delay | 无 | 延迟 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md index 0cf541702bc..55923468da8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
|----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 无 | CPU内核数量 | @@ -47,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|----------| | total | Mb | 总内存容量 | | used | Mb | 用户程序内存量 | @@ -58,7 +58,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:磁盘信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|-----------| | disk_num | 无 | 磁盘总数 | | partition_num | 无 | 分区总数 | @@ -68,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:网卡信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | interface_name | 无 | 网卡名称 | | receive_bytes | Mb | 入站数据流量 | @@ -76,7 +76,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -88,7 +88,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -99,9 +99,10 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md index 08788efeaae..1149ed4bdd9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md @@ -5,24 +5,24 @@ sidebar_label: ShenYu网关监控 keywords: [开源监控系统, 开源消息中间件监控, ShenYu网关监控监控] --- -> 对 ShenYu 网关的运行状态(JVM相关),请求响应等相关指标进行监测。 +> 对 ShenYu 网关的运行状态(JVM相关),请求响应等相关指标进行监测。 -## 监控前操作 +## 监控前操作 -您需要在 ShenYu 网关开启`metrics`插件,暴露对应的 prometheus metrics 接口。 +您需要在 ShenYu 网关开启`metrics`插件,暴露对应的 prometheus metrics 接口。 -开启插件, 参考 [官方文档](https://shenyu.apache.org/zh/docs/plugin-center/observability/metrics-plugin) +开启插件, 参考 [官方文档](https://shenyu.apache.org/zh/docs/plugin-center/observability/metrics-plugin) -主要如下两步骤: +主要如下两步骤: 1. 在网关的 pom.xml 文件中添加 metrics 的依赖。 ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` 2. 在网关的配置yaml文件中编辑如下内容: @@ -39,11 +39,11 @@ shenyu: jvm_enabled: true #开启jvm的监控指标 ``` -最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 +最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -53,79 +53,78 @@ shenyu: | 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | | 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:shenyu_request_total +#### 指标集合:shenyu_request_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-------------------| -| value | 无 | 收集ShenYu网关的所有请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | 收集ShenYu网关的所有请求数量 | #### 指标集合:shenyu_request_throw_created -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------------------| -| value | 无 | 收集ShenYu网关的异常请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | 收集ShenYu网关的异常请求数量 | #### 指标集合:process_cpu_seconds_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| value | 无 | 
用户和系统CPU总计所用的秒数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 用户和系统CPU总计所用的秒数 | #### 指标集合:process_open_fds -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------------| -| value | 无 | 打开的文件描述符的数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------| +| value | 无 | 打开的文件描述符的数量 | #### 指标集合:process_max_fds -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------------| -| value | 无 | 打开的文件描述符的最大数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------| +| value | 无 | 打开的文件描述符的最大数量 | #### 指标集合:jvm_info -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-----------| -| runtime | 无 | JVM 版本信息 | -| vendor | 无 | JVM 版本信息 | -| version | 无 | JVM 版本信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| runtime | 无 | JVM 版本信息 | +| vendor | 无 | JVM 版本信息 | +| version | 无 | JVM 版本信息 | #### 指标集合:jvm_memory_bytes_used -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| area | 无 | JVM 内存区域 | -| value | MB | 给定 JVM 内存区域的已用大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| area | 无 | JVM 内存区域 | +| value | MB | 给定 JVM 内存区域的已用大小 | #### 指标集合:jvm_memory_pool_bytes_used -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-----------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已用大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的已用大小 | #### 指标集合:jvm_memory_pool_bytes_committed -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已提交大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的已提交大小 | #### 指标集合:jvm_memory_pool_bytes_max -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------| ----------- | -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的最大大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的最大大小 | #### 指标集合:jvm_threads_state 
-| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-------------| -| state | 无 | 线程状态 | -| value | 无 | 对应线程状态的线程数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------| +| state | 无 | 线程状态 | +| value | 无 | 对应线程状态的线程数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md index 21dcd9a88f3..5755437e80e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md @@ -13,12 +13,11 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit > 详见 https://datatracker.ietf.org/doc/html/rfc821#page-13 - **协议使用:SMTP** ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |---------|---------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️无需协议头(例如:https://、http://) | | 监控名称 | 标识此监控的名称。名称需要保持唯一 | @@ -33,9 +32,10 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit #### 指标集:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-------------------| | 响应时间 | 毫秒 | SMTP 服务器响应请求所需的时间 | | 响应状态 | | 响应状态 | | SMTP 服务器标语 | | SMTP 服务器的标语 | | helo 命令返回信息 | | helo 命令返回的响应信息 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md index 086e0a63ac8..a0695849705 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md @@ -19,6 +19,7 @@ keywords: [开源监控工具, 开源 Spring Gateway 监控工具, 监控 Spring spring-boot-starter-actuator ``` + **2. 
修改 YML 配置以暴露度量接口:** ```yaml @@ -35,56 +36,55 @@ management: ### 配置参数 -| 参数名称 | 参数描述 | -| ----------- |--------------------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 8080。 | +| 参数名称 | 参数描述 | +|----------|--------------------------------------------------------|-----------------------------------------------| +| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | +| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | +| 端口 | 数据库提供的默认端口为 8080。 | | 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | will continue only if the probe is successful -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | will continue only if the probe is successful | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | ### 采集指标 #### 指标收集: 健康状态 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|--------------------------| -| 服务状态 | 无 | 服务健康状态: UP(正常),Down(异常) | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|-------------------------| +| 服务状态 | 无 | 服务健康状态: UP(正常),Down(异常) | #### 指标收集: 环境信息 -| 指标名称 | 指标单位 | 指标描述 | -|---------|-------|----------------------------------------------| -| profile | 无 | 应用程序运行的配置环境: prod(生产环境),dev(开发环境),test(测试环境) | -| 端口号 | 无 | 应用程序暴露的端口 | -| 操作系统 | 无 | 运行操作系统 | -| 操作系统架构 | 无 | 运行操作系统的架构 | -| JDK供应商 | 无 | JDK 供应商 | -| JVM版本 | 无 | JVM 版本 | +| 指标名称 | 指标单位 | 指标描述 | +|---------|------|----------------------------------------------| +| profile | 无 | 应用程序运行的配置环境: prod(生产环境),dev(开发环境),test(测试环境) | +| 端口号 | 无 | 应用程序暴露的端口 | +| 操作系统 | 无 | 运行操作系统 | +| 操作系统架构 | 无 | 运行操作系统的架构 | +| JDK供应商 | 无 | JDK 供应商 | +| JVM版本 | 无 | JVM 版本 | #### 指标收集: 线程信息 -| 指标名称 | 指标单位 | 指标描述 | -|-------------|------------|-------------| -| 状态 | 无 | 线程状态 | -| 数量 | 无 | 线程状态对应的线程数量 | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|-------------| +| 状态 | 无 | 线程状态 | +| 数量 | 无 | 
线程状态对应的线程数量 | #### 指标收集: 内存使用情况 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|-------------| -| 内存空间 | 无 | 内存空间名称 | -| 内存占用 | MB | 此空间占用的内存大小 | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|------------| +| 内存空间 | 无 | 内存空间名称 | +| 内存占用 | MB | 此空间占用的内存大小 | #### 指标收集: 路由信息 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|----------| -| 路由id | 无 | 路由 ID | -| 匹配规则 | 无 | 路由匹配规则 | -| 资源标识符 | 无 | 服务资源标识符 | -| 优先级 | 无 | 此路由的优先级 | - +| 指标名称 | 指标单位 | 指标描述 | +|-------|------|---------| +| 路由id | 无 | 路由 ID | +| 匹配规则 | 无 | 路由匹配规则 | +| 资源标识符 | 无 | 服务资源标识符 | +| 优先级 | 无 | 此路由的优先级 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md index 280c6cb6b06..e66d4237a13 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md @@ -7,7 +7,6 @@ keywords: [开源监控系统, 开源消息中间件监控, SpringBoot2.0 监控 > 对SpringBoot2.0 actuator 暴露的通用性能指标进行采集监控。 - ## 监控前操作 如果想要通过此监控类型监控 `SpringBoot` 中的信息,则需要您的SpringBoot应用集成并开启SpringBoot Actuator。 @@ -20,6 +19,7 @@ keywords: [开源监控系统, 开源消息中间件监控, SpringBoot2.0 监控 spring-boot-starter-actuator ``` + **2、修改YML配置暴露指标接口:** ```yaml @@ -30,7 +30,9 @@ management: include: '*' enabled-by-default: on ``` + *注意:如果你的项目里还引入了认证相关的依赖,比如springboot-security,那么SpringBoot Actuator暴露出的接口可能会被拦截,此时需要你手动放开这些接口,以springboot-security为例,需要在SecurityConfig配置类中加入以下代码:* + ```java public class SecurityConfig extends WebSecurityConfigurerAdapter{ @Override @@ -46,48 +48,50 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ } } ``` + ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 监控Host | 
被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 应用服务对外提供的端口,默认为8080。 | | 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | | Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- |--------------------------------| -| status | 无 | 服务健康状态: UP,Down | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|-----------------| +| status | 无 | 服务健康状态: UP,Down | #### 指标集合:environment -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------| -------- |----------------------------| -| profile | 无 | 应用运行profile: prod,dev,test | -| port | 无 | 应用暴露端口 | -| os | 无 | 运行所在操作系统 | -| os_arch | 无 | 运行所在操作系统架构 | -| jdk_vendor | 无 | jdk vendor | -| jvm_version | 无 | jvm version | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|----------------------------| +| profile | 无 | 应用运行profile: prod,dev,test | +| port | 无 | 应用暴露端口 | +| os | 无 | 运行所在操作系统 | +| os_arch | 无 | 运行所在操作系统架构 | +| jdk_vendor | 无 | jdk vendor | +| jvm_version | 无 | jvm version | #### 指标集合:threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- |------|--------------------| -| state | 无 | 线程状态 | -| number | 无 | 此线程状态对应的线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------------| +| state | 无 | 线程状态 | +| number | 无 | 此线程状态对应的线程数量 | #### 指标集合:memory_used -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------| -| space | 无 | 内存空间名称 | -| mem_used | MB | 此空间占用内存大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|-----------| +| space | 无 | 内存空间名称 | +| mem_used | MB | 此空间占用内存大小 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md index 384f9249d16..56a63068b17 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md @@ -51,7 +51,7 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -65,23 +65,28 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### 采集指标 #### 指标集合:可用性 -| 指标名称 | 指标单位 | 指标帮助描述 | + +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms | 响应时间 | #### 指标集合:线程 -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------| -| state | 无 | 线程状态 | -| size | 无 | 此线程状态对应的线程数量 | + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------| +| state | 无 | 线程状态 | +| size | 无 | 此线程状态对应的线程数量 | #### 指标集合:内存使用 -| 指标名称 | 指标单位 | 指标帮助描述 | + +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|-----------| | space | 无 | 内存空间名称 | | mem_used | MB | 此空间占用内存大小 | #### 指标集合:健康状态 -| 指标名称 | 指标单位 | 指标帮助描述 | + +| 指标名称 | 指标单位 | 指标帮助描述 | |--------|------|-----------------| -| status | 无 | 服务健康状态: UP,Down | \ No newline at end of file +| status | 无 | 服务健康状态: UP,Down | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md index 5dc66e27cfc..22a5a50ddd8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md @@ -9,68 +9,68 @@ keywords: [开源监控系统, 开源数据库监控, SqlServer数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1433。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 
| -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为1433。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| machine_name | 无 | 运行服务器实例的 Windows 计算机名称 | -| server_name | 无 | 与Windows实例关联的服务器和实例信息SQL Server | -| version | 无 | 实例的版本,SQL Server,格式为"major.minor.build.revision" | -| edition | 无 | 已安装的 实例的产品SQL Server版本 | -| start_time | 无 | 数据库启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------------------------------------------------| +| machine_name | 无 | 运行服务器实例的 Windows 计算机名称 | +| server_name | 无 | 与Windows实例关联的服务器和实例信息SQL Server | +| version | 无 | 实例的版本,SQL Server,格式为"major.minor.build.revision" | +| edition | 无 | 已安装的 实例的产品SQL Server版本 | +| start_time | 无 | 数据库启动时间 | #### 指标集合:performance_counters -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| database_pages | 无 | Database pages, 已获得的页面数(缓冲池) | -| target_pages | 无 | Target pages, 缓冲池必须的理想页面数 | -| page_life_expectancy | s,秒 | Page life expectancy, 数据页在缓冲池中驻留的时间,这个时间一般会大于 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, 数据库缓冲池高速缓冲命中率,被请求的数据在缓冲池中被找到的概率,一般会大于 80% 才算正常,否则可能是缓冲池容量太小 | -| checkpoint_pages_sec | 无 | Checkpoint pages/sec, 检查点每秒写入磁盘的脏页个数,如果数据过高,证明缺少内存容量 | -| page_reads_sec | 无 | Page reads/sec, 缓存池中每秒读的页数 | -| page_writes_sec | 无 | Page writes/sec, 缓存池中每秒写的页数 | - +| 
指标名称 | 指标单位 | 指标帮助描述 | +|------------------------|------|-------------------------------------------------------------------------------------| +| database_pages | 无 | Database pages, 已获得的页面数(缓冲池) | +| target_pages | 无 | Target pages, 缓冲池必须的理想页面数 | +| page_life_expectancy | s,秒 | Page life expectancy, 数据页在缓冲池中驻留的时间,这个时间一般会大于 300 | +| buffer_cache_hit_ratio | % | Buffer cache hit ratio, 数据库缓冲池高速缓冲命中率,被请求的数据在缓冲池中被找到的概率,一般会大于 80% 才算正常,否则可能是缓冲池容量太小 | +| checkpoint_pages_sec | 无 | Checkpoint pages/sec, 检查点每秒写入磁盘的脏页个数,如果数据过高,证明缺少内存容量 | +| page_reads_sec | 无 | Page reads/sec, 缓存池中每秒读的页数 | +| page_writes_sec | 无 | Page writes/sec, 缓存池中每秒写的页数 | #### 指标集合:connection -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| user_connection | 无 | 已连接的会话数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------| +| user_connection | 无 | 已连接的会话数 | -### 常见问题 +### 常见问题 -1. SSL连接问题修复 +1. SSL连接问题修复 jdk版本:jdk11 问题描述:SQL Server2019使用SA用户连接报错 -错误信息: +错误信息: + ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". 
ClientConnectionId:xxxxxxxxxxxxxxxxx ``` + 问题截图: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) 解决方案: -添加`SqlServer`监控时使用高级设置,自定义JDBC URL,拼接的jdbc url后面加上参数配置,```;encrypt=true;trustServerCertificate=true;```这个参数true表示无条件信任server端返回的任何根证书。 +添加`SqlServer`监控时使用高级设置,自定义JDBC URL,拼接的jdbc url后面加上参数配置,```;encrypt=true;trustServerCertificate=true;```这个参数true表示无条件信任server端返回的任何根证书。 -样例:```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` +样例:```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` -参考文档:[microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building-failed-unable-to-find-valid-certification/ba-p/2591304) +参考文档:[microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building-failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md index ce0084f7e95..73957e31fb8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md @@ -5,29 +5,30 @@ sidebar_label: SSL证书监控 keywords: [开源监控系统, 开源网站监控, SSL证书监控监控] --- -> 对网站的SSL证书过期时间,响应时间等指标进行监测 +> 对网站的SSL证书过期时间,响应时间等指标进行监测 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 
参数帮助描述 | +|--------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:certificate +#### 指标集合:certificate + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|----------| +| subject | 无 | 证书名称 | +| expired | 无 | 是否过期 | +| start_time | 无 | 有效期开始时间 | +| start_timestamp | ms毫秒 | 有效期开始时间戳 | +| end_time | 无 | 过期时间 | +| end_timestamp | ms毫秒 | 过期时间戳 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------| -| subject | 无 | 证书名称 | -| expired | 无 | 是否过期 | -| start_time | 无 | 有效期开始时间 | -| start_timestamp | ms毫秒 | 有效期开始时间戳 | -| end_time | 无 | 过期时间 | -| end_timestamp | ms毫秒 | 过期时间戳 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md index ebae858ee4c..2dc77cd8f6f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md @@ -13,14 +13,14 @@ keywords: [开源监控系统, 开源网站监控, 状态页面] 需要填写的字段如下: -| 字段名称 | 字段说明 | 举例 | -|------|-----------------------------------------|---------------------------------------------------------------------------------------------------| -| 组织名称 | 组织的名称 | HertzBeat | -| 组织介绍 | 组织的详细介绍 | Apache HertzBeat (incubating) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 | -| 网站链接 | 组织网站的 URL,便于访问者获取更多信息 | https://hertzbeat.apache.org/ | +| 字段名称 | 字段说明 | 举例 | +|------|----------------------------------------|---------------------------------------------------------------------------------------------------| +| 组织名称 | 组织的名称 | HertzBeat | +| 组织介绍 | 组织的详细介绍 | 
Apache HertzBeat (incubating) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 | +| 网站链接 | 组织网站的 URL,便于访问者获取更多信息 | https://hertzbeat.apache.org/ | | 标志图片 | 组织官方标志或 Logo 的图片文件路径或 URL,建议使用 .svg 格式 | https://hertzbeat.apache.org/zh-cn/img/hertzbeat-logo.svg | -| 反馈地址 | 接收问题反馈的地址 | https://github.com/apache/hertzbeat/issues | -| 主题颜色 | 状态页面的主色调。 | 在页面中点击选择 | +| 反馈地址 | 接收问题反馈的地址 | https://github.com/apache/hertzbeat/issues | +| 主题颜色 | 状态页面的主色调。 | 在页面中点击选择 | 填写完组织信息后,点击 `确定`。 @@ -32,12 +32,12 @@ keywords: [开源监控系统, 开源网站监控, 状态页面] 点击 `新增组件` 添加需要监控的组件,并填写以下字段: -| 字段名称 | 字段说明 | 举例 | -|------|-----------------------------------------------------------------------------------------|------------------------------------| -| 服务组件 | 组件服务名称 | 开发环境 ElasticSearch | -| 组件描述 | 组件服务的详细描述信息 | 开发环境,ElasticSearch (ip:192.168.1.1) | -| 服务状态统计方式 | 计算组件服务状态的方式。
自动计算:根据组件监控的状态自动计算显示状态。
手动设置:手动配置组件状态。 | 自动计算 / 手动设置 二选一 | -| 匹配标签 | 状态计算关联标签,使用标签关联的所有监控可用性状态数据来计算组件服务状态。 | 在页面选择组件的标签 | +| 字段名称 | 字段说明 | 举例 | +|----------|-------------------------------------------------------------|-------------------------------------| +| 服务组件 | 组件服务名称 | 开发环境 ElasticSearch | +| 组件描述 | 组件服务的详细描述信息 | 开发环境,ElasticSearch (ip:192.168.1.1) | +| 服务状态统计方式 | 计算组件服务状态的方式。
自动计算:根据组件监控的状态自动计算显示状态。
手动设置:手动配置组件状态。 | 自动计算 / 手动设置 二选一 | +| 匹配标签 | 状态计算关联标签,使用标签关联的所有监控可用性状态数据来计算组件服务状态。 | 在页面选择组件的标签 | ### 设置状态统计时间 @@ -61,13 +61,13 @@ status: ![](/img/docs/help/status-1.png) -| 字段名称 | 字段说明 | 举例 | -|----------|-----------------------------------------------|------------------------------------------------| -| 事件名称 | 事件的标题,应简洁明了地反映事件的核心内容。 | "服务器宕机事件 - 2023 年 4 月 5 日" | -| 影响组件 | 选择受此事件影响的组件。 | 在页面中选择 | -| 过程状态 | 设置事件当前状态,便于追踪处理进度。可选值:调查中 / 已确认 / 观察中 / 已修复 | 已确认 | -| 发布消息 | 向相关方传达的正式通知,包括事件详情、影响评估和应对措施。 | Dear All,开发环境异常,开发人员正在紧急处理,预计两小时内修复。请耐心等待,如有紧急事项请联系 Tom:130xxxx0000! | +| 字段名称 | 字段说明 | 举例 | +|------|---------------------------------------------|----------------------------------------------------------------------| +| 事件名称 | 事件的标题,应简洁明了地反映事件的核心内容。 | "服务器宕机事件 - 2023 年 4 月 5 日" | +| 影响组件 | 选择受此事件影响的组件。 | 在页面中选择 | +| 过程状态 | 设置事件当前状态,便于追踪处理进度。可选值:调查中 / 已确认 / 观察中 / 已修复 | 已确认 | +| 发布消息 | 向相关方传达的正式通知,包括事件详情、影响评估和应对措施。 | Dear All,开发环境异常,开发人员正在紧急处理,预计两小时内修复。请耐心等待,如有紧急事项请联系 Tom:130xxxx0000! | > 提示:可以多次发布消息,及时向团队汇报当前状态。 -![](/img/docs/help/status-2.png) \ No newline at end of file +![](/img/docs/help/status-2.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md index cf0cfb417d1..fe5eef718ef 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md @@ -1,49 +1,47 @@ ---- -id: tidb -title: 监控:TiDB数据库监控 -sidebar_label: TiDB数据库 -keywords: [开源监控系统, 开源数据库监控, TiDB数据库监控] ---- - -> 使用 HTTP 和 JDBC 协议对 TiDB 的通用性能指标进行采集监控。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Service端口 | TiDB数据库对外提供用于状态报告的端口,默认为10080。 | -| PD端口 | TiDB数据库的PD端口,默认为2379。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| JDBC端口 | TiDB数据库对外提供用于客户端请求的端口,默认为4000。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 
| 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| JDBC URL | 数据库使用[JDBC驱动的](https://docs.pingcap.com/zh/tidb/stable/dev-guide-connect-to-tidb#jdbc)连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -监控模板将从TiDB系统变量表中检索监控指标,用户可以自行检索[TiDB系统变量表](https://docs.pingcap.com/zh/tidb/stable/system-variables)以查询所需信息或其他系统变量。 - -除此之外,TiDB也提供默认监控指标表,见[Metrics Schema](https://docs.pingcap.com/zh/tidb/stable/metrics-schema)与[METRICS_SUMMARY](https://docs.pingcap.com/zh/tidb/stable/information-schema-metrics-summary),用户可以根据需求自行添加检索式。 - -由于可以被监控的指标过多,下文仅介绍监控模板中所查询的指标。 - -#### 指标集合:系统变量 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | MySQL 的版本和 TiDB 的版本,例如 '8.0.11-TiDB-v7.5.1' | -| version_comment | 无 | TiDB 版本号的其他信息,例如 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible' | -| version_compile_machine | 无 | 运行 TiDB 的 CPU 架构的名称 | -| version_compile_os | 无 | TiDB 所在操作系统的名称 | -| max_connections | 无 | 该变量表示 TiDB 中同时允许的最大客户端连接数,用于资源控制。默认情况下,该变量值为 0 表示不限制客户端连接数。当本变量的值大于 0 且客户端连接数到达此值时,TiDB 服务端将会拒绝新的客户端连接。 | -| datadir | 无 | 数据存储的位置,位置可以是本地路径 /tmp/tidb。如果数据存储在 TiKV 上,则可以是指向 PD 服务器的路径。变量值的格式为 ${pd-ip}:${pd-port},表示 TiDB 在启动时连接到的 PD 服务器。 | -| port | 无 | 使用 MySQL 协议时 tidb-server 监听的端口。 | - - - +--- +id: tidb +title: 监控:TiDB数据库监控 +sidebar_label: TiDB数据库 +keywords: [开源监控系统, 开源数据库监控, TiDB数据库监控] +--- + +> 使用 HTTP 和 JDBC 协议对 TiDB 的通用性能指标进行采集监控。 + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------------------------------------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Service端口 | TiDB数据库对外提供用于状态报告的端口,默认为10080。 | +| PD端口 | TiDB数据库的PD端口,默认为2379。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| JDBC端口 | TiDB数据库对外提供用于客户端请求的端口,默认为4000。 | +| 数据库名称 | 
数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| JDBC URL | 数据库使用[JDBC驱动的](https://docs.pingcap.com/zh/tidb/stable/dev-guide-connect-to-tidb#jdbc)连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | + +### 采集指标 + +监控模板将从TiDB系统变量表中检索监控指标,用户可以自行检索[TiDB系统变量表](https://docs.pingcap.com/zh/tidb/stable/system-variables)以查询所需信息或其他系统变量。 + +除此之外,TiDB也提供默认监控指标表,见[Metrics Schema](https://docs.pingcap.com/zh/tidb/stable/metrics-schema)与[METRICS_SUMMARY](https://docs.pingcap.com/zh/tidb/stable/information-schema-metrics-summary),用户可以根据需求自行添加检索式。 + +由于可以被监控的指标过多,下文仅介绍监控模板中所查询的指标。 + +#### 指标集合:系统变量 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|------------------------------------------------------------------------------------------------------------------| +| version | 无 | MySQL 的版本和 TiDB 的版本,例如 '8.0.11-TiDB-v7.5.1' | +| version_comment | 无 | TiDB 版本号的其他信息,例如 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible' | +| version_compile_machine | 无 | 运行 TiDB 的 CPU 架构的名称 | +| version_compile_os | 无 | TiDB 所在操作系统的名称 | +| max_connections | 无 | 该变量表示 TiDB 中同时允许的最大客户端连接数,用于资源控制。默认情况下,该变量值为 0 表示不限制客户端连接数。当本变量的值大于 0 且客户端连接数到达此值时,TiDB 服务端将会拒绝新的客户端连接。 | +| datadir | 无 | 数据存储的位置,位置可以是本地路径 /tmp/tidb。如果数据存储在 TiKV 上,则可以是指向 PD 服务器的路径。变量值的格式为 ${pd-ip}:${pd-port},表示 TiDB 在启动时连接到的 PD 服务器。 | +| port | 无 | 使用 MySQL 协议时 tidb-server 监听的端口。 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md index f03ebfab25e..8b5e6c8aca9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md @@ -23,7 +23,7 @@ ${FORMATTER [{ + | - } ]} > 示例输出以当前时间为 `2022-04-24 02:40:00.123` -| 名称 | 描述 | 示例 | +| 名称 | 描述 | 示例 | 
|--------------|----------------------------|---------------------| | @now | 格式化为 `yyyy-MM-dd HH:mm:ss` | 2022-04-24 02:40:00 | | @date | 格式化为 `yyyy-MM-dd` | 2022-04-24 | @@ -42,9 +42,9 @@ ${FORMATTER [{ + | - } ]} | 名称 | 描述 | |----|----| -| y | 年 | +| y | 年 | | M | 月 | -| d | 日 | +| d | 日 | | H | 小时 | | m | 分钟 | | s | 秒 | @@ -57,10 +57,9 @@ ${FORMATTER [{ + | - } ]} #### 使用示例 1. 简单表达式 - - `${now}` 获取当前时间,并格式化为 `yyyy-MM-dd HH:mm:ss` - - `${time+1h}` 计算当前时间一小时之后的时间,并格式化为 `HH:mm:ss` - - `${time+1h+15s+30s}` 计算当前时间一小时15分钟30秒之后的时间,并格式化为 `HH:mm:ss` + - `${now}` 获取当前时间,并格式化为 `yyyy-MM-dd HH:mm:ss` + - `${time+1h}` 计算当前时间一小时之后的时间,并格式化为 `HH:mm:ss` + - `${time+1h+15s+30s}` 计算当前时间一小时15分钟30秒之后的时间,并格式化为 `HH:mm:ss` 2. 复杂表达式模板(如果内置的格式化器无法满足需要,可以组合使用多个表达式) - - `${@year}年${@month}月${@day}日`,获取当前日期并按照 yyyy年MM月dd日格式返回 - + - `${@year}年${@month}月${@day}日`,获取当前日期并按照 yyyy年MM月dd日格式返回 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md index c306bebc550..b366ee3c2ac 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md @@ -11,67 +11,65 @@ keywords: [开源监控系统, 开源网站监控, Tomcat监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Tomcat连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置Tomcat连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 
新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | ### Tomcat开启JMX协议步骤 -1. 搭建好tomcat后,进入tomcat下的bin目录,修改catalina.sh文件 注意⚠️替换IP地址 +1. 搭建好tomcat后,进入tomcat下的bin目录,修改catalina.sh文件 注意⚠️替换IP地址 -2. vim catalina.sh +2. 
vim catalina.sh ```aidl CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` -参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 +参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md index e5d1be3a140..3ec51e5464a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, Ubuntu监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| 
interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 
文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md index 06c11717b25..ee2f388873b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md @@ -10,7 +10,7 @@ keywords: [开源监控系统, 开源网络监控, UDP 端口可用性监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |:-------|--------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头 (例如: https://, http://) 。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -26,6 +26,7 @@ keywords: [开源监控系统, 开源网络监控, UDP 端口可用性监控] #### 指标集合:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------|---------|--------| | 响应时间 | 毫秒 (ms) | 网站响应时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md index 5dbb2f2c7c6..8efe5262612 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md @@ -5,26 +5,26 @@ sidebar_label: 网站监测 keywords: [开源监控系统, 开源网站监控] --- -> 对网站是否可用,响应时间等指标进行监测 +> 对网站是否可用,响应时间等指标进行监测 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|-------------------------------------------------------------------| +| 监控Host | 
被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:summary +#### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md index ad8baa7d71b..3bd02f3ce18 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, Websocket监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |------------------|--------------------------------------------------------------| | WebSocket服务的Host | 被监控的Websocket的IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -23,7 +23,7 @@ keywords: [ 开源监控系统, Websocket监控 ] #### 指标集合:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|---------| | responseTime | ms | 响应时间 | | httpVersion | 无 | HTTP 版本 | @@ -31,3 +31,4 @@ keywords: [ 开源监控系统, Websocket监控 ] | statusMessage | 无 | 状态消息 | | connection | 无 | 表示连接方式 | | upgrade | 无 | 升级后的协议 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md index 6a1c79b9ede..41447469e61 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md @@ -6,38 +6,39 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 --- > 通过SNMP协议对Windows操作系统的通用性能指标进行采集监控。 -> 注意⚠️ Windows服务器需开启SNMP服务 +> 注意⚠️ Windows服务器需开启SNMP服务 参考资料: [什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) [什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) [Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) +[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Windows SNMP服务对外提供的端口,默认为 161。 | -| SNMP 版本 | SNMP协议版本 V1 V2c V3 | +| 参数名称 | 参数帮助描述 | +|----------|----------------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Windows SNMP服务对外提供的端口,默认为 161。 | +| SNMP 版本 | SNMP协议版本 V1 V2c V3 | | SNMP 团体字 | SNMP 协议团体名(Community Name),用于实现SNMP网络管理员访问SNMP管理代理时的身份验证。类似于密码,默认值为 public | -| 超时时间 | 协议连接超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 超时时间 | 协议连接超时时间 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:system -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 主机名称 | -| descr | 无 | 操作系统描述 | -| uptime | 无 | 系统运行时间 | -| numUsers | 个数 | 当前用户数 | -| services | 个数 | 当前服务数量 | -| processes | 个数 | 当前进程数量 | -| responseTime | ms | 采集响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| name | 无 | 主机名称 | +| descr | 无 | 
操作系统描述 | +| uptime | 无 | 系统运行时间 | +| numUsers | 个数 | 当前用户数 | +| services | 个数 | 当前服务数量 | +| processes | 个数 | 当前进程数量 | +| responseTime | ms | 采集响应时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md index 2c88fe1e5a9..c35a0226876 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md @@ -15,69 +15,70 @@ keywords: [大数据监控系统, Apache Yarn监控, 资源管理器监控] ## 配置参数 -| 参数名称 | 参数帮助描述 | -| ---------------- |---------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | -| 端口 | Apache Yarn 的监控端口号,默认为8088。 | -| 查询超时时间 | 查询 Apache Yarn 的超时时间,单位毫秒,默认6000毫秒。 | -| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 参数名称 | 参数帮助描述 | +|--------|-------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | Apache Yarn 的监控端口号,默认为8088。 | +| 查询超时时间 | 查询 Apache Yarn 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | ### 采集指标 #### 指标集合:ClusterMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | ---------------------------------- | -| NumActiveNMs | | 当前存活的 NodeManager 个数 | -| NumDecommissionedNMs | | 当前 Decommissioned 的 NodeManager 个数 | -| NumDecommissioningNMs| | 集群正在下线的节点数 | -| NumLostNMs | | 集群丢失的节点数 | -| NumUnhealthyNMs | | 集群不健康的节点数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|------------------------------------| +| NumActiveNMs | | 当前存活的 NodeManager 个数 | +| NumDecommissionedNMs | | 当前 Decommissioned 的 NodeManager 个数 | +| NumDecommissioningNMs | | 集群正在下线的节点数 | +| NumLostNMs | | 集群丢失的节点数 | +| NumUnhealthyNMs | | 集群不健康的节点数 | #### 指标集合:JvmMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | ------------------------------------ | -| MemNonHeapCommittedM | MB | JVM当前非堆内存大小已提交大小 | -| MemNonHeapMaxM | MB | JVM非堆最大可用内存 | -| MemNonHeapUsedM | MB | JVM当前已使用的非堆内存大小 | -| MemHeapCommittedM | MB | 
JVM当前已使用堆内存大小 | -| MemHeapMaxM | MB | JVM堆内存最大可用内存 | -| MemHeapUsedM | MB | JVM当前已使用堆内存大小 | -| GcTimeMillis | | JVM GC时间 | -| GcCount | | JVM GC次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|------------------| +| MemNonHeapCommittedM | MB | JVM当前非堆内存大小已提交大小 | +| MemNonHeapMaxM | MB | JVM非堆最大可用内存 | +| MemNonHeapUsedM | MB | JVM当前已使用的非堆内存大小 | +| MemHeapCommittedM | MB | JVM当前已使用堆内存大小 | +| MemHeapMaxM | MB | JVM堆内存最大可用内存 | +| MemHeapUsedM | MB | JVM当前已使用堆内存大小 | +| GcTimeMillis | | JVM GC时间 | +| GcCount | | JVM GC次数 | #### 指标集合:QueueMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------ | -------- | ------------------------------------ | -| queue | | 队列名称 | -| AllocatedVCores | | 分配的虚拟核数(已分配) | -| ReservedVCores | | 预留核数 | -| AvailableVCores | | 可用核数(尚未分配) | -| PendingVCores | | 阻塞调度核数 | -| AllocatedMB | MB | 已分配(已用)的内存大小 | -| AvailableMB | MB | 可用内存(尚未分配) | -| PendingMB | MB | 阻塞调度内存 | -| ReservedMB | MB | 预留内存 | -| AllocatedContainers | | 已分配(已用)的container数 | -| PendingContainers | | 阻塞调度container个数 | -| ReservedContainers | | 预留container数 | -| AggregateContainersAllocated | | 累积的container分配总数 | -| AggregateContainersReleased | | 累积的container释放总数 | -| AppsCompleted | | 完成的任务数 | -| AppsKilled | | 被杀掉的任务数 | -| AppsFailed | | 失败的任务数 | -| AppsPending | | 阻塞的任务数 | -| AppsRunning | | 提正在运行的任务数 | -| AppsSubmitted | | 提交过的任务数 | -| running_0 | | 运行时间小于60分钟的作业个数 | -| running_60 | | 运行时间介于60~300分钟的作业个数 | -| running_300 | | 运行时间介于300~1440分钟的作业个数 | -| running_1440 | | 运行时间大于1440分钟的作业个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------|------|-----------------------| +| queue | | 队列名称 | +| AllocatedVCores | | 分配的虚拟核数(已分配) | +| ReservedVCores | | 预留核数 | +| AvailableVCores | | 可用核数(尚未分配) | +| PendingVCores | | 阻塞调度核数 | +| AllocatedMB | MB | 已分配(已用)的内存大小 | +| AvailableMB | MB | 可用内存(尚未分配) | +| PendingMB | MB | 阻塞调度内存 | +| ReservedMB | MB | 预留内存 | +| AllocatedContainers | | 已分配(已用)的container数 | +| PendingContainers | | 阻塞调度container个数 | +| 
ReservedContainers | | 预留container数 | +| AggregateContainersAllocated | | 累积的container分配总数 | +| AggregateContainersReleased | | 累积的container释放总数 | +| AppsCompleted | | 完成的任务数 | +| AppsKilled | | 被杀掉的任务数 | +| AppsFailed | | 失败的任务数 | +| AppsPending | | 阻塞的任务数 | +| AppsRunning | | 提正在运行的任务数 | +| AppsSubmitted | | 提交过的任务数 | +| running_0 | | 运行时间小于60分钟的作业个数 | +| running_60 | | 运行时间介于60~300分钟的作业个数 | +| running_300 | | 运行时间介于300~1440分钟的作业个数 | +| running_1440 | | 运行时间大于1440分钟的作业个数 | #### 指标集合:runtime -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | ---------------------------- | -| StartTime | | 启动时间戳 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| StartTime | | 启动时间戳 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md index 64d08a259c4..476498549aa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md @@ -12,7 +12,7 @@ keywords: [开源监控系统, Zookeeper监控监控] > 监控 zookeeper 目前的实现方案使用的是 zookeeper 提供的四字命令 + netcat 来收集指标数据 > 需要用户自己将zookeeper的四字命令加入白名单 -1. 加白名单步骤 +1. 加白名单步骤 > 1.找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` > @@ -25,95 +25,96 @@ keywords: [开源监控系统, Zookeeper监控监控] # 将所有命令添加到白名单中 4lw.commands.whitelist=* ``` + > 3.重启服务 -```shell +```shell zkServer.sh restart ``` -2. netcat 协议 +2. netcat 协议 目前实现方案需要我们部署zookeeper的linux服务器,安装netcat的命令环境 > netcat安装步骤 -```shell -yum install -y nc -``` +> +> ```shell +> yum install -y nc +> ``` 如果终端显示以下信息则说明安装成功 + ```shell Complete! 
``` - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Zookeeper的Linux服务器SSH端口。 | -| 查询超时时间 | 设置Zookeeper连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | Zookeeper所在Linux连接用户名 | -| 密码 | Zookeeper所在Linux连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Zookeeper的Linux服务器SSH端口。 | +| 查询超时时间 | 设置Zookeeper连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | Zookeeper所在Linux连接用户名 | +| 密码 | Zookeeper所在Linux连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:conf -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| clientPort | 无 | 端口 | -| dataDir | 无 | 数据快照文件目录,默认10万次操作生成一次快照 | -| dataDirSize | kb | 数据快照文件大小 | -| dataLogDir | 无 | 事务日志文件目录,生产环境放在独立磁盘上 | -| dataLogSize | kb | 事务日志文件大小 | -| tickTime | ms | 服务器之间或客户端与服务器之间维持心跳的时间间隔 | -| minSessionTimeout | ms| 最小session超时时间 心跳时间x2 指定时间小于该时间默认使用此时间 | -| maxSessionTimeout | ms |最大session超时时间 心跳时间x20 指定时间大于该时间默认使用此时间 | -| serverId | 无 | 服务器编号 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|----------------------------------------| +| clientPort | 无 | 端口 | +| dataDir | 无 | 数据快照文件目录,默认10万次操作生成一次快照 | +| dataDirSize | kb | 数据快照文件大小 | +| dataLogDir | 无 | 事务日志文件目录,生产环境放在独立磁盘上 | +| dataLogSize | kb | 事务日志文件大小 | +| tickTime | ms | 服务器之间或客户端与服务器之间维持心跳的时间间隔 | +| minSessionTimeout | ms | 最小session超时时间 心跳时间x2 指定时间小于该时间默认使用此时间 | +| maxSessionTimeout | ms | 最大session超时时间 心跳时间x20 指定时间大于该时间默认使用此时间 | +| serverId | 无 | 服务器编号 | #### 指标集合:stats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| zk_version | 无 | 服务器版本 | -| 
zk_server_state | 无 | 服务器角色 | -| zk_num_alive_connections | 个 | 连接数 | -| zk_avg_latency | ms | 平均延时 | -| zk_outstanding_requests | 个 | 堆积请求数 | -| zk_znode_count | 个 | znode结点数量 | -| zk_packets_sent | 个 | 发包数 | -| zk_packets_received | 个 | 收包数 | -| zk_watch_count | 个 | watch数量 | -| zk_max_file_descriptor_count | 个 | 最大文件描述符数量 | -| zk_approximate_data_size | kb | 数据大小 | -| zk_open_file_descriptor_count | 个 | 打开的文件描述符数量 | -| zk_max_latency | ms | 最大延时 | -| zk_ephemerals_count | 个 | 临时节点数 | -| zk_min_latency | ms | 最小延时 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------------|------|------------| +| zk_version | 无 | 服务器版本 | +| zk_server_state | 无 | 服务器角色 | +| zk_num_alive_connections | 个 | 连接数 | +| zk_avg_latency | ms | 平均延时 | +| zk_outstanding_requests | 个 | 堆积请求数 | +| zk_znode_count | 个 | znode结点数量 | +| zk_packets_sent | 个 | 发包数 | +| zk_packets_received | 个 | 收包数 | +| zk_watch_count | 个 | watch数量 | +| zk_max_file_descriptor_count | 个 | 最大文件描述符数量 | +| zk_approximate_data_size | kb | 数据大小 | +| zk_open_file_descriptor_count | 个 | 打开的文件描述符数量 | +| zk_max_latency | ms | 最大延时 | +| zk_ephemerals_count | 个 | 临时节点数 | +| zk_min_latency | ms | 最小延时 | #### 指标集合:envi -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |-----| ----------- | -| zk_version | 无 | ZooKeeper版本 | -| hostname | 无 | 主机名 | -| java_version | 无 | Java版本 | -| java_vendor | 无 | Java供应商 | -| java_home | 无 | Java主目录 | -| java_class_path | 无 | Java类路径 | -| java_library_path | 无 | Java库路径 | -| java_io_tmpdir | 无 | Java临时目录 | -| java_compiler | 无 | Java编译器 | -| os_name | 无 | 操作系统名称 | -| os_arch | 无 | 操作系统架构 | -| os_version | 无 | 操作系统版本 | -| user_name | 无 | 用户名 | -| user_home | 无 | 用户主目录 | -| user_dir | 无 | 用户当前目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|-------------| +| zk_version | 无 | ZooKeeper版本 | +| hostname | 无 | 主机名 | +| java_version | 无 | Java版本 | +| java_vendor | 无 | Java供应商 | +| java_home | 无 | Java主目录 | +| java_class_path | 无 | Java类路径 | +| java_library_path | 无 | Java库路径 | +| java_io_tmpdir | 无 
| Java临时目录 | +| java_compiler | 无 | Java编译器 | +| os_name | 无 | 操作系统名称 | +| os_arch | 无 | 操作系统架构 | +| os_version | 无 | 操作系统版本 | +| user_name | 无 | 用户名 | +| user_home | 无 | 用户主目录 | +| user_dir | 无 | 用户当前目录 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md index 1e33cec89df..e22cea0502b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md @@ -16,7 +16,6 @@ slug: / [![QQ](https://img.shields.io/badge/QQ-630061200-orange)](https://qm.qq.com/q/FltGGGIX2m) [![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCri75zfWX0GHqJFPENEbLow?logo=youtube&label=YouTube%20Channel)](https://www.youtube.com/channel/UCri75zfWX0GHqJFPENEbLow) - ## 🎡 介绍 [Apache HertzBeat](https://github.com/apache/hertzbeat) (incubating) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 @@ -31,7 +30,7 @@ slug: / - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 - 提供强大的状态页构建能力,轻松向用户传达您产品服务的实时状态。 -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ---- @@ -48,7 +47,6 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![hertzbeat](/img/home/9.png) - ### 内置监控类型 **官方内置了大量的监控模版类型,方便用户直接在页面添加使用,一款监控类型对应一个YML监控模版** @@ -113,11 +111,11 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 高性能集群 -> 当监控数量指数级上升,采集性能下降或者环境不稳定容易造成采集器单点故障时,这时我们的采集器集群就出场了。 +> 当监控数量指数级上升,采集性能下降或者环境不稳定容易造成采集器单点故障时,这时我们的采集器集群就出场了。 -- `HertzBeat` 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 -- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 -- 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 +- `HertzBeat` 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 +- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 +- 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 ![hertzbeat](/img/docs/cluster-arch.png) @@ -134,10 +132,10 @@ HertzBeat 
使用这些已有的标准协议或规范,将他们抽象规范可 ### 易用友好 -- 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 -- 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 -- 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 -- 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 +- 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 +- 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 +- 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 +- 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 - 阈值告警通知友好,基于表达式阈值配置,多种告警通知渠道,支持告警静默,时段标签告警级别过滤等。 ### 完全开源 @@ -151,8 +149,7 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![cncf](/img/home/cncf-landscape-left-logo.svg) ------ - +--- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ----- @@ -269,7 +266,6 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/13.png) - ### 告警静默 - 当通过阈值规则判断触发告警后,会进入到告警静默,告警静默会根据规则对特定一次性时间段或周期性时候段的告警消息屏蔽静默,此时间段不发送告警消息。 @@ -301,8 +297,7 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/9.png) - ------ +--- **还有更多强大的功能快去探索呀。Have Fun!** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/design.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/design.md index 7799d12fe52..7f3854c3e12 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/design.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/design.md @@ -1,9 +1,9 @@ --- id: design title: 设计文档 -sidebar_label: 设计文档 +sidebar_label: 设计文档 --- -### HertzBeat 架构 +### HertzBeat 架构 -![architecture](/img/docs/hertzbeat-arch.svg) +![architecture](/img/docs/hertzbeat-arch.svg) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md index 46699d69a23..0e01e014901 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md @@ -1,20 +1,20 @@ --- id: resource 
title: 相关资源 -sidebar_label: 相关资源 +sidebar_label: 相关资源 --- -## 图标资源 +## 图标资源 -### HertzBeat LOGO +### HertzBeat LOGO ![logo](/img/hertzbeat-logo.svg) Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) -### HertzBeat Brand LOGO +### HertzBeat Brand LOGO ![logo](/img/hertzbeat-brand.svg) -Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) +Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md index 028138275cb..ce89d825b7e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md @@ -1,7 +1,7 @@ --- id: account-modify title: 配置修改账户密码和加密密钥 -sidebar_label: 更新账户和密钥 +sidebar_label: 更新账户和密钥 --- ## 更新账户 @@ -9,7 +9,7 @@ sidebar_label: 更新账户和密钥 Apache HertzBeat (incubating) 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 -配置文件内容参考如下 +配置文件内容参考如下 ```yaml @@ -157,4 +157,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** +**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md index 7dde7ec14e9..01380784169 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md @@ -1,23 +1,25 @@ --- id: custom-config title: 常见参数配置 -sidebar_label: 常见参数配置 +sidebar_label: 常见参数配置 --- 这里描述了如果配置短信服务器,内置可用性告警触发次数等。 
-**`hertzbeat`的配置文件`application.yml`** +**`hertzbeat`的配置文件`application.yml`** -### 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 - 安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 +### 配置HertzBeat的配置文件 + +修改位于 `hertzbeat/config/application.yml` 的配置文件 +注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 +安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 1. 配置短信发送服务器 -> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 +> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 + +在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) -在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) ```yaml common: sms: @@ -28,15 +30,17 @@ common: sign-name: 赫兹跳动 template-id: 1343434 ``` + 1.1 腾讯云短信创建签名(sign-name) ![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) 1.2 腾讯云短信创建正文模板(template-id) + ``` 监控:{1},告警级别:{2}。内容:{3} ``` -![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) +![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) 1.3 腾讯云短信创建应用(app-id) ![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) @@ -44,8 +48,7 @@ common: 1.4 腾讯云访问管理(secret-id、secret-key) ![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) - -2. 配置告警自定义参数 +2. 配置告警自定义参数 ```yaml alerter: @@ -53,11 +56,12 @@ alerter: console-url: https://console.tancloud.io ``` -3. 使用外置redis代替内存存储实时指标数据 +3. 
使用外置redis代替内存存储实时指标数据 -> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 +> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 + +注意⚠️ `memory.enabled: false, redis.enabled: true` -注意⚠️ `memory.enabled: false, redis.enabled: true` ```yaml warehouse: store: @@ -70,3 +74,4 @@ warehouse: port: 6379 password: 123456 ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md index 8be78289b9a..2bee426c1ab 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md @@ -1,7 +1,7 @@ --- id: docker-compose-deploy title: 通过 Docker Compose 方式安装 HertzBeat -sidebar_label: Docker Compose方式安装 +sidebar_label: Docker Compose方式安装 --- :::tip @@ -13,22 +13,20 @@ sidebar_label: Docker Compose方式安装 执行命令 `docker compose version` 检查是否拥有 Docker Compose 环境。 ::: +1. 下载启动脚本包 - -1. 下载启动脚本包 +从 [下载地址](https://github.com/apache/hertzbeat/releases/download/v1.6.0/apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz) 下载安装脚本包 `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` -从 [下载地址](https://github.com/apache/hertzbeat/releases/download/v1.6.0/apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz) 下载安装脚本包 `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` - -2. 选择使用 HertzBeat + PostgreSQL + VictoriaMetrics 方案 +2. 
选择使用 HertzBeat + PostgreSQL + VictoriaMetrics 方案 :::tip - `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` 解压后包含多个部署方案,这里我们推荐选择 `hertzbeat-postgresql-victoria-metrics` 方案。 -- 其它部署方式请详细阅读各个部署方案的 README.md 文件, MySQL 方案需要自行准备 MySQL 驱动包。 +- 其它部署方式请详细阅读各个部署方案的 README.md 文件, MySQL 方案需要自行准备 MySQL 驱动包。 ::: -- 解压脚本包 +- 解压脚本包 -``` +``` $ tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz ``` @@ -51,7 +49,7 @@ docker-compose up -d > 查看各个容器的运行状态,up 为正常运行状态 -``` +``` docker-compose ps ``` @@ -60,9 +58,8 @@ docker-compose ps **HAVE FUN** - ---- - + ### 部署常见问题 **最多的问题就是容器网络问题,请先提前排查** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md index 5804522bd93..d89816f8a12 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md @@ -1,7 +1,7 @@ --- id: docker-deploy title: 通过 Docker 方式安装 HertzBeat -sidebar_label: Docker方式安装 +sidebar_label: Docker方式安装 --- :::tip @@ -10,14 +10,14 @@ sidebar_label: Docker方式安装 ::: :::note -需您的环境中已经拥有 Docker 环境,若未安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/) +需您的环境中已经拥有 Docker 环境,若未安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/) ::: ### 部署 HertzBeat Server 1. 执行以下命令 -```shell +```shell $ docker run -d -p 1157:1157 -p 1158:1158 \ -v $(pwd)/data:/opt/hertzbeat/data \ -v $(pwd)/logs:/opt/hertzbeat/logs \ @@ -48,7 +48,7 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ ::: 2. 开始探索 HertzBeat - 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 ### 部署 HertzBeat Collector 集群(可选) @@ -61,7 +61,7 @@ HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数 1. 
执行以下命令 -```shell +```shell $ docker run -d \ -e IDENTITY=custom-collector-name \ -e MODE=public \ @@ -69,6 +69,7 @@ $ docker run -d \ -e MANAGER_PORT=1158 \ --name hertzbeat-collector apache/hertzbeat-collector ``` + > 命令参数详解 - `docker run -d` : 通过 Docker 后台运行容器 @@ -90,36 +91,40 @@ $ docker run -d \ 2. 开始探索 HertzBeat Collector 浏览器访问 http://ip:1157/ 即可开始探索使用,默认账户密码 admin/hertzbeat。 -**HAVE FUN** +**HAVE FUN** ---- -### Docker 方式部署常见问题 +### Docker 方式部署常见问题 **最多的问题就是网络问题,请先提前排查** 1. MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败 -此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` +> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. 按照流程部署,访问 http://ip:1157/ 无界面 -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - > 安装初始化此时序数据库 4. 安装配置了时序数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 请检查配置的时许数据库参数是否正确 > 时序数据库对应的 enable 是否设置为true > 注意⚠️若hertzbeat和外置数据库都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 -5. application.yml 是干什么用的 +5. 
application.yml 是干什么用的 > 此文件是HertzBeat的配置文件,用于配置HertzBeat的各种参数,如数据库连接信息,时序数据库配置等。 @@ -137,4 +142,4 @@ HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) -具体修改步骤参考 [配置修改账户密码](account-modify) +具体修改步骤参考 [配置修改账户密码](account-modify) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md index 57cf7603a16..6f946707cab 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md @@ -8,21 +8,24 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 -[GreptimeDB](https://github.com/GreptimeTeam/greptimedb) is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. +[GreptimeDB](https://github.com/GreptimeTeam/greptimedb) is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** + +### 通过Docker方式安装GreptimeDB -### 通过Docker方式安装GreptimeDB > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装GreptimeDB +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装GreptimeDB ```shell $ docker run -p 127.0.0.1:4000-4003:4000-4003 \ @@ -35,17 +38,17 @@ $ docker run -p 127.0.0.1:4000-4003:4000-4003 \ --postgres-addr 0.0.0.0:4003 ``` - `-v "$(pwd)/greptimedb:/tmp/greptimedb` 为 greptimedb 数据目录本地持久化挂载,需将 `$(pwd)/greptimedb` 替换为实际本地存在的目录,默认使用执行命令的当前目录下的 `greptimedb` 目录作为数据目录。 +`-v "$(pwd)/greptimedb:/tmp/greptimedb` 为 greptimedb 数据目录本地持久化挂载,需将 `$(pwd)/greptimedb` 替换为实际本地存在的目录,默认使用执行命令的当前目录下的 `greptimedb` 目录作为数据目录。 - 使用```$ docker ps```查看数据库是否启动成功 +使用```$ docker ps```查看数据库是否启动成功 -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -67,8 +70,9 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 
时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md index 91d30eef74d..82b833d459c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md @@ -1,36 +1,39 @@ --- id: influxdb-init title: 依赖时序数据库服务InfluxDB安装初始化(可选) -sidebar_label: 指标数据存储InfluxDB +sidebar_label: 指标数据存储InfluxDB --- Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 -InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 +InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** ### 1. 直接使用华为云服务 GaussDB For Influx -> 开通使用[华为云云数据库 GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) - +> 开通使用[华为云云数据库 GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) +> > 获取云数据库对外暴露连接地址,账户密码即可 ⚠️注意云数据库默认开启了SSL,云数据库地址应使用 `https:` -### 2. 通过Docker方式安装InfluxDB +### 2. 通过Docker方式安装InfluxDB + > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装InfluxDB 1.x +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装InfluxDB 1.x ```shell $ docker run -p 8086:8086 \ @@ -38,17 +41,16 @@ $ docker run -p 8086:8086 \ influxdb:1.8 ``` - `-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 +使用```$ docker ps```查看数据库是否启动成功 - -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -67,10 +69,9 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 -1. 时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 +1. 
时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md index b4c924f6d9f..859e2ba39d0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md @@ -3,6 +3,7 @@ id: iotdb-init title: 依赖时序数据库服务IoTDB安装初始化(可选) sidebar_label: 指标数据存储IoTDB --- + Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 @@ -89,16 +90,15 @@ warehouse: 参数说明: - -| 参数名称 | 参数说明 | -| ------------------- |-------------------------------------------| +| 参数名称 | 参数说明 | +|---------------------|-------------------------------------------| | enabled | 是否启用 | | host | IoTDB数据库地址 | | rpc-port | IoTDB数据库端口 | | node-urls | IoTDB集群地址 | | username | IoTDB数据库账户 | | password | IoTDB数据库密码 | -| version | IoTDB数据库版本,已废弃,仅支持V1.* | +| version | IoTDB数据库版本,已废弃,仅支持V1.* | | query-timeout-in-ms | 查询超时时间 | | expire-time | 数据存储时间,默认'7776000000'(90天,单位为毫秒,-1代表永不过期) | @@ -122,3 +122,4 @@ warehouse: > iot-db enable是否设置为true > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md index dadca1e3eb1..1b7154a2fcb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md @@ -1,23 +1,27 @@ --- id: mysql-change title: 关系型数据库使用 Mysql 替换依赖的 H2 存储系统元数据(可选) -sidebar_label: 元数据存储Mysql +sidebar_label: 元数据存储Mysql --- -MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) 除了支持使用默认内置的H2数据库外,还可以切换为使用MYSQL存储监控信息,告警信息,配置信息等结构化关系数据。 + +MYSQL是一款值得信赖的关系型数据库,Apache 
HertzBeat (incubating) 除了支持使用默认内置的H2数据库外,还可以切换为使用MYSQL存储监控信息,告警信息,配置信息等结构化关系数据。 注意⚠️ 使用外置Mysql数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 -> 如果您已有MYSQL环境,可直接跳到数据库创建那一步。 +> 如果您已有MYSQL环境,可直接跳到数据库创建那一步。 + +### 通过Docker方式安装MYSQL -### 通过Docker方式安装MYSQL 1. 下载安装Docker环境 Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后请于终端检查Docker版本输出是否正常。 + 安装完毕后请于终端检查Docker版本输出是否正常。 + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. Docker安装MYSQl +2. Docker安装MYSQl + ``` $ docker run -d --name mysql \ -p 3306:3306 \ @@ -26,12 +30,14 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) --restart=always \ mysql:5.7 ``` + `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 -### 数据库创建 +### 数据库创建 + 1. 进入MYSQL或使用客户端连接MYSQL服务 - `mysql -uroot -p123456` + `mysql -uroot -p123456` 2. 创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 查看hertzbeat数据库是否创建成功 @@ -42,15 +48,16 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) - 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.0.25.jar. https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip - 将此 jar 包拷贝放入 HertzBeat 的安装目录下的 `ext-lib` 目录下. 
-### 修改hertzbeat的配置文件application.yml切换数据源 +### 修改hertzbeat的配置文件application.yml切换数据源 - 配置 HertzBeat 的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 - ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) + ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) + + 需修改部分原参数: - 需修改部分原参数: ```yaml spring: datasource: @@ -70,7 +77,9 @@ spring: logging: level: SEVERE ``` - 具体替换参数如下,需根据mysql环境配置账户密码IP: + +具体替换参数如下,需根据mysql环境配置账户密码IP: + ```yaml spring: datasource: @@ -90,6 +99,6 @@ spring: level: SEVERE ``` -- 通过docker启动时,建议修改host为宿主机的外网IP地址,包括mysql连接字符串。 +- 通过docker启动时,建议修改host为宿主机的外网IP地址,包括mysql连接字符串。 **启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md index 7a0e627c57c..86c08ce67dd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md @@ -1,7 +1,7 @@ --- id: package-deploy title: 通过安装包安装 HertzBeat -sidebar_label: 安装包方式安装 +sidebar_label: 安装包方式安装 --- :::tip @@ -10,17 +10,19 @@ Apache HertzBeat (incubating) 支持在Linux Windows Mac系统安装运行,CPU ::: ### 部署 HertzBeat Server - + 1. 下载安装包 -从 [下载页面](/docs/download) 下载您系统环境对应的安装包版本 `apache-hertzbeat-xxx-incubating-bin.tar.gz` +从 [下载页面](/docs/download) 下载您系统环境对应的安装包版本 `apache-hertzbeat-xxx-incubating-bin.tar.gz` + +2. 设置配置文件(可选) -2. 
设置配置文件(可选) +解压安装包到主机 eg: /opt/hertzbeat -解压安装包到主机 eg: /opt/hertzbeat -``` +``` $ tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz ``` + :::tip 位于 `config/application.yml` 的配置文件,您可以根据需求修改配置文件来配置外部依赖的服务,如数据库,时序数据库等参数。 HertzBeat 启动时默认全使用内部服务,但生产环境建议切换为外部数据库服务。 @@ -28,25 +30,26 @@ HertzBeat 启动时默认全使用内部服务,但生产环境建议切换为 建议元数据存储使用 [PostgreSQL](postgresql-change), 指标数据存储使用 [VictoriaMetrics](victoria-metrics-init), 具体步骤参见 -- [内置 H2 数据库切换为 PostgreSQL](postgresql-change) +- [内置 H2 数据库切换为 PostgreSQL](postgresql-change) - [使用 VictoriaMetrics 存储指标数据](victoria-metrics-init) -3. 配置账户文件(可选) +3. 配置账户文件(可选) HertzBeat 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过修改位于 `config/sureness.yml` 的配置文件实现,具体参考 -- [配置修改账户密码](account-modify) +- [配置修改账户密码](account-modify) 4. 启动 -执行位于安装目录 bin 下的启动脚本 startup.sh, windows 环境下为 startup.bat -``` +执行位于安装目录 bin 下的启动脚本 startup.sh, windows 环境下为 startup.bat + +``` $ ./startup.sh ``` 5. 开始探索HertzBeat -浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 ### 部署 HertzBeat Collector 集群(可选) @@ -64,22 +67,23 @@ HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数 2. 设置配置文件 解压安装包到主机 eg: /opt/hertzbeat-collector -``` + +``` $ tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz ``` 配置采集器的配置文件 `config/application.yml` 里面的 HertzBeat Server 连接 IP, 端口, 采集器名称(需保证唯一性)等参数。 ```yaml - collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} +collector: + dispatch: + entrance: + netty: + enabled: true + identity: ${IDENTITY:} + mode: ${MODE:public} + manager-host: ${MANAGER_HOST:127.0.0.1} + manager-port: ${MANAGER_PORT:1158} ``` > 参数详解 @@ -92,7 +96,8 @@ $ tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz 3. 
启动 执行位于安装目录 hertzbeat-collector/bin/ 下的启动脚本 startup.sh, windows 环境下为 startup.bat -``` + +``` $ ./startup.sh ``` @@ -101,9 +106,8 @@ $ ./startup.sh **HAVE FUN** - ---- - + ### 安装包部署常见问题 **最多的问题就是网络环境问题,请先提前排查** @@ -114,16 +118,18 @@ $ ./startup.sh 要求:JAVA17环境 下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) 安装后命令行检查是否成功安装 - ``` - $ java -version - java version "17.0.9" - Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) - Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) - ``` +``` +$ java -version +java version "17.0.9" +Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) +Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) + +``` 2. 按照流程部署,访问 http://ip:1157/ 无界面 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md index 196e5ac2abd..331f021c747 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md @@ -3,44 +3,49 @@ id: postgresql-change title: 关系型数据库使用 PostgreSQL 替换依赖的 H2 存储系统元数据(推荐) sidebar_label: 元数据存储PostgreSQL(推荐) --- -PostgreSQL 是一个功能强大,开源的关系型数据库管理系统(RDBMS)。Apache HertzBeat (incubating) 除了支持使用默认内置的 H2 数据库外,还可以切换为使用 PostgreSQL 存储监控信息,告警信息,配置信息等结构化关系数据。 -注意⚠️ 使用外置 PostgreSQL 数据库替换内置 H2 数据库为可选项,但建议生产环境配置,以提供更好的性能 +PostgreSQL 是一个功能强大,开源的关系型数据库管理系统(RDBMS)。Apache HertzBeat (incubating) 除了支持使用默认内置的 H2 数据库外,还可以切换为使用 PostgreSQL 存储监控信息,告警信息,配置信息等结构化关系数据。 -> 如果您已有 PostgreSQL 环境,可直接跳到数据库创建那一步。 +注意⚠️ 使用外置 PostgreSQL 数据库替换内置 H2 数据库为可选项,但建议生产环境配置,以提供更好的性能 +> 如果您已有 PostgreSQL 环境,可直接跳到数据库创建那一步。 -### 通过 Docker 方式安装 PostgreSQL +### 通过 Docker 方式安装 
PostgreSQL 1. 下载安装 Docker 环境 - Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。安装完毕后请于终端检查 Docker 版本输出是否正常。 + Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。安装完毕后请于终端检查 Docker 版本输出是否正常。 + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` - 2. Docker 安装 PostgreSQL + ```shell $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` + 使用 ```$ docker ps``` 查看数据库是否启动成功 -### 数据库创建 +### 数据库创建 + +1. 进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 -1. 进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 ```shell su - postgres psql ``` - -2. 创建名称为 hertzbeat 的数据库 +2. 创建名称为 hertzbeat 的数据库 + ```sql CREATE DATABASE hertzbeat; ``` -3. 查看 hertzbeat 数据库是否创建成功 +3. 查看 hertzbeat 数据库是否创建成功 + ```sql SELECT * FROM pg_database where datname='hertzbeat'; ``` + ### 修改 hertzbeat 的配置文件 application.yml 切换数据源 1. 配置 HertzBeat 的配置文件 @@ -68,7 +73,9 @@ spring: logging: level: SEVERE ``` + 具体替换参数如下,需根据 PostgreSQL 环境配置账户密码 IP: + ```yaml spring: datasource: @@ -88,4 +95,4 @@ spring: level: SEVERE ``` -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md index 3702a89d87d..064230770a6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md @@ -1,18 +1,18 @@ --- id: quickstart title: 快速开始 -sidebar_label: 快速开始 +sidebar_label: 快速开始 --- ### 🐕 开始使用 - 如果您是想将 Apache HertzBeat (incubating) 部署到本地搭建监控系统,请参考下面的部署文档进行操作。 +### 🍞 HertzBeat安装 -### 🍞 HertzBeat安装 > HertzBeat支持通过源码安装启动,Docker容器运行和安装包方式安装部署,CPU架构支持X86/ARM64。 -#### 方式一:Docker方式快速安装 +#### 方式一:Docker方式快速安装 1. 
`docker` 环境仅需一条命令即可开始 @@ -29,14 +29,15 @@ sidebar_label: 快速开始 ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 -更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) +更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) -#### 方式二:通过安装包安装 +#### 方式二:通过安装包安装 1. 下载您系统环境对应的安装包`hertzbeat-xx.tar.gz` [Download Page](https://hertzbeat.apache.org/docs/download) 2. 配置 HertzBeat 的配置文件 `hertzbeat/config/application.yml`(可选) @@ -45,6 +46,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 5. 部署采集器集群(可选) - 下载您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [Download Page](https://hertzbeat.apache.org/docs/download) - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` + ```yaml collector: dispatch: @@ -59,9 +61,9 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 -更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) +更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) -#### 方式三:本地代码启动 +#### 方式三:本地代码启动 1. 此为前后端分离项目,本地代码调试需要分别启动后端工程`manager`和前端工程`web-app` 2. 
后端:需要`maven3+`, `java17`和`lombok`环境,修改`YML`配置信息并启动`manager`服务 @@ -74,7 +76,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 通过 [docker-compose部署脚本](https://github.com/apache/hertzbeat/tree/master/script/docker-compose) 一次性把 postgresql/mysql 数据库, victoria-metrics/iotdb/tdengine 时序数据库和 hertzbeat 安装部署。 -详细步骤参考 [docker-compose部署方案](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/README.md) +详细步骤参考 [docker-compose部署方案](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/README.md) ##### 方式五:Kubernetes Helm Charts 部署 hertzbeat+collector+postgresql+tsdb diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/rainbond-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/rainbond-deploy.md index 5da0679d327..8e01b8cf7a2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/rainbond-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/rainbond-deploy.md @@ -1,7 +1,7 @@ --- id: rainbond-deploy title: 使用 Rainbond 部署 HertzBeat -sidebar_label: Rainbond方式部署 +sidebar_label: Rainbond方式部署 --- 如果你不熟悉 Kubernetes,想在 Kubernetes 中安装 Apache HertzBeat (incubating),可以使用 Rainbond 来部署。Rainbond 是一个基于 Kubernetes 构建的云原生应用管理平台,可以很简单的将你的应用部署到 Kubernetes中。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md index f3892a20a64..1eb90ccaa83 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md @@ -4,7 +4,6 @@ title: SSL证书过期监控使用案例 sidebar_label: SSL证书过期监控使用案例 --- - 现在大部分网站都默认支持 HTTPS,我们申请的证书一般是3个月或者1年,很容易随着时间的流逝SSL证书过期了我们却没有第一时间发现,或者在过期之前没有及时更新证书。 这篇文章介绍如果使用 hertzbeat 监控系统来检测我们网站的SSL证书有效期,当证书过期时或证书快过期前几天,给我们发告警消息。 @@ -13,7 +12,6 @@ sidebar_label: SSL证书过期监控使用案例 Apache HertzBeat (incubating) 
一个拥有强大自定义监控能力,无需Agent的实时监控工具。网站监测,PING连通性,端口可用性,数据库,操作系统,中间件,API监控,阈值告警,告警通知(邮件微信钉钉飞书)。 - github: https://github.com/apache/hertzbeat #### 安装 HertzBeat @@ -30,10 +28,8 @@ github: https://github.com/apache/hertzbeat > 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 - ![](/img/docs/start/ssl_1.png) - 2. 配置监控网站 > 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md index b64743d1f7b..9837a5a5b99 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md @@ -8,24 +8,26 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 -TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 +TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有TDengine环境,可直接跳到创建数据库实例那一步。 +### 通过Docker方式安装TDengine -### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装TDengine +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装TDengine ```shell $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ @@ -34,23 +36,23 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ tdengine/tdengine:3.0.4.0 ``` - `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 - `-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 +`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 +使用```$ docker ps```查看数据库是否启动成功 + +### 创建数据库实例 -### 创建数据库实例 +> [TDengine CLI 小技巧](https://docs.taosdata.com/develop/model/) -> [TDengine CLI 小技巧](https://docs.taosdata.com/develop/model/) +1. 进入数据库Docker容器 -1. 进入数据库Docker容器 ``` $ docker exec -it tdengine /bin/bash ``` - 2. 修改账户密码 > 建议您修改密码。TDengine默认的账户密码是 root/taosdata - > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: + > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: ``` root@tdengine-server:~/TDengine-server# taos @@ -58,6 +60,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> ``` + > 在 TDengine CLI 中使用 alter user 命令可以修改用户密码,缺省密码为 taosdata 3. 创建名称为hertzbeat的数据库 @@ -77,24 +80,23 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ taos> show databases; taos> use hertzbeat; ``` - 5. 退出TDengine CLI ``` 输入 q 或 quit 或 exit 回车 ``` -**注意⚠️若是安装包安装的TDengine** +**注意⚠️若是安装包安装的TDengine** > 除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -112,19 +114,24 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 -2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] +2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 -3. 监控详情历史图片不展示或无数据,已经配置了TDengine +3. 监控详情历史图片不展示或无数据,已经配置了TDengine + > 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 4. 安装配置了TDengine数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 请检查配置参数是否正确 > td-engine enable是否设置为true > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 +> 可根据logs目录下启动日志排查 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md index 6a076a89d69..8c8becc369d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md @@ -7,6 +7,7 @@ sidebar_label: 1.6.0升级指南 # HertzBeat 1.6.0 升级指南 ### 注意:该指南适用于1.5.0向1.6.0版本升级 + ### 如果你使用更老的版本,建议使用导出功能重新安装,或先升级到1.5.0再按本指南升级到1.6.0 ### 二进制安装包升级 @@ -18,6 +19,7 @@ sidebar_label: 1.6.0升级指南 - 当你的服务器中默认环境变量为Java17时,这一步你无需任何操作。 - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**没有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 
选择相应的发行版下载,并在搜索引擎搜索如何设置新的环境变量指向新的Java17。 - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并将解压后的文件夹重命名为java,复制到Hertzbeat的解压目录下。 + 2. 升级数据库 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), @@ -30,6 +32,7 @@ sidebar_label: 1.6.0升级指南 - `application.yml`一般需要修改以下部分 默认为: + ```yaml datasource: driver-class-name: org.h2.Driver @@ -48,7 +51,9 @@ sidebar_label: 1.6.0升级指南 logging: level: SEVERE ``` + 如若修改为mysql数据库,给出一个示例: + ```yaml datasource: driver-class-name: com.mysql.cj.jdbc.Driver @@ -69,6 +74,7 @@ sidebar_label: 1.6.0升级指南 ``` - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + ```yaml # account info config # eg: admin has role [admin,user], password is hertzbeat @@ -93,10 +99,10 @@ account: role: [user] ``` - 4. 添加相应的数据库驱动 - 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动,复制到安装目录下`ext-lib`中。 + 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动,复制到安装目录下`ext-lib`中。 + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) oracle(如果你要监控oracle,这两个驱动是必须的) [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) @@ -106,6 +112,7 @@ oracle(如果你要监控oracle,这两个驱动是必须的) ### Docker 方式升级 - Mysql数据库 1. 
关闭 HertzBeat 容器 + ``` docker stop hertzbeat ``` @@ -122,6 +129,7 @@ docker stop hertzbeat - `application.yml`一般需要修改以下部分 默认为: + ```yaml datasource: driver-class-name: com.mysql.cj.jdbc.Driver @@ -142,6 +150,7 @@ docker stop hertzbeat ``` - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + ```yaml # account info config # eg: admin has role [admin,user], password is hertzbeat @@ -168,7 +177,8 @@ account: 4. 添加相应的数据库驱动 - 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) oracle(如果你要监控oracle,这两个驱动是必须的) [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) @@ -178,6 +188,7 @@ oracle(如果你要监控oracle,这两个驱动是必须的) ### Docker安装升级 - H2内置数据库(生产环境不推荐使用H2) 1. 
关闭 HertzBeat 容器 + ``` docker stop hertzbeat ``` @@ -187,9 +198,11 @@ docker stop hertzbeat 前题你已经将 H2 数据库文件 data 目录挂载到本地,或者启动老容器手动将 /opt/hertzbeat/data 目录拷贝出来。 下载 h2 驱动 jar [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220) 使用 h2 驱动 jar 本地启动数据库 + ``` java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 H2 执行升级sql。 @@ -200,6 +213,7 @@ java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 - `application.yml`一般需要修改以下部分 默认为: + ```yaml datasource: driver-class-name: org.h2.Driver @@ -220,6 +234,7 @@ java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + ```yaml # account info config # eg: admin has role [admin,user], password is hertzbeat @@ -244,10 +259,10 @@ account: role: [user] ``` - 4. 添加相应的数据库驱动 - 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) oracle(如果你要监控oracle,这两个驱动是必须的) [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) @@ -255,8 +270,8 @@ oracle(如果你要监控oracle,这两个驱动是必须的) 接下来,像之前那样 Docker 运行启动,即可体验最新的HertzBeat1.6.0! 
### 通过导出导入升级 -> 若不想如上繁琐的脚本升级方式,可以直接将老环境的监控任务和阈值信息导出导入 +> 若不想如上繁琐的脚本升级方式,可以直接将老环境的监控任务和阈值信息导出导入 1. 部署一套最新版本的新环境 2. 在页面上将老环境的监控任务和阈值信息导出。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/upgrade.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/upgrade.md index 461065fa003..2e2124534ec 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/upgrade.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/upgrade.md @@ -1,7 +1,7 @@ --- id: upgrade title: HertzBeat 新版本更新指引 -sidebar_label: 版本更新指引 +sidebar_label: 版本更新指引 --- **Apache HertzBeat (incubating) 的发布版本列表** @@ -14,8 +14,6 @@ HertzBeat 的元数据信息保存在 H2 或 Mysql, PostgreSQL 关系型数据 **升级前您需要保存备份好数据库的数据文件和监控模版文件** - - ### Docker部署方式的升级 1. 若使用了自定义监控模版 @@ -23,32 +21,26 @@ HertzBeat 的元数据信息保存在 H2 或 Mysql, PostgreSQL 关系型数据 - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - 然后在后续升级启动 docker 容器的时候需要挂载上这个 define 目录,`-v $(pwd)/define:/opt/hertzbeat/define` - `-v $(pwd)/define:/opt/hertzbeat/define` - -2. 若使用内置默认 H2 数据库 +2. 若使用内置默认 H2 数据库 - 需挂载或备份 `-v $(pwd)/data:/opt/hertzbeat/data` 容器内的数据库文件目录 `/opt/hertzbeat/data` - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,注意需要将数据库文件目录挂载 `-v $(pwd)/data:/opt/hertzbeat/data` - 3. 若使用外置关系型数据库 Mysql, PostgreSQL - 无需挂载备份容器内的数据库文件目录 - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,`application.yml`配置数据库连接即可 - ### 安装包部署方式的升级 1. 若使用内置默认 H2 数据库 - - 备份安装包下的数据库文件目录 `/opt/hertzbeat/data` + - 备份安装包下的数据库文件目录 `/opt/hertzbeat/data` - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动 - 2. 
若使用外置关系型数据库 Mysql, PostgreSQL - 无需备份安装包下的数据库文件目录 - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动,`application.yml`配置数据库连接即可 - - -**HAVE FUN** +**HAVE FUN** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md index abc8a1f94b2..4beebcd5045 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md @@ -1,7 +1,7 @@ --- id: victoria-metrics-init title: 依赖时序数据库服务VictoriaMetrics安装初始化 -sidebar_label: 指标数据存储VictoriaMetrics(推荐) +sidebar_label: 指标数据存储VictoriaMetrics(推荐) --- Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) @@ -11,21 +11,23 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决方案和时序数据库,兼容 Prometheus 生态。推荐版本(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 +### 通过Docker方式安装VictoriaMetrics -### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装VictoriaMetrics +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装VictoriaMetrics ```shell $ docker run -d -p 8428:8428 \ @@ -34,16 +36,16 @@ $ docker run -d -p 8428:8428 \ victoriametrics/victoria-metrics:v1.95.1 ``` - `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 - 使用```$ docker ps```查看数据库是否启动成功 +`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 +使用```$ docker ps```查看数据库是否启动成功 -3. 在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 +3. 在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** ```yaml warehouse: @@ -61,8 +63,9 @@ warehouse: 4. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 时序数据库是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md index 55d89c73cba..219620a230c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md @@ -4,27 +4,27 @@ title: 监控模版中心 sidebar_label: 监控模版 --- -> Apache HertzBeat (incubating) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 - +> Apache HertzBeat (incubating) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
-这是它的架构原理: +这是它的架构原理: ![hertzBeat](/img/docs/hertzbeat-arch.png) -**我们将所有监控采集类型(mysql,website,jvm,k8s)都定义为yml模版,用户可以导入这些模版到hertzbeat系统中,使其支持对应类型的监控,非常方便!** +**我们将所有监控采集类型(mysql,website,jvm,k8s)都定义为yml模版,用户可以导入这些模版到hertzbeat系统中,使其支持对应类型的监控,非常方便!** ![](/img/docs/advanced/extend-point-1.png) **欢迎大家一起贡献你使用过程中自定义的通用监控类型YML模版,可用的模板如下:** -### 应用服务监控模版 +### 应用服务监控模版  👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
+ 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
+ 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
+ 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
@@ -32,7 +32,7 @@ sidebar_label: 监控模版  👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
-### 数据库监控模版 +### 数据库监控模版  👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
@@ -49,7 +49,7 @@ sidebar_label: 监控模版  👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
-### 操作系统监控模版 +### 操作系统监控模版  👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
@@ -64,7 +64,6 @@ sidebar_label: 监控模版  👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- ### 中间件监控模版  👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
@@ -78,13 +77,12 @@ sidebar_label: 监控模版  👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- ### 云原生监控模版  👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
-### 网络监控模版 +### 网络监控模版  👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md index 4df0501dd83..cdd62f2209a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md @@ -3,14 +3,17 @@ id: extend-http-default title: HTTP协议系统默认解析方式 sidebar_label: 系统默认解析方式 --- -> HTTP接口调用获取响应数据后,用HertzBeat默认的解析方式去解析响应数据。 -**此需接口响应数据结构符合HertzBeat指定的数据结构规则** +> HTTP接口调用获取响应数据后,用HertzBeat默认的解析方式去解析响应数据。 -### HertzBeat数据格式规范 -注意⚠️ 响应数据为JSON +**此需接口响应数据结构符合HertzBeat指定的数据结构规则** + +### HertzBeat数据格式规范 + +注意⚠️ 响应数据为JSON 单层格式:key-value + ```json { "metricName1": "metricValue", @@ -19,7 +22,9 @@ sidebar_label: 系统默认解析方式 "metricName4": "metricValue" } ``` + 多层格式:数组里面套key-value + ```json [ { @@ -36,9 +41,11 @@ sidebar_label: 系统默认解析方式 } ] ``` + 样例: 查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 -若只有一台虚拟机,其单层格式为: +若只有一台虚拟机,其单层格式为: + ```json { "hostname": "linux-1", @@ -48,7 +55,9 @@ sidebar_label: 系统默认解析方式 "runningTime": 100 } ``` -若有多台虚拟机,其多层格式为: + +若有多台虚拟机,其多层格式为: + ```json [ { @@ -75,7 +84,7 @@ sidebar_label: 系统默认解析方式 ] ``` -**对应的监控模版YML可以配置为如下** +**对应的监控模版YML可以配置为如下** ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -148,3 +157,4 @@ metrics: # 这里使用HertzBeat默认解析 parseType: default ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md index aac574f42ad..b8699c93dcc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md @@ -1,19 +1,17 @@ --- id: extend-http-example-hertzbeat title: 教程一:适配一款基于HTTP协议的监控类型 -sidebar_label: 教程一:适配一款HTTP协议监控 +sidebar_label: 教程一:适配一款HTTP协议监控 --- -通过此教程我们一步一步描述如何在hertzbeat监控系统下新增适配一款基于http协议的监控类型。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +通过此教程我们一步一步描述如何在hertzbeat监控系统下新增适配一款基于http协议的监控类型。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 ### HTTP协议解析通用响应结构体,获取指标数据 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - ``` { "code": 200, @@ -22,6 +20,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 } ``` + 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: @@ -58,7 +57,6 @@ sidebar_label: 教程一:适配一款HTTP协议监控 **我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - ### 新增自定义监控模版YML **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -188,32 +186,24 @@ metrics: **新增完毕,现在我们重启hertzbeat系统。我们可以看到系统页面已经多了一个`hertzbeat`监控类型了。** - ![](/img/docs/advanced/extend-http-example-1.png) - ### 系统页面添加对`hertzbeat`监控类型的监控 > 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > 过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### 设置阈值告警通知 > 接下来我们就可以正常的设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ---- #### 完! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md index 5bbfe58cf32..93379199b19 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md @@ -6,15 +6,15 @@ sidebar_label: 教程二:获取TOKEN后续认证使用 通过此教程我们一步一步描述如何在教程一的基础上改造,新增一个监控指标,先调用认证接口获取TOKEN后,使用TOKEN作为参数供后面的监控指标采集认证使用。 -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 -### 请求流程 +### 请求流程 -【**认证信息监控指标(优先级最高)**】【**HTTP接口携带账户密码调用**】->【**响应数据解析**】->【**解析签发TOKEN-accessToken作为指标**】->【**将accessToken作为变量参数赋值给其他采集监控指标**】 +【**认证信息监控指标(优先级最高)**】【**HTTP接口携带账户密码调用**】->【**响应数据解析**】->【**解析签发TOKEN-accessToken作为指标**】->【**将accessToken作为变量参数赋值给其他采集监控指标**】 > 这里我们依然用教程一的hertzbeat监控举例!hertzbeat后台接口不仅仅支持教程一使用的basic直接账户密码认证,也支持token认证。 -**我们需要`POST`调用登录接口`/api/account/auth/form`获取`accessToken`,请求body(json格式)如下**: +**我们需要`POST`调用登录接口`/api/account/auth/form`获取`accessToken`,请求body(json格式)如下**: ```json { @@ -22,7 +22,8 @@ sidebar_label: 教程二:获取TOKEN后续认证使用 "identifier": "admin" } ``` -**响应结构数据如下**: + +**响应结构数据如下**: ```json { @@ -99,9 +100,9 @@ metrics: ...... ``` -### 定义监控指标`auth`登录请求获取`token` +### 定义监控指标`auth`登录请求获取`token` -1. 在`app-hertzbeat_token.yml`新增一个监控指标定义 `auth`, 设置采集优先级为最高0,采集指标 `token`. +1. 在`app-hertzbeat_token.yml`新增一个监控指标定义 `auth`, 设置采集优先级为最高0,采集指标 `token`. 
```yaml @@ -206,16 +207,13 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) - **新增成功后我们就可以在详情页面看到我们采集的 `token`, `refreshToken`指标数据。** ![](/img/docs/advanced/extend-http-example-6.png) ![](/img/docs/advanced/extend-http-example-7.png) - - -### 将`token`作为变量参数给后面的监控指标采集使用 +### 将`token`作为变量参数给后面的监控指标采集使用 **在`app-hertzbeat_token.yml`新增一个监控指标定义 `summary` 同教程一中的`summary`相同, 设置采集优先级为1** **设置此监控指标的HTTP协议配置中认证方式为 `Bearer Token` 将上一个监控指标`auth`采集的指标`token`作为参数给其赋值,使用`^o^`作为内部替换符标识,即`^o^token^o^`。如下:** @@ -231,7 +229,7 @@ metrics: bearerTokenToken: ^o^token^o^ ``` -**最终`app-hertzbeat_token.yml`定义如下:** +**最终`app-hertzbeat_token.yml`定义如下:** ```yaml @@ -368,9 +366,9 @@ metrics: ``` -**配置完成后,再次重启 `hertzbeat` 系统,查看监控详情页面** +**配置完成后,再次重启 `hertzbeat` 系统,查看监控详情页面** -![](/img/docs/advanced/extend-http-example-8.png) +![](/img/docs/advanced/extend-http-example-8.png) ![](/img/docs/advanced/extend-http-example-9.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md index fb3a6ef36d9..71a6b3f116e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md @@ -3,16 +3,18 @@ id: extend-http-jsonpath title: HTTP协议JsonPath解析方式 sidebar_label: JsonPath解析方式 --- + > HTTP接口调用获取响应数据后,用JsonPath脚本解析的解析方式去解析响应数据。 注意⚠️ 响应数据为JSON格式 -**使用JsonPath脚本将响应数据解析成符合HertzBeat指定的数据结构规则的数据** +**使用JsonPath脚本将响应数据解析成符合HertzBeat指定的数据结构规则的数据** + +#### JsonPath操作符 -#### JsonPath操作符 -[JSONPath在线验证](https://www.jsonpath.cn) +[JSONPath在线验证](https://www.jsonpath.cn) -| JSONPATH | 帮助描述 | +| JSONPATH | 帮助描述 | |------------------|-----------------------------------| | $ | 根对象或元素 | | @ | 当前对象或元素 | @@ -25,8 +27,10 @@ sidebar_label: JsonPath解析方式 | ?() | 过滤器(脚本)表达式. | | () | 脚本表达式. 
| -#### HertzBeat数据格式规范 +#### HertzBeat数据格式规范 + 单层格式:key-value + ```json { "metricName1": "metricValue", @@ -35,7 +39,9 @@ sidebar_label: JsonPath解析方式 "metricName4": "metricValue" } ``` + 多层格式:数组里面套key-value + ```json [ { @@ -53,10 +59,11 @@ sidebar_label: JsonPath解析方式 ] ``` -#### 样例 +#### 样例 查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 -接口返回的原始数据如下: +接口返回的原始数据如下: + ```json { "firstName": "John", @@ -80,7 +87,8 @@ sidebar_label: JsonPath解析方式 } ``` -我们使用JsonPath脚本解析,对应的脚本为: `$.number[*]` ,解析后的数据结构如下: +我们使用JsonPath脚本解析,对应的脚本为: `$.number[*]` ,解析后的数据结构如下: + ```json [ { @@ -93,9 +101,10 @@ sidebar_label: JsonPath解析方式 } ] ``` -此数据结构符合HertzBeat的数据格式规范,成功提取指标`type,num`值。 -**对应的监控模版YML可以配置为如下** +此数据结构符合HertzBeat的数据格式规范,成功提取指标`type,num`值。 + +**对应的监控模版YML可以配置为如下** ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -166,3 +175,4 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md index c4175928195..c12ea3539fe 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md @@ -1,21 +1,22 @@ --- id: extend-http title: HTTP协议自定义监控 -sidebar_label: HTTP协议自定义监控 +sidebar_label: HTTP协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用HTTP协议自定义指标监控。 -### HTTP协议采集流程 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用HTTP协议自定义指标监控。 + +### HTTP协议采集流程 + 【**HTTP接口调用**】->【**响应校验**】->【**响应数据解析**】->【**默认方式解析|JsonPath脚本解析 | XmlPath解析(todo) | Prometheus解析**】->【**指标数据提取**】 由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 -HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 +HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 
**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) -**JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) - +**JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -23,15 +24,14 @@ HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数, ------- -下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 +下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 -### 监控模版YML +### 监控模版YML > 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 - +样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -203,3 +203,4 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md index f3453c6de80..9bcd5cded08 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md @@ -1,29 +1,34 @@ --- id: extend-jdbc title: JDBC协议自定义监控 -sidebar_label: JDBC协议自定义监控 +sidebar_label: JDBC协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JDBC(目前支持mysql,mariadb,postgresql,sqlserver)自定义指标监控。 -> JDBC协议自定义监控可以让我们很方便的通过写SQL查询语句就能监控到我们想监控的指标 -### JDBC协议采集流程 -【**系统直连MYSQL**】->【**运行SQL查询语句**】->【**响应数据解析:oneRow, multiRow, columns**】->【**指标数据提取**】 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JDBC(目前支持mysql,mariadb,postgresql,sqlserver)自定义指标监控。 +> 
JDBC协议自定义监控可以让我们很方便的通过写SQL查询语句就能监控到我们想监控的指标 + +### JDBC协议采集流程 + +【**系统直连MYSQL**】->【**运行SQL查询语句**】->【**响应数据解析:oneRow, multiRow, columns**】->【**指标数据提取**】 由流程可见,我们自定义一个JDBC协议的监控类型,需要配置JDBC请求参数,配置获取哪些指标,配置查询SQL语句。 -### 数据解析方式 +### 数据解析方式 + SQL查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有三种:oneRow, multiRow, columns -#### **oneRow** -> 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 +#### **oneRow** + +> 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book limit 1; -这里指标字段就能和响应数据一一映射为一行采集数据。 +这里指标字段就能和响应数据一一映射为一行采集数据。 #### **multiRow** -> 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 + +> 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 例如: 查询的指标字段为:one tow three four @@ -31,33 +36,34 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 这里指标字段就能和响应数据一一映射为多行采集数据。 #### **columns** -> 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 + +> 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 例如: 查询字段:one tow three four 查询SQL:select key, value from book; -SQL响应数据: +SQL响应数据: -| key | value | -|----------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | +| key | value | +|-------|-------| +| one | 243 | +| two | 435 | +| three | 332 | +| four | 643 | -这里指标字段就能和响应数据的key映射,获取对应的value为其采集监控数据。 +这里指标字段就能和响应数据的key映射,获取对应的value为其采集监控数据。 -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) - ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 -### 监控模版YML +下面详细介绍下文件的配置用法,请注意看使用注释。 + +### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 @@ -237,3 +243,4 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md index e1ce0dd90a3..032f09f4f14 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md @@ -1,38 +1,38 @@ --- id: extend-jmx title: JMX协议自定义监控 -sidebar_label: JMX协议自定义监控 +sidebar_label: JMX协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 -> JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 +> JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 + +### JMX协议采集流程 -### JMX协议采集流程 -【**对端JAVA应用暴露JMX服务**】->【**HertzBeat直连对端JMX服务**】->【**获取配置的 Mbean Object 数据**】->【**指标数据提取**】 +【**对端JAVA应用暴露JMX服务**】->【**HertzBeat直连对端JMX服务**】->【**获取配置的 Mbean Object 数据**】->【**指标数据提取**】 由流程可见,我们自定义一个JMX协议的监控类型,需要配置JMX请求参数,配置获取哪些指标,配置查询Object信息。 -### 数据解析方式 +### 数据解析方式 通过配置监控模版YML的指标`field`, `aliasFields`, `jmx` 协议的 `objectName` 来和对端系统暴露的 `Mbean`对象信息映射解析。 - - -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下监控模版的配置用法,请注意看使用注释。 + +下面详细介绍下监控模版的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 - +样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -236,3 +236,4 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md index c951f1bc190..a59d9b1898a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md @@ -1,12 +1,13 @@ --- id: extend-point title: 自定义监控 -sidebar_label: 自定义监控 +sidebar_label: 自定义监控 --- + > HertzBeat拥有自定义监控能力,您只需配置监控模版YML就能适配一款自定义的监控类型。 -> 目前自定义监控支持[HTTP协议](extend-http),[JDBC协议](extend-jdbc),[SSH协议](extend-ssh),[JMX协议](extend-jmx),[SNMP协议](extend-snmp),后续会支持更多通用协议。 +> 目前自定义监控支持[HTTP协议](extend-http),[JDBC协议](extend-jdbc),[SSH协议](extend-ssh),[JMX协议](extend-jmx),[SNMP协议](extend-snmp),后续会支持更多通用协议。 -### 自定义流程 +### 自定义流程 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -14,13 +15,13 @@ sidebar_label: 自定义监控 ------- -### 监控模版YML +### 监控模版YML **HertzBeat的设计是一个监控模版对应一个监控类型,所有监控类型都是由监控模版来定义的**。 -> 监控模版YML定义了 *监控类型的名称(国际化), 配置参数映射, 采集指标信息, 采集协议配置* 等。 +> 监控模版YML定义了 *监控类型的名称(国际化), 配置参数映射, 采集指标信息, 采集协议配置* 等。 -下面使用样例详细介绍下这监控模版YML的配置用法。 +下面使用样例详细介绍下这监控模版YML的配置用法。 样例:自定义一个 `app` 名称为 `example2` 的自定义监控类型,其使用HTTP协议采集指标数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md index 3ff65d60a17..387d67c5987 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md @@ -1,38 +1,38 @@ --- id: extend-snmp title: SNMP协议自定义监控 -sidebar_label: SNMP协议自定义监控 +sidebar_label: SNMP协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 -> SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 +> SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 + +### SNMP协议采集流程 -### SNMP协议采集流程 -【**对端开启SNMP服务**】->【**HertzBeat直连对端SNMP服务**】->【**根据配置抓取对端OID指标信息**】->【**指标数据提取**】 +【**对端开启SNMP服务**】->【**HertzBeat直连对端SNMP服务**】->【**根据配置抓取对端OID指标信息**】->【**指标数据提取**】 由流程可见,我们自定义一个SNMP协议的监控类型,需要配置SNMP请求参数,配置获取哪些指标,配置查询OID信息。 -### 数据解析方式 +### 数据解析方式 通过配置监控模版YML的指标`field`, 
`aliasFields`, `snmp` 协议下的 `oids`来抓取对端指定的数据并解析映射。 - - -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 + +下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 - +样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 ```yaml # The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring @@ -207,3 +207,4 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md index a38086f8dd9..0c4fa9cb9ab 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md @@ -1,21 +1,25 @@ --- id: extend-ssh title: SSH协议自定义监控 -sidebar_label: SSH协议自定义监控 +sidebar_label: SSH协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 -> SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 +> SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 + +### SSH协议采集流程 -### SSH协议采集流程 -【**系统直连Linux**】->【**运行SHELL命令脚本语句**】->【**响应数据解析:oneRow, multiRow**】->【**指标数据提取**】 +【**系统直连Linux**】->【**运行SHELL命令脚本语句**】->【**响应数据解析:oneRow, multiRow**】->【**指标数据提取**】 由流程可见,我们自定义一个SSH协议的监控类型,需要配置SSH请求参数,配置获取哪些指标,配置查询脚本语句。 -### 数据解析方式 +### 数据解析方式 + SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有两种:oneRow, multiRow,能满足绝大部分指标需求。 -#### **oneRow** -> 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 +#### **oneRow** + +> 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 例如: 需要查询Linux的指标 
hostname-主机名称,uptime-启动时间 @@ -23,31 +27,37 @@ SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` 则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): `hostname; uptime | awk -F "," '{print $1}'` -终端响应的数据为: +终端响应的数据为: + ``` tombook 14:00:15 up 72 days -``` +``` + 则最后采集到的指标数据一一映射为: hostname值为 `tombook` -uptime值为 `14:00:15 up 72 days` +uptime值为 `14:00:15 up 72 days` -这里指标字段就能和响应数据一一映射为一行采集数据。 +这里指标字段就能和响应数据一一映射为一行采集数据。 #### **multiRow** -> 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 + +> 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 例如: 查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 -内存指标原始查询命令为:`free -m`, 控制台响应: +内存指标原始查询命令为:`free -m`, 控制台响应: + ```shell total used free shared buff/cache available Mem: 7962 4065 333 1 3562 3593 Swap: 8191 33 8158 ``` + 在hertzbeat中multiRow格式解析需要响应数据列名称和指标值一一映射,则对应的查询SHELL脚本为: `free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` -控制台响应为: +控制台响应为: + ```shell total used free buff_cache available 7962 4066 331 3564 3592 @@ -55,22 +65,22 @@ total used free buff_cache available 这里指标字段就能和响应数据一一映射为采集数据。 -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 + +下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 - +样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -206,3 +216,4 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md index a639a13a8b9..ff411818bdc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md @@ -4,16 +4,14 @@ title: 自定义适配一款基于HTTP协议的新监控类型 sidebar_label: 教程案例 --- -通过此教程我们一步一步描述如何在hertzbeat监控系统下自定义新增适配一款基于http协议的监控类型。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +通过此教程我们一步一步描述如何在hertzbeat监控系统下自定义新增适配一款基于http协议的监控类型。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 ### HTTP协议解析通用响应结构体,获取指标数据 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - ``` { "code": 200, @@ -22,6 +20,7 @@ sidebar_label: 教程案例 } ``` + 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: @@ -58,12 +57,11 @@ sidebar_label: 教程案例 **我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - ### 新增配置监控模版YML **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -定义我们在页面上需要输入哪些参数,一般的HTTP协议参数主要有ip, port, headers, params, uri, 账户密码等,我们直接复用 `api`监控模版 里面的参数定义内容,删除其中的我们不需要输入的uri参数和keyword关键字等参数即可。 +定义我们在页面上需要输入哪些参数,一般的HTTP协议参数主要有ip, port, headers, params, uri, 账户密码等,我们直接复用 `api`监控模版 里面的参数定义内容,删除其中的我们不需要输入的uri参数和keyword关键字等参数即可。 定义采集类型是啥,需要用哪种协议采集方式,采集的指标是啥,协议的配置参数等。我们直接复用 `api`监控模版 里面的定义内容,修改为我们当前的监控类型`hertzbeat`配置参数即可,如下:注意⚠️我们这次获取接口响应数据中的`category`,`app`,`status`,`size`,`availableSize`等字段作为指标数据。 @@ -221,32 +219,24 @@ metrics: **点击保存并应用。我们可以看到系统页面的自定义监控菜单已经多了一个`hertzbeat`监控类型了。** - ![](/img/docs/advanced/extend-http-example-1.png) - ### 页面添加对`hertzbeat`监控类型的监控 > 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > 
过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### 设置阈值告警通知 > 接下来我们就可以正常设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ---- #### 完! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md index b2d8f1489f5..29d5478158a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md @@ -9,7 +9,7 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 **使用协议:JMX** -### 监控前操作 +### 监控前操作 > 您需要在 ActiveMQ 开启 `JMX` 服务,HertzBeat 使用 JMX 协议对 ActiveMQ 进行指标采集。 @@ -23,9 +23,10 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 ``` -2. 修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 +2. 修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 + +将如下原配置信息 -将如下原配置信息 ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" @@ -34,7 +35,9 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ``` -更新为如下配置,⚠️注意修改`本机对外IP` + +更新为如下配置,⚠️注意修改`本机对外IP` + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" @@ -49,25 +52,25 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" 3. 
重启 ACTIVEMQ 服务,在 HertzBeat 添加对应 ActiveMQ 监控即可,参数使用 JMX 配置的 IP 端口。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -|-------------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| JMX端口 | JMX 对外提供的HTTP端口,默认为 11099。 | -| JMX URL | 可选,自定义 JMX URL 连接 | -| 用户名 | 认证时使用的用户名 | -| 密码 | 认证时使用的密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| JMX端口 | JMX 对外提供的HTTP端口,默认为 11099。 | +| JMX URL | 可选,自定义 JMX URL 连接 | +| 用户名 | 认证时使用的用户名 | +| 密码 | 认证时使用的密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 #### 指标集合:broker -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------------------|------|-----------------------------------------------------------------------| | BrokerName | 无 | The name of the broker. | | BrokerVersion | 无 | The version of the broker. | @@ -88,57 +91,56 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | MaxMessageSize | 无 | Max message size on this broker | | MinMessageSize | 无 | Min message size on this broker | -#### 指标集合:topic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-------------------------------------------------------------------------------------------| -| Name | 无 | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | 无 | The percentage of the memory limit used | -| ProducerCount | 无 | Number of producers attached to this destination | -| ConsumerCount | 无 | Number of consumers subscribed to this destination. 
| -| EnqueueCount | 无 | Number of messages that have been sent to the destination. | -| DequeueCount | 无 | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | 无 | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | 无 | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | 无 | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | 无 | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. | -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - +#### 指标集合:topic + +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|-------------------------------------------------------------------------------------------| +| Name | 无 | Name of this destination. | +| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | +| MemoryPercentUsage | 无 | The percentage of the memory limit used | +| ProducerCount | 无 | Number of producers attached to this destination | +| ConsumerCount | 无 | Number of consumers subscribed to this destination. | +| EnqueueCount | 无 | Number of messages that have been sent to the destination. | +| DequeueCount | 无 | Number of messages that has been acknowledged (and removed) from the destination. 
| +| ForwardCount | 无 | Number of messages that have been forwarded (to a networked broker) from the destination. | +| InFlightCount | 无 | Number of messages that have been dispatched to, but not acknowledged by, consumers. | +| DispatchCount | 无 | Number of messages that has been delivered to consumers, including those not acknowledged | +| ExpiredCount | 无 | Number of messages that have been expired. | +| StoreMessageSize | B | The memory size of all messages in this destination's store. | +| AverageEnqueueTime | ms | Average time a message was held on this destination. | +| MaxEnqueueTime | ms | The longest time a message was held on this destination | +| MinEnqueueTime | ms | The shortest time a message was held on this destination | +| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | +| AverageMessageSize | B | Average message size on this destination | +| MaxMessageSize | B | Max message size on this destination | +| MinMessageSize | B | Min message size on this destination | #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 
| 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md index 5323ede8110..52367155d89 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md @@ -9,33 +9,31 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8080 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| HTTPS | 是否启用HTTPS | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:airflow_health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | -------------------- | -| metadatabase | 无 | metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|------------------| +| 
metadatabase | 无 | metadatabase健康情况 | +| scheduler | 无 | scheduler健康情况 | +| triggerer | 无 | triggerer健康情况 | #### 指标集合:airflow_version -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | --------------- | -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------| +| value | 无 | Airflow版本 | +| git_version | 无 | Airflow git版本 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md index 68cf7339eae..5198b961b66 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md @@ -6,13 +6,13 @@ sidebar_label: 告警模板登录台地址 > 阈值触发后发送告警信息,通过钉钉/企业微信/飞书机器人通知或者使用邮箱通知的时候,告警内容中有登录控制台的详情链接 - ### 自定义设置 在我们的启动配置文件application.yml中,找到下面的配置 + ```yml alerter: console-url: #这里就是我们的自定义控制台地址 ``` -默认值是赫兹跳动的官方控制台地址 \ No newline at end of file +默认值是赫兹跳动的官方控制台地址 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md index aec6342f7d3..9d0ee3b088f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md @@ -5,15 +5,15 @@ sidebar_label: 告警钉钉机器人通知 keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过钉钉机器人通知到接收人。 +> 阈值触发后发送告警信息,通过钉钉机器人通知到接收人。 -### 操作步骤 +### 操作步骤 1. **【钉钉桌面客户端】-> 【群设置】-> 【智能群助手】-> 【添加新建机器人-选自定义】-> 【设置机器人名称头像】-> 【注意⚠️设置自定义关键字: HertzBeat】 ->【添加成功后复制其WebHook地址】** -> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 +> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 -![email](/img/docs/help/alert-notice-8.png) +![email](/img/docs/help/alert-notice-8.png) 2. 
**【保存机器人的WebHook地址access_token值】** @@ -24,18 +24,18 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/alert-notice-9.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 钉钉机器人通知常见问题 -### 钉钉机器人通知常见问题 +1. 钉钉群未收到机器人告警通知 -1. 钉钉群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查钉钉机器人是否配置了安全自定义关键字:HertzBeat > 请排查是否配置正确机器人ACCESS_TOKEN,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md index d6c4879a2ba..9694126d0dd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md @@ -5,68 +5,66 @@ sidebar_label: 告警 Discord 机器人通知 keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Discord 机器人通知到接收人。 +> 阈值触发后发送告警信息,通过 Discord 机器人通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 -### 在 Discord 创建应用, 应用下创建机器人, 获取机器人 Token +### 在 Discord 创建应用, 应用下创建机器人, 获取机器人 Token -1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 +1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 ![bot](/img/docs/help/discord-bot-1.png) -2. 在应用下创建机器人,获取机器人 Token +2. 在应用下创建机器人,获取机器人 Token ![bot](/img/docs/help/discord-bot-2.png) ![bot](/img/docs/help/discord-bot-3.png) -3. 授权机器人到聊天服务器 +3. 
授权机器人到聊天服务器 -> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` +> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` ![bot](/img/docs/help/discord-bot-4.png) > 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 -4. 查看您的聊天服务器是否已经加入机器人成员 +4. 查看您的聊天服务器是否已经加入机器人成员 ![bot](/img/docs/help/discord-bot-5.png) -### 开启开发者模式,获取频道 Channel ID +### 开启开发者模式,获取频道 Channel ID -1. 个人设置 -> 高级设置 -> 开启开发者模式 +1. 个人设置 -> 高级设置 -> 开启开发者模式 ![bot](/img/docs/help/discord-bot-6.png) -2. 获取频道 Channel ID +2. 获取频道 Channel ID -> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID +> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID ![bot](/img/docs/help/discord-bot-7.png) - -### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot +### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot 1. **【告警通知】->【新增接收人】 ->【选择 Discord 机器人通知方式】->【设置机器人Token和ChannelId】-> 【确定】** ![email](/img/docs/help/discord-bot-8.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -![email](/img/docs/help/alert-notice-policy.png) +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +![email](/img/docs/help/alert-notice-policy.png) -### Discord 机器人通知常见问题 +### Discord 机器人通知常见问题 -1. Discord 未收到机器人告警通知 +1. Discord 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 > 请排查机器人是否被 Discord聊天服务器正确赋权 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md index 7033f6036d6..d4dc218c591 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md @@ -5,34 +5,35 @@ sidebar_label: 告警邮件通知 keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过邮件通知到接收人。 +> 阈值触发后发送告警信息,通过邮件通知到接收人。 -### 操作步骤 +### 操作步骤 -1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** +1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** -![email](/img/docs/help/alert-notice-1.png) +![email](/img/docs/help/alert-notice-1.png) 2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** -![email](/img/docs/help/alert-notice-2.png) + ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) +![email](/img/docs/help/alert-notice-3.png) -3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 邮件通知常见问题 -### 邮件通知常见问题 +1. 自己内网部署的HertzBeat无法接收到邮件通知 -1. 自己内网部署的HertzBeat无法接收到邮件通知 -> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 +> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 + +2. 云环境TanCloud无法接收到邮件通知 -2. 云环境TanCloud无法接收到邮件通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确邮箱,是否已配置告警策略关联 -> 请查询邮箱的垃圾箱里是否把告警邮件拦截 +> 请查询邮箱的垃圾箱里是否把告警邮件拦截 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md index 3f1c5a2b9c1..b70c8b10c40 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md @@ -5,28 +5,28 @@ sidebar_label: 企业微信应用告警通知 keywords: [开源告警系统, 开源监控告警系统, 企业微信应用告警通知] --- -> 阈值触发后发送告警信息,通过企业微信应用通知到接收人. +> 阈值触发后发送告警信息,通过企业微信应用通知到接收人. -### Operation steps +### Operation steps 1. **【企业微信后台管理】-> 【App管理】-> 【创建一个新的应用】-> 【设置应用信息】->【添加成功后复制应用的AgentId和Secret】** -![email](/img/docs/help/alert-wechat-1.jpg) +![email](/img/docs/help/alert-wechat-1.jpg) 2. **【告警通知】->【新增接收人】 ->【选择企业微信应用通知方式】->【设置企业ID,企业应用id和应用的secret 】-> 【确定】** ![email](/img/docs/help/alert-wechat-2.jpg) -3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** -![email](/img/docs/help/alert-wechat-3.jpg) +![email](/img/docs/help/alert-wechat-3.jpg) +### 企业微信应用通知常见问题 -### 企业微信应用通知常见问题 +1. 企业微信应用未收到告警通知. -1. 企业微信应用未收到告警通知. > 请检查用户是否具有应用程序权限. > 请检查企业应用程序回调地址设置是否正常. > 请检查服务器IP是否在企业应用程序白名单上. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md index 448e70de223..604eff34fdc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md @@ -5,30 +5,30 @@ sidebar_label: 告警飞书机器人通知 keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过飞书机器人通知到接收人。 +> 阈值触发后发送告警信息,通过飞书机器人通知到接收人。 -### 操作步骤 +### 操作步骤 1. 
**【飞书客户端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -2. **【保存机器人的WebHook地址的KEY值】** +2. **【保存机器人的WebHook地址的KEY值】** > 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 飞书机器人通知常见问题 -### 飞书机器人通知常见问题 +1. 飞书群未收到机器人告警通知 -1. 飞书群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 +> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md index e137d07e9e2..c81f5608674 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md @@ -5,34 +5,33 @@ sidebar_label: 告警 Slack Webhook 通知 keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Slack Webhook 通知到接收人。 +> 阈值触发后发送告警信息,通过 Slack Webhook 通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 -### 在 Slack 开启 Webhook, 获取 Webhook URL +### 在 Slack 开启 Webhook, 获取 Webhook URL -参考官网文档 [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) +参考官网文档 [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) -### 在 HertzBeat 新增告警通知人,通知方式为 Slack Webhook +### 在 HertzBeat 新增告警通知人,通知方式为 Slack Webhook 1. 
**【告警通知】->【新增接收人】 ->【选择 Slack Webhook 通知方式】->【设置 Webhook URL】-> 【确定】** ![email](/img/docs/help/slack-bot-1.png) -2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) +![email](/img/docs/help/alert-notice-policy.png) +### Slack 机器人通知常见问题 -### Slack 机器人通知常见问题 - -1. Slack 未收到机器人告警通知 +1. Slack 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 +> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md index 6730aa19dcb..df609e66b50 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md @@ -5,22 +5,22 @@ sidebar_label: 告警 Telegram 机器人通知 keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Telegram 机器人通知到接收人。 +> 阈值触发后发送告警信息,通过 Telegram 机器人通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 ### 在 Telegram 创建机器人, 获取 Bot Token 和 UserId -1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` +1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` ![telegram-bot](/img/docs/help/telegram-bot-1.png) -2. 获取接收人的 `User ID` +2. 
获取接收人的 `User ID` -**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, -访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` +**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, +访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` ```json { @@ -42,27 +42,26 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] } ``` -3. 记录保存我们获得的 `Token` 和 `User Id` +3. 记录保存我们获得的 `Token` 和 `User Id` -### 在 HertzBeat 新增告警通知人,通知方式为 Telegram Bot +### 在 HertzBeat 新增告警通知人,通知方式为 Telegram Bot 1. **【告警通知】->【新增接收人】 ->【选择 Telegram 机器人通知方式】->【设置机器人Token和UserId】-> 【确定】** ![email](/img/docs/help/telegram-bot-2.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) +![email](/img/docs/help/alert-notice-policy.png) +### Telegram 机器人通知常见问题 -### Telegram 机器人通知常见问题 - -1. Telegram 未收到机器人告警通知 +1. Telegram 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 -> UserId 应为消息接收对象的UserId +> UserId 应为消息接收对象的UserId -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md index 42a711c5916..2f8bda93e2b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md @@ -1,18 +1,18 @@ --- id: alert_threshold title: 阈值告警配置 -sidebar_label: 阈值告警配置 +sidebar_label: 阈值告警配置 --- -> 对监控指标配置告警阈值(警告告警,严重告警,紧急告警),系统根据阈值配置和采集指标数据计算触发告警。 +> 对监控指标配置告警阈值(警告告警,严重告警,紧急告警),系统根据阈值配置和采集指标数据计算触发告警。 -### 操作步骤 +### 操作步骤 -1. **【告警配置】->【新增阈值】-> 【配置后确定】** +1. 
**【告警配置】->【新增阈值】-> 【配置后确定】** -![threshold](/img/docs/help/alert-threshold-1.png) +![threshold](/img/docs/help/alert-threshold-1.png) -如上图: +如上图: **指标对象**:选择我们需要配置阈值的监控指标对象 例如:网站监控类型下的 -> summary指标集合下的 -> responseTime响应时间指标 **阈值触发表达式**:根据此表达式来计算判断是否触发阈值,表达式环境变量和操作符见页面提示,例如:设置响应时间大于50触发告警,表达式为 `responseTime > 50`。阈值表达式详细帮助见 [阈值表达式帮助](alert_threshold_expr) @@ -20,17 +20,17 @@ sidebar_label: 阈值告警配置 **触发次数**:设置触发阈值多少次之后才会真正的触发告警 **通知模版**:告警触发后发送的通知信息模版,模版环境变量见页面提示,例如:`${app}.${metrics}.${metric}指标的值为${responseTime},大于50触发告警` **全局默认**: 设置此阈值是否对全局的此类指标都应用有效,默认否。新增阈值后还需将阈值与监控对象关联,这样阈值才会对此监控生效。 -**启用告警**:此告警阈值配置开启生效或关闭 +**启用告警**:此告警阈值配置开启生效或关闭 -2. ** 阈值关联监控⚠️ 【告警配置】-> 【将刚设置的阈值】-> 【配置关联监控】-> 【配置后确定】** +2. ** 阈值关联监控⚠️ 【告警配置】-> 【将刚设置的阈值】-> 【配置关联监控】-> 【配置后确定】** -> ** 注意⚠️ 新增阈值后还需将阈值与监控对象关联(即设置此阈值对哪些监控有效),这样阈值才会对此监控生效 **。 +> ** 注意⚠️ 新增阈值后还需将阈值与监控对象关联(即设置此阈值对哪些监控有效),这样阈值才会对此监控生效 **。 -![threshold](/img/docs/help/alert-threshold-2.png) +![threshold](/img/docs/help/alert-threshold-2.png) -![threshold](/img/docs/help/alert-threshold-3.png) +![threshold](/img/docs/help/alert-threshold-3.png) **阈值告警配置完毕,已经被成功触发的告警信息可以在【告警中心】看到。** -**若需要将告警信息邮件,微信,钉钉飞书通知给相关人员,可以在【告警通知】配置。** +**若需要将告警信息邮件,微信,钉钉飞书通知给相关人员,可以在【告警通知】配置。** -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md index 14fbcdb13dc..a3c5fe9a1c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md @@ -1,12 +1,12 @@ --- id: alert_threshold_expr title: 阈值触发表达式 -sidebar_label: 阈值触发表达式 +sidebar_label: 阈值触发表达式 --- -> 在我们配置阈值告警时,需要配置阈值触发表达式,系统根据表达式和监控指标值计算触发是否告警,这里详细介绍下表达式使用。 +> 在我们配置阈值告警时,需要配置阈值触发表达式,系统根据表达式和监控指标值计算触发是否告警,这里详细介绍下表达式使用。 -#### 表达式支持的操作符 +#### 表达式支持的操作符 ``` equals(str1,str2) @@ -24,30 +24,31 @@ equals(str1,str2) ``` 丰富的操作符让我们可以很自由的定义表达式。 -注意⚠️ 字符串的相等请用 `equals(str1,str2)` 数字类型的相等判断请用== 或 != +注意⚠️ 字符串的相等请用 `equals(str1,str2)` 数字类型的相等判断请用== 或 != #### 表达式函数库列表 参考: https://www.yuque.com/boyan-avfmj/aviatorscript/ashevw -#### 支持的环境变量 -> 环境变量即指标值等支持的变量,用于在表达式中,阈值计算判断时会将变量替换成实际值进行计算 +#### 支持的环境变量 + +> 环境变量即指标值等支持的变量,用于在表达式中,阈值计算判断时会将变量替换成实际值进行计算 非固定环境变量:这些变量会根据我们选择的监控指标对象而动态变化,例如我们选择了**网站监控的响应时间指标**,则环境变量就有 `responseTime - 此为响应时间变量` 如果我们想设置**网站监控的响应时间大于400时**触发告警,则表达式为 `responseTime>400` 固定环境变量(不常用):`instance : 所属行实例值` -此变量主要用于计算多实例时,比如采集到c盘d盘的`usage`(`usage为非固定环境变量`),我们只想设置**c盘的usage大于80**时告警,则表达式为 `equals(instance,"c")&&usage>80` +此变量主要用于计算多实例时,比如采集到c盘d盘的`usage`(`usage为非固定环境变量`),我们只想设置**c盘的usage大于80**时告警,则表达式为 `equals(instance,"c")&&usage>80` -#### 表达式设置案例 +#### 表达式设置案例 1. 网站监控->响应时间大于等于400ms时触发告警 -`responseTime>=400` + `responseTime>=400` 2. API监控->响应时间大于3000ms时触发告警 -`responseTime>3000` + `responseTime>3000` 3. 全站监控->URL(instance)路径为 `https://baidu.com/book/3` 的响应时间大于200ms时触发告警 -`equals(instance,"https://baidu.com/book/3")&&responseTime>200` + `equals(instance,"https://baidu.com/book/3")&&responseTime>200` 4. MYSQL监控->status指标->threads_running(运行线程数)指标大于7时触发告警 -`threads_running>7` + `threads_running>7` -若遇到问题可以通过交流群ISSUE交流反馈哦! 
+若遇到问题可以通过交流群ISSUE交流反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md index 61ed1dc99b5..022cd50f07e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md @@ -5,23 +5,24 @@ sidebar_label: 告警 Webhook 回调通知 keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过post请求方式调用WebHook接口通知到接收人。 +> 阈值触发后发送告警信息,通过post请求方式调用WebHook接口通知到接收人。 -## 操作步骤 +## 操作步骤 -1. **【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** +1. **【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** ![email](/img/docs/help/alert-notice-5.png) -2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) -### WebHook回调POST请求体BODY内容 +### WebHook回调POST请求体BODY内容 + +内容格式:JSON -内容格式:JSON ```json { "alarmId": 76456, @@ -43,22 +44,23 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 } ``` -| | | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 
1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | -| tags | example: {key1:value1} | +| | | +|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | +| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | +| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | +| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | +| content | string title: The actual content of the alarm notification 告警通知实际内容 | +| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | +| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | +| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | +| tags | example: {key1:value1} | + +### 
webhook通知常见问题 -### webhook通知常见问题 +1. WebHook回调未生效 -1. WebHook回调未生效 > 请查看告警中心是否已经产生此条告警信息 > 请排查配置的WebHook回调地址是否正确 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md index ce73c131d00..e0dbabf1a70 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md @@ -5,34 +5,34 @@ sidebar_label: 告警企业微信通知 keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过企业微信机器人通知到接收人。 +> 阈值触发后发送告警信息,通过企业微信机器人通知到接收人。 -### 操作步骤 +### 操作步骤 -1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** +1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -![email](/img/docs/help/alert-notice-6.jpg) +![email](/img/docs/help/alert-notice-6.jpg) -2. **【保存机器人的WebHook地址的KEY值】** +2. **【保存机器人的WebHook地址的KEY值】** > 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** +3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** ![email](/img/docs/help/alert-notice-7.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 企业微信机器人通知常见问题 -### 企业微信机器人通知常见问题 +1. 企业微信群未收到机器人告警通知 -1. 企业微信群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 +> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md index caa4b9f51fd..88f0e690223 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md @@ -5,32 +5,31 @@ sidebar_label: HTTP API keywords: [开源监控系统, 开源网站监控, HTTP API监控] --- -> 调用HTTP API接口,查看接口是否可用,对其响应时间等指标进行监测 +> 调用HTTP API接口,查看接口是否可用,对其响应时间等指标进行监测 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 请求方式 | 设置接口调用的请求方式:GET,POST,PUT,DELETE。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 用户名 | 接口Basic认证或Digest认证时使用的用户名 | -| 密码 | 接口Basic认证或Digest认证时使用的密码 | -| Content-Type | 设置携带BODY请求体数据请求时的资源类型 | -| 请求BODY | 设置携带BODY请求体数据,PUT POST请求方式时有效 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 请求方式 | 设置接口调用的请求方式:GET,POST,PUT,DELETE。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 用户名 | 接口Basic认证或Digest认证时使用的用户名 | +| 密码 | 接口Basic认证或Digest认证时使用的密码 | +| Content-Type | 设置携带BODY请求体数据请求时的资源类型 | +| 请求BODY | 设置携带BODY请求体数据,PUT POST请求方式时有效 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | 
----------- | -| responseTime | ms毫秒 | 网站响应时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md index 03c514bc7b8..3d0654db3b5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, CentOS操作系统监 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| 
cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md index cdd9f6f6ee5..ea4a376c049 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md @@ -9,41 +9,41 @@ keywords: [开源监控系统, 开源数据库监控, 达梦数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5236。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5236。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | ------------------ | -| PORT_NUM | 无 | 数据库暴露服务端口 | -| CTL_PATH | 无 | 控制文件路径 | -| MAX_SESSIONS | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|-----------| +| PORT_NUM | 无 | 数据库暴露服务端口 | +| CTL_PATH | 无 | 控制文件路径 | +| MAX_SESSIONS | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------ | -| status$ | 无 | DM数据库的开闭状态 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|------------| +| status$ | 无 | DM数据库的开闭状态 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | ---------------------------------------------- | -| 
dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | -| dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | -| dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------------------------| +| dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | +| dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | +| dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md index 221776b2426..c546b46fd2c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md @@ -7,7 +7,6 @@ keywords: [开源监控系统, 开源容器监控, Docker容器监控] > 对Docker容器的通用性能指标进行采集监控。 - ## 监控前操作 如果想要监控 `Docker` 中的容器信息,则需要按照一下步骤打开端口,让采集请求获取到对应的信息。 @@ -44,63 +43,60 @@ firewall-cmd --zone=public --add-port=2375/tcp --permanent firewall-cmd --reload ``` - - - - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为2375。 | -| 查询超时时间 | 设置获取Docker服务器API接口时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 器名称 | 一般是监控所有运行中的容器信息。 | -| 用户名 | 连接用户名,可选 | -| 密码 | 连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为2375。 | +| 查询超时时间 | 设置获取Docker服务器API接口时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 器名称 | 一般是监控所有运行中的容器信息。 | +| 用户名 | 连接用户名,可选 | +| 密码 | 连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 
新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:system -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- | -------------------------------------- | -| Name | 无 | 服务器名称 | -| version | 无 | docker本版号 | -| os | 无 | 服务器版本 例如:linux x86_64 | -| root_dir | 无 | docker文件夹目录 例如:/var/lib/docker | -| containers | 无 | 容器总数(在运行+未运行) | -| containers_running | 无 | 运行中的容器数目 | -| containers_paused | 无 | 暂停中的容器数目 | -| images | 无 | 容器景象的总数目。 | -| ncpu | 无 | NCPU | -| mem_total | MB | 占用的内存总大小 | -| system_time | 无 | 系统时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------------------------------| +| Name | 无 | 服务器名称 | +| version | 无 | docker本版号 | +| os | 无 | 服务器版本 例如:linux x86_64 | +| root_dir | 无 | docker文件夹目录 例如:/var/lib/docker | +| containers | 无 | 容器总数(在运行+未运行) | +| containers_running | 无 | 运行中的容器数目 | +| containers_paused | 无 | 暂停中的容器数目 | +| images | 无 | 容器景象的总数目。 | +| ncpu | 无 | NCPU | +| mem_total | MB | 占用的内存总大小 | +| system_time | 无 | 系统时间 | #### 指标集合:containers -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------- | -| id | 无 | Docker中容器的ID | -| name | 无 | Docker容器中的容器名称 | -| image | 无 | Docker容器使用的镜像 | -| command | 无 | Docker中的默认启动命令 | -| state | 无 | Docker中容器的运行状态 | -| status | 无 | Docker容器中的更新时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| id | 无 | Docker中容器的ID | +| name | 无 | Docker容器中的容器名称 | +| image | 无 | Docker容器使用的镜像 | +| command | 无 | Docker中的默认启动命令 | +| state | 无 | Docker中容器的运行状态 | +| status | 无 | Docker容器中的更新时间 | #### 指标集合:stats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- | ---------------------------- | -| name | 无 | Docker容器中的名字 | -| available_memory | MB | Docker容器可以利用的内存大小 | -| used_memory | MB | Docker容器已经使用的内存大小 | -| memory_usage | 无 | Docker容器的内存使用率 | -| cpu_delta | 无 | Docker容器已经使用的CPU数量 | -| number_cpus | 无 | Docker容器可以使用的CPU数量 | -| cpu_usage | 无 | Docker容器CPU使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|------------------|------|--------------------| +| name | 无 | Docker容器中的名字 | +| available_memory | MB | Docker容器可以利用的内存大小 | +| used_memory | MB | Docker容器已经使用的内存大小 | +| memory_usage | 无 | Docker容器的内存使用率 | +| cpu_delta | 无 | Docker容器已经使用的CPU数量 | +| number_cpus | 无 | Docker容器可以使用的CPU数量 | +| cpu_usage | 无 | Docker容器CPU使用率 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md index e3e143c17ed..8c2f1e290e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md @@ -7,15 +7,15 @@ keywords: [开源监控系统, 开源中间件监控, DynamicTp线程池监控] > 对DynamicTp actuator 暴露的线程池性能指标进行采集监控。 -### 前提 +### 前提 -1. 集成使用 `DynamicTp` +1. 集成使用 `DynamicTp` `DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 -集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) +集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) -2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 +2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 ```yaml management: @@ -24,7 +24,8 @@ management: exposure: include: '*' ``` -测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: + +测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: ```json [ @@ -58,45 +59,44 @@ management: ] ``` -3. 在HertzBeat中间件监控下添加DynamicTp监控即可 - +3. 
在HertzBeat中间件监控下添加DynamicTp监控即可 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 应用服务对外提供的端口,默认为8080。 | | 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | - | Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| Base Path | 暴露接口路径前缀,默认 /actuator | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:thread_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------------------| -| pool_name | 无 | 线程池名称 | -| core_pool_size | 无 | 核心线程数 | -| maximum_pool_size | 无 | 最大线程数 | -| queue_type | 无 | 任务队列类型 | -| queue_capacity | MB | 任务队列容量 | -| queue_size | 无 | 任务队列当前占用大小 | -| fair | 无 | 队列模式,SynchronousQueue会用到 | -| queue_remaining_capacity | MB | 任务队列剩余大小 | -| active_count | 无 | 活跃线程数 | -| task_count | 无 | 任务总数 | -| completed_task_count | 无 | 已完成任务数 | -| largest_pool_size | 无 | 历史最大线程数 | -| pool_size | 无 | 当前线程数 | -| wait_task_count | 无 | 等待执行任务数 | -| reject_count | 无 | 拒绝任务数 | -| reject_handler_name | 无 | 拒绝策略类型 | -| dynamic | 无 | 是否动态线程池 | -| run_timeout_count | 无 | 运行超时任务数 | -| queue_timeout_count | 无 | 等待超时任务数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|--------------------------| +| pool_name | 无 | 线程池名称 | +| core_pool_size | 无 | 核心线程数 | +| maximum_pool_size | 无 | 最大线程数 | +| queue_type | 无 | 任务队列类型 | +| queue_capacity | MB | 任务队列容量 | +| queue_size | 无 | 任务队列当前占用大小 | +| fair | 无 | 队列模式,SynchronousQueue会用到 | +| queue_remaining_capacity | MB | 任务队列剩余大小 | +| active_count 
| 无 | 活跃线程数 | +| task_count | 无 | 任务总数 | +| completed_task_count | 无 | 已完成任务数 | +| largest_pool_size | 无 | 历史最大线程数 | +| pool_size | 无 | 当前线程数 | +| wait_task_count | 无 | 等待执行任务数 | +| reject_count | 无 | 拒绝任务数 | +| reject_handler_name | 无 | 拒绝策略类型 | +| dynamic | 无 | 是否动态线程池 | +| run_timeout_count | 无 | 运行超时任务数 | +| queue_timeout_count | 无 | 等待超时任务数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md index f7fc4c150b7..9d39da7c9e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md @@ -7,29 +7,29 @@ keywords: [开源监控系统, 开源网站监控, SiteMap监控] > 对网站的全部页面监测是否可用 > 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 -> 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 +> 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 网站地图 | 网站SiteMap地图地址的相对路径,例如:/sitemap.xml。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 网站地图 | 网站SiteMap地图地址的相对路径,例如:/sitemap.xml。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| url | 无 | 网页的URL路径 | -| statusCode | 无 | 
请求此网页的响应HTTP状态码 | -| responseTime | ms毫秒 | 网站响应时间 | -| errorMsg | 无 | 请求此网站反馈的错误信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|-----------------| +| url | 无 | 网页的URL路径 | +| statusCode | 无 | 请求此网页的响应HTTP状态码 | +| responseTime | ms毫秒 | 网站响应时间 | +| errorMsg | 无 | 请求此网站反馈的错误信息 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md index 7d177cfe868..0670d75a984 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md @@ -1,84 +1,83 @@ --- id: guide title: 帮助中心 -sidebar_label: 帮助入门 +sidebar_label: 帮助入门 --- > 易用友好的实时监控工具,无需Agent,强大自定义监控能力。 -> 使用过程中的帮助文档,辅助信息。 +> 使用过程中的帮助文档,辅助信息。 ## 🔬 监控服务 > 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 > 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 -### 应用服务监控 +### 应用服务监控 -  👉 [网站监测](website)
-  👉 [HTTP API](api)
-  👉 [PING连通性](ping)
-  👉 [端口可用性](port)
-  👉 [全站监控](fullsite)
-  👉 [SSL证书有效期](ssl_cert)
-  👉 [JVM虚拟机](jvm)
-  👉 [SpringBoot2.0](springboot2)
+ 👉 [网站监测](website)
+ 👉 [HTTP API](api)
+ 👉 [PING连通性](ping)
+ 👉 [端口可用性](port)
+ 👉 [全站监控](fullsite)
+ 👉 [SSL证书有效期](ssl_cert)
+ 👉 [JVM虚拟机](jvm)
+ 👉 [SpringBoot2.0](springboot2)
-### 数据库监控 +### 数据库监控 -  👉 [MYSQL数据库监控](mysql)
-  👉 [MariaDB数据库监控](mariadb)
-  👉 [PostgreSQL数据库监控](postgresql)
-  👉 [SqlServer数据库监控](sqlserver)
-  👉 [Oracle数据库监控](oracle)
-  👉 [达梦数据库监控](dm)
-  👉 [OpenGauss数据库监控](opengauss)
-  👉 [IoTDB数据库监控](iotdb)
+ 👉 [MYSQL数据库监控](mysql)
+ 👉 [MariaDB数据库监控](mariadb)
+ 👉 [PostgreSQL数据库监控](postgresql)
+ 👉 [SqlServer数据库监控](sqlserver)
+ 👉 [Oracle数据库监控](oracle)
+ 👉 [达梦数据库监控](dm)
+ 👉 [OpenGauss数据库监控](opengauss)
+ 👉 [IoTDB数据库监控](iotdb)
-### 操作系统监控 +### 操作系统监控 -  👉 [Linux操作系统监控](linux)
-  👉 [Windows操作系统监控](windows)
-  👉 [Ubuntu操作系统监控](ubuntu)
-  👉 [Centos操作系统监控](centos)
+ 👉 [Linux操作系统监控](linux)
+ 👉 [Windows操作系统监控](windows)
+ 👉 [Ubuntu操作系统监控](ubuntu)
+ 👉 [Centos操作系统监控](centos)
### 中间件监控 -  👉 [Zookeeper](zookeeper)
-  👉 [Kafka](kafka)
-  👉 [Tomcat](tomcat)
-  👉 [ShenYu](shenyu)
-  👉 [DynamicTp](dynamic_tp)
-  👉 [RabbitMQ](rabbitmq)
-  👉 [ActiveMQ](activemq)
-  👉 [Jetty](jetty)
+ 👉 [Zookeeper](zookeeper)
+ 👉 [Kafka](kafka)
+ 👉 [Tomcat](tomcat)
+ 👉 [ShenYu](shenyu)
+ 👉 [DynamicTp](dynamic_tp)
+ 👉 [RabbitMQ](rabbitmq)
+ 👉 [ActiveMQ](activemq)
+ 👉 [Jetty](jetty)
### 云原生监控 -  👉 [Docker](docker)
-  👉 [Kubernetes](kubernetes)
+ 👉 [Docker](docker)
+ 👉 [Kubernetes](kubernetes)
-## 💡 告警服务 +## 💡 告警服务 > 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 -> 告警服务的定位是阈值准确及时触发,告警通知及时可达。 +> 告警服务的定位是阈值准确及时触发,告警通知及时可达。 -### 告警中心 +### 告警中心 -> 已触发的告警信息中心,提供告警删除,告警处理,标记未处理,告警级别状态等查询过滤。 +> 已触发的告警信息中心,提供告警删除,告警处理,标记未处理,告警级别状态等查询过滤。 -### 告警配置 +### 告警配置 > 指标阈值配置,提供表达式形式的指标阈值配置,可设置告警级别,触发次数,告警通知模版和是否启用,关联监控等功能。 详见 👉 [阈值告警](alert_threshold)
-   👉 [阈值表达式](alert_threshold_expr) +   👉 [阈值表达式](alert_threshold_expr) -### 告警通知 +### 告警通知 > 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 > 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 -> 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。 - +> 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。  👉 [配置邮箱通知](alert_email)
 👉 [配置 Webhook 通知](alert_webhook)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md index fec361e2366..bda83b006e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md @@ -37,57 +37,54 @@ export HADOOP_OPTS= "$HADOOP_OPTS ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache (限JDK8及以下) -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | 
----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md index 2c2136f91cf..3b41d3979c6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md @@ -16,6 +16,7 @@ keywords: [开源监控工具, 开源 Apache Hive 监控工具, 监控 Apache Hi ```shell hive --service metastore & ``` + **2. 
启用 Hive Server2:** ```shell @@ -24,54 +25,53 @@ hive --service hiveserver2 & ### 配置参数 -| 参数名称 | 参数描述 | -| ---------- |--------------------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 10002。 | -| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | +| 参数名称 | 参数描述 | +|----------|--------------------------------------------------------| +| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | +| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | +| 端口 | 数据库提供的默认端口为 10002。 | +| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | ### 采集指标 #### 指标收集: 基本信息 -| 指标名称 | 指标单位 | 指标描述 | -|--------|-------|-----------------------------| -| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | -| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | -| 虚拟机版本 | 无 | 虚拟机的版本。 | -| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | +| 指标名称 | 指标单位 | 指标描述 | +|--------|------|-----------------------------| +| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | +| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | +| 虚拟机版本 | 无 | 虚拟机的版本。 | +| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | #### 指标收集: 环境信息 -| 指标名称 | 指标单位 | 指标描述 | -|------------|-------|--------------------------------| -| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | -| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | -| 操作系统版本 | 无 | 操作系统的版本。 | -| 操作系统架构 | 无 | 操作系统的架构。 | -| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | -| java运行环境版本 | 无 | Java 运行时环境的版本。 | +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|--------------------------------| +| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | +| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | +| 操作系统版本 | 无 | 操作系统的版本。 | +| 操作系统架构 | 无 | 操作系统的架构。 | +| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | +| java运行环境版本 | 无 | Java 运行时环境的版本。 | #### 指标收集: 线程信息 -| 指标名称 | 
指标单位 | 指标描述 | +| 指标名称 | 指标单位 | 指标描述 | |--------|------|------------------------------| | 线程数量 | None | HiveServer2 当前正在使用的线程数。 | -| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | +| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | | 最高线程数 | None | HiveServer2 在任何给定时间使用的最高线程数。 | | 守护线程数 | None | HiveServer2 当前活动的守护线程数。 | #### 指标收集: 代码缓存 -| 指标名称 | 指标单位 | 指标描述 | -|------------|-------------|---------------| -| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | -| 内存池初始内存 | MB | 内存池请求的初始内存量。 | -| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | -| 内存池内存使用量 | MB | 内存池已使用内存量 | - +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|---------------| +| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | +| 内存池初始内存 | MB | 内存池请求的初始内存量。 | +| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | +| 内存池内存使用量 | MB | 内存池已使用内存量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md index 0f0dc0e0ecb..fceb485f05b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md @@ -5,15 +5,15 @@ sidebar_label: IoTDB数据库 keywords: [开源监控系统, 开源数据库监控, IoTDB数据库监控] --- -> 对 Apache IoTDB 物联网时序数据库的运行状态(JVM相关),内存任务集群等相关指标进行监测。 +> 对 Apache IoTDB 物联网时序数据库的运行状态(JVM相关),内存任务集群等相关指标进行监测。 -## 监控前操作 +## 监控前操作 -您需要在 IoTDB 开启`metrics`功能,他将提供 prometheus metrics 形式的接口数据。 +您需要在 IoTDB 开启`metrics`功能,他将提供 prometheus metrics 形式的接口数据。 -开启`metrics`功能, 参考 [官方文档](https://iotdb.apache.org/zh/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) +开启`metrics`功能, 参考 [官方文档](https://iotdb.apache.org/zh/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) -主要如下步骤: +主要如下步骤: 1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server @@ -41,13 +41,13 @@ predefinedMetrics: - FILE ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 3. 
在 HertzBeat 添加对应 IoTDB 监控即可。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -57,37 +57,37 @@ predefinedMetrics: | 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | | 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:cluster_node_status +#### 指标集合:cluster_node_status -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- |------|-------------------------| +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|-------------------------| | name | 无 | 节点名称IP | | status | 无 | 节点状态,1=online 2=offline | #### 指标集合:jvm_memory_committed_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|------------------| | area | 无 | heap内存或nonheap内存 | | id | 无 | 内存区块 | -| value | MB | 当前向JVM申请的内存大小 | +| value | MB | 当前向JVM申请的内存大小 | #### 指标集合:jvm_memory_used_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| | area | 无 | heap内存或nonheap内存 | | id | 无 | 内存区块 | -| value | MB | JVM已使用内存大小 | +| value | MB | JVM已使用内存大小 | #### 指标集合:jvm_threads_states_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------| -| state | 无 | 线程状态 | -| count | 无 | 线程状态对应线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------| +| state | 无 | 线程状态 | +| count | 无 | 线程状态对应线程数量 | #### 指标集合:quantity 业务数据 @@ -99,22 +99,23 @@ predefinedMetrics: #### 指标集合:cache_hit 缓存 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------------------------------------------------| -| name | 无 | 缓存名称 chunk/timeSeriesMeta/bloomFilter | -| value | % | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------| +| name | 无 | 缓存名称 chunk/timeSeriesMeta/bloomFilter | +| value | % | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | #### 指标集合:queue 任务队列 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- 
|------|---------------------------------------------------| -| name | 无 | 队列名称 compaction_inner/compaction_cross/flush | -| status | 无 | 状态 running/waiting | -| value | 无 | 当前时间任务数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|----------------------------------------------| +| name | 无 | 队列名称 compaction_inner/compaction_cross/flush | +| status | 无 | 状态 running/waiting | +| value | 无 | 当前时间任务数 | #### 指标集合:thrift_connections -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-------------| -| name | 无 | 名称 | -| connection | 无 | thrift当前连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|-------------| +| name | 无 | 名称 | +| connection | 无 | thrift当前连接数 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md index 6dbfc73a7f2..c62c6f9448a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md @@ -1,62 +1,70 @@ --- id: issue title: 常见问题 -sidebar_label: 常见问题 +sidebar_label: 常见问题 --- -### 监控常见问题 +### 监控常见问题 -1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** -> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http +1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** -2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** -> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) +> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http + +2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** + +> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 
安装包部署的hertzbeat下ping连通性监控异常 -安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 > docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address + +4. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖服务TDengine时序数据库] -4. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖服务TDengine时序数据库] > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - TDengine数据库 -> 安装初始化此数据库参考 [TDengine安装初始化](../start/tdengine-init) +> 安装初始化此数据库参考 [TDengine安装初始化](../start/tdengine-init) 5. 配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 > 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 > 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine -6.配置http api监控,用于进行业务接口探测,确保业务可以用,另外接口有进行token鉴权校验,"Authorization:Bearer eyJhbGciOiJIUzI1....",配置后测试,提示“StatusCode 401”。服务端应用收到的token为"Authorization:Bearer%20eyJhbGciOiJIUzI1....",hertzbeat对空格进行转义为“%20”,服务器没有转义导致鉴权失败,建议转义功能作为可选项。 - +> 6.配置http api监控,用于进行业务接口探测,确保业务可以用,另外接口有进行token鉴权校验,"Authorization:Bearer eyJhbGciOiJIUzI1....",配置后测试,提示“StatusCode 401”。服务端应用收到的token为"Authorization:Bearer%20eyJhbGciOiJIUzI1....",hertzbeat对空格进行转义为“%20”,服务器没有转义导致鉴权失败,建议转义功能作为可选项。 -### Docker部署常见问题 +### Docker部署常见问题 1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** -此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` +> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. 
**按照流程部署,访问 http://ip:1157/ 无界面** -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 -3. **日志报错TDengine连接或插入SQL失败** +3. **日志报错TDengine连接或插入SQL失败** + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 1. **按照流程部署,访问 http://ip:1157/ 无界面** 请参考下面几点排查问题: + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md index 04a15823529..b60a5882b9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md @@ -15,17 +15,18 @@ keywords: [开源监控系统, 开源中间件监控, Jetty应用服务器监控 #### Jetty应用服务器开启JMX协议步骤 -[参考官方文档](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) +[参考官方文档](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) -1. 在 Jetty 启动 JMX JMX-REMOTE 模块 +1. 在 Jetty 启动 JMX JMX-REMOTE 模块 ```shell java -jar $JETTY_HOME/start.jar --add-module=jmx java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ``` -命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 -2. 
编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 +命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 + +2. 编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 **`localhost` 需修改为对外暴露 IP** @@ -50,49 +51,45 @@ java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| 
ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md index c97cc73b003..f046b3ef6a0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md @@ -28,57 +28,54 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | 
kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache (限JDK8及以下) -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md index 2b4ed0514b7..3cb4d74132c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md @@ -7,7 +7,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] > 对Kafka的通用性能指标进行采集监控 -**使用协议:JMX** +**使用协议:JMX** ### 监控前操作 @@ -18,7 +18,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 
修改 Kafka 启动脚本 修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` -在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 +在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 ```shell export JMX_PORT=9999; @@ -32,71 +32,65 @@ export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management. ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:server_info -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Version | 无 | Kafka版本 | -| StartTimeMs | ms | 运行时间 | -| CommitId | 无 | 版本提交ID | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------| +| Version | 无 | Kafka版本 | +| StartTimeMs | ms | 运行时间 | +| CommitId | 无 | 版本提交ID | #### 指标集合:code_cache -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:active_controller_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Value | 个 | 活跃监控器数量 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------| +| Value | 个 | 活跃监控器数量 | #### 指标集合:broker_partition_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | 
----------- | ----------- | -| Value | 个 | 分区数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------| +| Value | 个 | 分区数量 | #### 指标集合:broker_leader_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Value | 个 | 领导者数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------| +| Value | 个 | 领导者数量 | #### 指标集合:broker_handler_avg_percent 请求处理器空闲率 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| EventType | 无 | 类型 | -| RateUnit | 具体情况具体分析 | 单位 | -| Count | 个 | 数量 | -| OneMinuteRate | % | 一分钟处理率 | -| FiveMinuteRate | % | 五分钟处理率 | -| MeanRate | 无 | 平均处理率 | -| FifteenMinuteRate | 无 | 十五分钟处理率 | - - -> 其他指标见文知意,欢迎贡献一起优化文档。 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|----------|---------| +| EventType | 无 | 类型 | +| RateUnit | 具体情况具体分析 | 单位 | +| Count | 个 | 数量 | +| OneMinuteRate | % | 一分钟处理率 | +| FiveMinuteRate | % | 五分钟处理率 | +| MeanRate | 无 | 平均处理率 | +| FifteenMinuteRate | 无 | 十五分钟处理率 | + +> 其他指标见文知意,欢迎贡献一起优化文档。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md index f45da8d9b27..aa242d93a6b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md @@ -7,14 +7,13 @@ keywords: [开源监控系统, 开源Kubernetes监控] > 对kubernetes的通用性能指标进行采集监控。 - ## 监控前操作 如果想要监控 `Kubernetes` 中的信息,则需要获取到可访问Api Server的授权TOKEN,让采集请求获取到对应的信息。 -参考获取token步骤 +参考获取token步骤 -#### 方式一: +#### 方式一: 1. 
创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +26,9 @@ kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin -- kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` + #### 方式二: + ``` kubectl create serviceaccount cluster-admin @@ -36,13 +37,14 @@ kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-adm kubectl create token --duration=1000h cluster-admin ``` + ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-------------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| APiServer端口 | K8s APiServer端口,默认6443 | +| APiServer端口 | K8s APiServer端口,默认6443 | | token | 授权Access Token | | URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | | 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | @@ -53,44 +55,45 @@ kubectl create token --duration=1000h cluster-admin #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- |--------| -| node_name | 无 | 节点名称 | -| is_ready | 无 | 节点状态 | -| capacity_cpu | 无 | CPU容量 | -| allocatable_cpu | 无 | 已分配CPU | -| capacity_memory | 无 | 内存容量 | -| allocatable_memory | 无 | 已分配内存 | -| creation_time | 无 | 节点创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------| +| node_name | 无 | 节点名称 | +| is_ready | 无 | 节点状态 | +| capacity_cpu | 无 | CPU容量 | +| allocatable_cpu | 无 | 已分配CPU | +| capacity_memory | 无 | 内存容量 | +| allocatable_memory | 无 | 已分配内存 | +| creation_time | 无 | 节点创建时间 | #### 指标集合:namespaces -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- |-------------| -| namespace | 无 | namespace名称 | -| status | 无 | 状态 | -| creation_time | 无 | 创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-------------| +| namespace | 无 | namespace名称 | +| status | 无 | 状态 | +| creation_time | 无 | 创建时间 | #### 指标集合:pods -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- |----------------| -| pod | 无 | 
pod名称 | -| namespace | 无 | pod所属namespace | -| status | 无 | pod状态 | -| restart | 无 | 重启次数 | -| host_ip | 无 | 所在主机IP | -| pod_ip | 无 | pod ip | -| creation_time | 无 | pod创建时间 | -| start_time | 无 | pod启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|----------------| +| pod | 无 | pod名称 | +| namespace | 无 | pod所属namespace | +| status | 无 | pod状态 | +| restart | 无 | 重启次数 | +| host_ip | 无 | 所在主机IP | +| pod_ip | 无 | pod ip | +| creation_time | 无 | pod创建时间 | +| start_time | 无 | pod启动时间 | #### 指标集合:services -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- |------|--------------------------------------------------------| -| service | 无 | service名称 | -| namespace | 无 | service所属namespace | -| type | 无 | service类型 ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | 无 | cluster ip | -| selector | 无 | tag selector匹配 | -| creation_time | 无 | 创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|--------------------------------------------------------| +| service | 无 | service名称 | +| namespace | 无 | service所属namespace | +| type | 无 | service类型 ClusterIP NodePort LoadBalancer ExternalName | +| cluster_ip | 无 | cluster ip | +| selector | 无 | tag selector匹配 | +| creation_time | 无 | 创建时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md index 853be6610b4..4a69c04495e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, Linux操作系统监控 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 
更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md index 5fd59466717..2490e3630dd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md @@ -9,49 +9,46 @@ keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | 
+|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为3306。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|------------| +| version | 无 | 数据库版本 | +| port | 无 | 数据库暴露服务端口 | +| datadir | 无 | 数据库存储数据盘地址 | +| max_connections | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| threads_created | 无 | MariaDB已经创建的总连接数 | -| threads_connected | 无 | MariaDB已经连接的连接数 | -| threads_cached | 无 | MariaDB当前缓存的连接数 | -| threads_running | 无 | MariaDB当前活跃的连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|------------------| +| threads_created | 无 | MariaDB已经创建的总连接数 | +| threads_connected | 无 | MariaDB已经连接的连接数 | +| threads_cached | 无 | MariaDB当前缓存的连接数 | +| threads_running | 无 | MariaDB当前活跃的连接数 | #### 指标集合:innodb -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|-------------------------| +| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | +| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | +| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | +| innodb_data_written | KB | 
innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md index 5d89ce0977b..920da021e6b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance with 1.4 You need to use the stats command to view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. @@ -36,7 +36,7 @@ STAT version 1.4.15 ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -49,7 +49,7 @@ STAT version 1.4.15 #### Metrics Set:server_info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|---------------------------------------------------| | pid | | Memcache server process ID | | uptime | s | The number of seconds the server has been running | @@ -66,4 +66,5 @@ STAT version 1.4.15 | cmd_set | | Set command request count | | cmd_flush | | Flush command request count | | get_misses | | Get command misses | -| delete_misses | | Delete command misses | \ No newline at end of file +| delete_misses | | Delete command misses | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md index dc23f3d6fa5..c5deaab27a2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md @@ -9,49 +9,46 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为3306。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 
| +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|------------| +| version | 无 | 数据库版本 | +| port | 无 | 数据库暴露服务端口 | +| datadir | 无 | 数据库存储数据盘地址 | +| max_connections | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| threads_created | 无 | MySql已经创建的总连接数 | -| threads_connected | 无 | MySql已经连接的连接数 | -| threads_cached | 无 | MySql当前缓存的连接数 | -| threads_running | 无 | MySql当前活跃的连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|----------------| +| threads_created | 无 | MySql已经创建的总连接数 | +| threads_connected | 无 | MySql已经连接的连接数 | +| threads_cached | 无 | MySql当前缓存的连接数 | +| threads_running | 无 | MySql当前活跃的连接数 | #### 指标集合:innodb -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|-------------------------| +| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | +| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | +| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | +| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md index d070101da8f..ded4a06ad2f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md @@ -14,7 +14,7 @@ keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGr nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 ``` -### +### **1、通过 stats 和 rocksdb stats 接口获取可用参数。** @@ -34,7 +34,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-------------|--------------------------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️没有协议头(例如:https://、http://) | | 监控名称 | 识别此监控的名称。名称需要唯一 | @@ -53,7 +53,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 指标太多,相关链接如下 **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------------------------------------------|------|--------| | 达到内存水位线的语句的数量(rate) | | | | 达到内存水位线的语句的数量(sum) | | | @@ -116,8 +116,9 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 指标太多,相关链接如下 **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------|------|------------------------| | rocksdb.backup.read.bytes | | 备份 RocksDB 数据库期间读取的字节数 | | rocksdb.backup.write.bytes | | 指标名称 | | ... | | ... | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md index a509ff7da2a..82908df358b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md @@ -13,18 +13,19 @@ keywords: [开源监控工具, 开源Java监控工具, 监控Nginx指标] 如果你想使用这种监控方式监控 'Nginx' 的信息,你需要修改你的 Nginx 配置文件以启用监控模块。 -### 启用 ngx_http_stub_status_module +### 启用 ngx_http_stub_status_module 1. 
检查是否已添加 `ngx_http_stub_status_module` ```shell nginx -V ``` + 查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 2. 编译安装 Nginx, 添加 `ngx_http_stub_status_module` 模块 -下载 Nginx 并解压,在目录下执行 +下载 Nginx 并解压,在目录下执行 ```shell ./configure --prefix=/usr/local/nginx --with-http_stub_status_module @@ -58,7 +59,7 @@ nginx -s reload 5. 在浏览器访问 `http://localhost/nginx-status` 即可查看 Nginx 监控状态信息。 -### 启用 `ngx_http_reqstat_module` +### 启用 `ngx_http_reqstat_module` 1. 安装 `ngx_http_reqstat_module` 模块 @@ -107,49 +108,47 @@ nginx -s reload 4. 在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 - **参考文档: https://blog.csdn.net/weixin_55985097/article/details/116722309** **⚠️注意监控模块的端点路径为 `/nginx-status` `/req-status`** ### 配置参数 -| 参数名 | 参数描述 | -|-------------------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | Nginx 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | +| 参数名 | 参数描述 | +|--------|-----------------------------------------------------| +| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | +| 监控名称 | 标识此监控的名称。名称需要唯一 | +| 端口 | Nginx 提供的端口 | +| 超时时间 | 允许收集响应时间 | +| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | +| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | +| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | ### 收集指标 #### 指标收集:nginx_status -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------------|------------| -| 接收连接数 | | 已接受的连接 | -| 处理连接数 | | 成功处理的连接 | -| 活动连接数 | | 当前活动连接 | -| 丢弃连接数 | | 丢弃的连接 | -| 请求连接数 | | 客户端请求 | -| 读连接数 | | 正在执行读操作的连接 | -| 写连接数 | | 正在执行写操作的连接 | -| 等待连接数 | | 等待连接 | +| 指标名称 | 指标单位 | 指标描述 | +|-------|------|------------| +| 接收连接数 | | 已接受的连接 | +| 处理连接数 | | 成功处理的连接 | +| 活动连接数 | | 当前活动连接 | +| 丢弃连接数 | | 丢弃的连接 | +| 请求连接数 | | 客户端请求 | +| 读连接数 | | 正在执行读操作的连接 | +| 写连接数 | | 正在执行写操作的连接 | +| 等待连接数 | | 等待连接 | #### 指标集:req_status -| 指标名称 | 指标单位 | 
指标描述 | -|---------|-------|---------| -| 分组类别 | | 分组类别 | -| 分组名称 | | 分组名称 | -| 最大并发连接数 | | 最大并发连接数 | -| 最大带宽 | kb | 最大带宽 | -| 总流量 | kb | 总流量 | -| 总请求数 | | 总请求数 | -| 当前并发连接数 | | 当前并发连接数 | -| 当前带宽 | kb | 当前带宽 | - +| 指标名称 | 指标单位 | 指标描述 | +|---------|------|---------| +| 分组类别 | | 分组类别 | +| 分组名称 | | 分组名称 | +| 最大并发连接数 | | 最大并发连接数 | +| 最大带宽 | kb | 最大带宽 | +| 总流量 | kb | 总流量 | +| 总请求数 | | 总请求数 | +| 当前并发连接数 | | 当前并发连接数 | +| 当前带宽 | kb | 当前带宽 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md index 5760321922f..735ab741b4d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md @@ -15,7 +15,7 @@ NTP监控的中文文档如下: ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |------|--------------------------------------------------| | 监控主机 | 被监控的IPv4、IPv6或域名。注意⚠️不包含协议头(例如:https://,http://) | | 监控名称 | 标识此监控的名称。名称需要是唯一的 | @@ -27,7 +27,7 @@ NTP监控的中文文档如下: #### 指标集:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------|------|--------------------------| | 响应时间 | 毫秒 | NTP服务器响应请求所需的时间。 | | 时间 | 毫秒 | NTP服务器报告的当前时间。 | @@ -39,3 +39,4 @@ NTP监控的中文文档如下: | 层级 | | NTP服务器的层级,表示其与参考时钟的距离。 | | 参考ID | | 指示NTP服务器使用的参考时钟或时间源的标识符。 | | 精度 | | NTP服务器时钟的精度,表示其准确性。 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md index 632a7f41b2d..8bf21d7debb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md @@ -9,50 +9,48 @@ keywords: [开源监控系统, 开源数据库监控, OpenGauss数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | 
-| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | #### 指标集合:state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------------------------------------------------------------| +| name | 无 | 
数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | #### 指标集合:activity -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| running | 连接数 | 当前客户端连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md index 49136e51c48..7ffdfa219ff 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md @@ -9,55 +9,56 @@ keywords: [开源监控系统, 开源数据库监控, Oracle数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1521。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为1521。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | 
+| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| database_version | 无 | 数据库版本 | -| database_type | 无 | 数据库类型 | -| hostname | 无 | 主机名称 | -| instance_name | 无 | 数据库实例名称 | -| startup_time | 无 | 数据库启动时间 | -| status | 无 | 数据库状态 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|---------| +| database_version | 无 | 数据库版本 | +| database_type | 无 | 数据库类型 | +| hostname | 无 | 主机名称 | +| instance_name | 无 | 数据库实例名称 | +| startup_time | 无 | 数据库启动时间 | +| status | 无 | 数据库状态 | #### 指标集合:tablespace -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| file_id | 无 | 文件ID | -| file_name | 无 | 文件名称 | -| tablespace_name | 无 | 所属表空间名称 | -| status | 无 | 状态 | -| bytes | MB | 大小 | -| blocks | 无 | 区块数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------| +| file_id | 无 | 文件ID | +| file_name | 无 | 文件名称 | +| tablespace_name | 无 | 所属表空间名称 | +| status | 无 | 状态 | +| bytes | MB | 大小 | +| blocks | 无 | 区块数量 | #### 指标集合:user_connect -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| username | 无 | 用户名 | -| counts | 个数 | 当前连接数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| username | 无 | 用户名 | +| counts | 个数 | 当前连接数量 | #### 指标集合:performance -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| qps | QPS | I/O Requests per Second 每秒IO请求数量 | -| tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | -| mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------|------|---------------------------------------| +| qps | QPS | I/O Requests per Second 每秒IO请求数量 | +| tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | +| mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md index 7b6613f25bd..401e86f9382 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md @@ -5,32 +5,33 @@ sidebar_label: PING连通性 keywords: [开源监控系统, 开源网络监控, 网络PING监控] --- -> 对对端HOST地址进行PING操作,判断其连通性 +> 对对端HOST地址进行PING操作,判断其连通性 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Ping超时时间 | 设置PING未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|----------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Ping超时时间 | 设置PING未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | - -### 常见问题 +### 常见问题 1. 
安装包部署的hertzbeat下ping连通性监控异常 - 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 > docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md index 8d6c2eb5548..4c58cc4a308 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md @@ -24,26 +24,24 @@ keywords: [开源监控工具,开源Java监控工具,监控POP3指标] 5. 通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 ``` - ### 配置参数 -| 参数名 | 参数描述 | -|-------------------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | POP3 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | +| 参数名 | 参数描述 | +|--------|-----------------------------------------------------| +| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | +| 监控名称 | 标识此监控的名称。名称需要唯一 | +| 端口 | POP3 提供的端口 | +| 超时时间 | 允许收集响应时间 | +| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | +| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | +| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | ### 采集指标 #### 指标集:email_status -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------| -| 邮件数量 | | 邮件数量 | -| 邮箱总大小 | kb | 邮箱中邮件的总大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------| +| 邮件数量 | | 邮件数量 | +| 邮箱总大小 | kb | 邮箱中邮件的总大小 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md 
index b89fde24a81..88dc6360cd3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md @@ -9,22 +9,21 @@ keywords: [开源监控系统, 开源网络监控, 端口可用性监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 连接超时时间 | 端口连接的等待超时时间,单位毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 连接超时时间 | 端口连接的等待超时时间,单位毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md index 4716d0e2e64..59adae7da81 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md @@ -9,50 +9,48 @@ keywords: [开源监控系统, 开源数据库监控, PostgreSQL数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 
密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | #### 指标集合:state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------------------------------------------------------------| +| name | 无 | 数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | 
+| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | #### 指标集合:activity -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| running | 连接数 | 当前客户端连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md index 8cb91eeb3e6..89c728162c9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md @@ -5,42 +5,42 @@ sidebar_label: RabbitMQ消息中间件 keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间件监控] --- -> 对 RabbitMQ 消息中间件的运行状态,节点,队列等相关指标进行监测。 +> 对 RabbitMQ 消息中间件的运行状态,节点,队列等相关指标进行监测。 -### 监控前操作 +### 监控前操作 > HertzBeat 使用 RabbitMQ Management 的 Rest Api 对 RabbitMQ 进行指标数据采集。 -> 故需要您的 RabbitMQ 环境开启 Management 插件 +> 故需要您的 RabbitMQ 环境开启 Management 插件 -1. 开启 Management 插件,或使用自开启版本 +1. 开启 Management 插件,或使用自开启版本 ```shell rabbitmq-plugins enable rabbitmq_management ``` -2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 +2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 3. 
在 HertzBeat 添加对应 RabbitMQ 监控即可,参数使用 Management 的 IP 端口,默认账户密码。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -|----------|---------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | RabbitMQ Management 对外提供的HTTP端口,默认为15672。 | -| 用户名 | 接口Basic认证时使用的用户名 | -| 密码 | 接口Basic认证时使用的密码 | -| 超时时间 | HTTP请求查询超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | RabbitMQ Management 对外提供的HTTP端口,默认为15672。 | +| 用户名 | 接口Basic认证时使用的用户名 | +| 密码 | 接口Basic认证时使用的密码 | +| 超时时间 | HTTP请求查询超时时间 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:overview +#### 指标集合:overview -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------------|------|--------------------------| | product_version | 无 | 产品版本 | | product_name | 无 | 产品名称 | @@ -52,7 +52,7 @@ rabbitmq-plugins enable rabbitmq_management #### 指标集合:object_totals -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-----------------| | channels | 无 | channels的总数量 | | connections | 无 | connections的总数量 | @@ -62,65 +62,65 @@ rabbitmq-plugins enable rabbitmq_management #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|--------------------------------| -| name | 无 | The node name | -| type | 无 | The node type | -| running | 无 | Running state | -| os_pid | 无 | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | -| fd_total | 无 | File descriptors available | -| fd_used | 无 | File descriptors used | -| sockets_total | 无 | Sockets available | -| sockets_used | 无 | Sockets used | 
-| proc_total | 无 | Erlang process limit | -| proc_used | 无 | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | -| disk_free | GB | Free disk space | -| gc_num | 无 | GC runs | -| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | -| context_switches | 无 | Context_switches num | -| io_read_count | 无 | 总共读操作的数量 | -| io_read_bytes | KB | 总共读入磁盘数据大小 | -| io_read_avg_time | ms | 读操作平均时间,毫秒为单位 | -| io_write_count | 无 | 磁盘写操作总量 | -| io_write_bytes | KB | 写入磁盘数据总量 | -| io_write_avg_time | ms | 每个磁盘写操作的平均时间,毫秒为单位 | -| io_seek_count | 无 | seek操作总量 | -| io_seek_avg_time | ms | seek操作的平均时间,毫秒单位 | -| io_sync_count | 无 | fsync操作的总量 | -| io_sync_avg_time | ms | fsync操作的平均时间,毫秒为单位 | -| connection_created | 无 | connection created num | -| connection_closed | 无 | connection closed num | -| channel_created | 无 | channel created num | -| channel_closed | 无 | channel closed num | -| queue_declared | 无 | queue declared num | -| queue_created | 无 | queue created num | -| queue_deleted | 无 | queue deleted num | -| connection_closed | 无 | connection closed num | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|-------------------------------| +| name | 无 | The node name | +| type | 无 | The node type | +| running | 无 | Running state | +| os_pid | 无 | Pid in OS | +| mem_limit | MB | Memory usage high watermark | +| mem_used | MB | Total amount of memory used | +| fd_total | 无 | File descriptors available | +| fd_used | 无 | File descriptors used | +| sockets_total | 无 | Sockets available | +| sockets_used | 无 | Sockets used | +| proc_total | 无 | Erlang process limit | +| proc_used | 无 | Erlang processes used | +| disk_free_limit | GB | Free disk space low watermark | +| disk_free | GB | Free disk space | +| gc_num | 无 | GC runs | +| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | +| context_switches | 无 | Context_switches num | +| io_read_count | 无 | 总共读操作的数量 | +| io_read_bytes | KB | 总共读入磁盘数据大小 | +| io_read_avg_time | ms | 读操作平均时间,毫秒为单位 | +| 
io_write_count | 无 | 磁盘写操作总量 | +| io_write_bytes | KB | 写入磁盘数据总量 | +| io_write_avg_time | ms | 每个磁盘写操作的平均时间,毫秒为单位 | +| io_seek_count | 无 | seek操作总量 | +| io_seek_avg_time | ms | seek操作的平均时间,毫秒单位 | +| io_sync_count | 无 | fsync操作的总量 | +| io_sync_avg_time | ms | fsync操作的平均时间,毫秒为单位 | +| connection_created | 无 | connection created num | +| connection_closed | 无 | connection closed num | +| channel_created | 无 | channel created num | +| channel_closed | 无 | channel closed num | +| queue_declared | 无 | queue declared num | +| queue_created | 无 | queue created num | +| queue_deleted | 无 | queue deleted num | +| connection_closed | 无 | connection closed num | #### 指标集合:queues -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------------------|------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | 无 | The name of the queue with non-ASCII characters escaped as in C. | +| name | 无 | The name of the queue with non-ASCII characters escaped as in C. | | node | 无 | The queue on the node name | -| state | 无 | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | +| state | 无 | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | | type | 无 | Queue type, one of: quorum, stream, classic. | -| vhost | 无 | vhost path | +| vhost | 无 | vhost path | | auto_delete | 无 | Whether the queue will be deleted automatically when no longer used | -| policy | 无 | Effective policy name for the queue. | +| policy | 无 | Effective policy name for the queue. | | consumers | 无 | Number of consumers. | | memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. 
| | messages_ready | 无 | Number of messages ready to be delivered to clients | -| messages_unacknowledged | 无 | Number of messages delivered to clients but not yet acknowledged | +| messages_unacknowledged | 无 | Number of messages delivered to clients but not yet acknowledged | | messages | 无 | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | 无 | Number of messages from messages_ready which are resident in ram | +| messages_ready_ram | 无 | Number of messages from messages_ready which are resident in ram | | messages_persistent | 无 | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | +| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | | message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | +| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md index dd9b304e1ce..58248fb0b45 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md @@ -2,244 +2,239 @@ id: redis title: 
监控:REDIS数据库监控 sidebar_label: REDIS数据库 -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] +keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] --- > 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | +| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制| -| atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本| -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间| -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| 
configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理| -| executable | 无 | 服务器可执行文件的路径 | -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志| -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|-----------------------------------------------| +| redis_version | 无 | Redis 服务器版本 | +| redis_git_sha1 | 无 | Git SHA1 | +| redis_git_dirty | 无 | Git dirty flag | +| redis_build_id | 无 | redis 构建的id | +| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | +| os | 无 | Redis 服务器的宿主操作系统 | +| arch_bits | 无 | 架构(32 或 64 位) | +| multiplexing_api | 无 | Redis使用的事件循环机制 | +| atomicvar_api | 无 | Redis使用的原子 API | +| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本 | +| process_id | 无 | 服务器进程的PID | +| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | +| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | +| tcp_port | 无 | TCP/IP侦听端口 | +| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | +| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | +| uptime_in_days | 无 | 自Redis服务器启动后的天数 | +| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | +| configured_hz | 无 | 服务器配置的频率设置 | +| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | +| executable | 无 | 服务器可执行文件的路径 | +| config_file | 无 | 配置文件的路径 | +| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | +| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | #### 指标集合:clients -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值| -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。| -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING)| -| clients_in_timeout_table | 无 | 
客户端超时表中的客户端数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|--------------------------------------------------------------------------------| +| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | +| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | +| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | +| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | +| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | +| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | +| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | +| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | #### 指标集合:memory -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字| -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值| -| used_memory_peak | byte | Redis消耗的峰值内存(字节)| -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | -| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和| -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节)| -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte| 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同| -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片| -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| 
number_of_cached_scripts | 无 |缓存的lua脚本数量 | -| maxmemory | byte | maxmemory配置指令的值| -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | -| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。| -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | -| mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL)| -| lazyfreed_objects | 无 | 已延迟释放的对象数。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|----------|-----------------------------------------------------------------------------------------------| +| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | +| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | +| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | +| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak | byte | Redis消耗的峰值内存(字节) | +| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | +| 
used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | +| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和 | +| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | +| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | +| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | +| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | +| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | +| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | +| total_system_memory | byte | Redis主机的内存总量 | +| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_lua | byte | Lua引擎使用的字节数 | +| used_memory_lua_human | KB | 上一个值的人类可读值 | +| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | +| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | +| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | +| maxmemory | byte | maxmemory配置指令的值 | +| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | +| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | +| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | +| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | +| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | +| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | +| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | +| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | +| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | +| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | +| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | +| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | +| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | 
+| mem_clients_normal | 无 | 普通客户端使用的内存 | +| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | +| mem_allocator | 无 | 内存分配器,在编译时选择。 | +| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | +| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | +| lazyfreed_objects | 无 | 已延迟释放的对象数。 | #### 指标集合:persistence -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是| -| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比| -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功| -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite| -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|------------------------------|--------|-----------------------------------------------------------------------------------------------------| +| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | +| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | +| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | +| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | +| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | +| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | +| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | +| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | +| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | +| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | +| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | +| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | +| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | +| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | +| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite | +| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | +| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | +| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | +| aof_last_write_status | 无 | 上次aof写入状态 | +| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | +| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | #### 指标集合:stats -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| 
instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数| -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数| -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数| -| tracking_total_keys | 无 | key 查询的总数| -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数| -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|----------------------------------------------------| +| total_connections_received | 无 | 服务器接受的连接总数 | +| total_commands_processed | 无 | 服务器处理的命令总数 | +| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | +| total_net_input_bytes | byte | 从网络读取的字节总数 | +| total_net_output_bytes | byte | 写入网络的总字节数 | +| instantaneous_input_kbps | KB/S | 
网络每秒的读取速率(KB/秒) | +| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | +| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | +| sync_full | 无 | 具有副本的完整重新同步数 | +| sync_partial_ok | 无 | 接受的部分重新同步请求数 | +| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | +| expired_keys | 无 | 过期的key总数 | +| expired_stale_perc | 无 | 可能过期key的百分比 | +| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | +| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | +| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | +| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | +| keyspace_misses | 无 | 在主dict 中未查到key的次数 | +| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | +| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | +| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | +| total_forks | 无 | 自服务器启动以来的fork操作总数 | +| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | +| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | +| active_defrag_hits | 无 | 主动碎片整理命中次数 | +| active_defrag_misses | 无 | 主动碎片整理未命中次数 | +| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | +| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | +| tracking_total_keys | 无 | key 查询的总数 | +| tracking_total_items | 无 | item查询的总数 | +| tracking_total_prefixes | 无 | 前缀查询的总数 | +| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | +| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | +| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | +| total_reads_processed | 无 | 正在读取的请求数 | +| total_writes_processed | 无 | 正在写入的请求数 | +| io_threaded_reads_processed | 无 | 正在读取的线程数 | +| io_threaded_writes_processed | 无 | 正在写入的线程数 | #### 指标集合:replication -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串| -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID| -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量| -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| 
repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-------------------------------------------------------------------------------------| +| role | 无 | 节点角色 master 主节点 slave 从节点 | +| connected_slaves | 无 | 连接的从节点数 | +| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | +| master_replid | 无 | 实例启动的随机字符串 | +| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | +| master_repl_offset | 无 | 主从同步偏移量 | +| second_repl_offset | 无 | 接受从服务ID的最大偏移量 | +| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | +| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | +| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | +| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和| -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和| -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和| -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU| -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|------------------------| +| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | +| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | +| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | +| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | #### 指标集合:errorstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|-----------| +| errorstat_ERR | 无 | 错误累计出现的次数 
| +| errorstat_MISCONF | 无 | | #### 指标集合:cluster -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|--------------------| +| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | #### 指标集合:commandstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数| -| cmdstat_get | 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|---------------------------------------------------------------------------------------------------------------------------| +| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | +| cmdstat_get | 无 | get命令的统计信息 | +| cmdstat_setnx | 无 | setnx命令的统计信息 | +| cmdstat_hset | 无 | hset命令的统计信息 | +| cmdstat_hget | 无 | hget命令的统计信息 | +| cmdstat_lpush | 无 | lpush命令的统计信息 | +| cmdstat_rpush | 无 | rpush命令的统计信息 | +| cmdstat_lpop | 无 | lpop命令的统计信息 | +| cmdstat_rpop | 无 | rpop命令的统计信息 | +| cmdstat_llen | 无 | llen命令的统计信息 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md index 08788efeaae..1149ed4bdd9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md @@ -5,24 +5,24 @@ sidebar_label: ShenYu网关监控 keywords: [开源监控系统, 开源消息中间件监控, ShenYu网关监控监控] --- -> 对 ShenYu 
网关的运行状态(JVM相关),请求响应等相关指标进行监测。 +> 对 ShenYu 网关的运行状态(JVM相关),请求响应等相关指标进行监测。 -## 监控前操作 +## 监控前操作 -您需要在 ShenYu 网关开启`metrics`插件,暴露对应的 prometheus metrics 接口。 +您需要在 ShenYu 网关开启`metrics`插件,暴露对应的 prometheus metrics 接口。 -开启插件, 参考 [官方文档](https://shenyu.apache.org/zh/docs/plugin-center/observability/metrics-plugin) +开启插件, 参考 [官方文档](https://shenyu.apache.org/zh/docs/plugin-center/observability/metrics-plugin) -主要如下两步骤: +主要如下两步骤: 1. 在网关的 pom.xml 文件中添加 metrics 的依赖。 ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` 2. 在网关的配置yaml文件中编辑如下内容: @@ -39,11 +39,11 @@ shenyu: jvm_enabled: true #开启jvm的监控指标 ``` -最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 +最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -53,79 +53,78 @@ shenyu: | 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | | 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:shenyu_request_total +#### 指标集合:shenyu_request_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-------------------| -| value | 无 | 收集ShenYu网关的所有请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | 收集ShenYu网关的所有请求数量 | #### 指标集合:shenyu_request_throw_created -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------------------| -| value | 无 | 收集ShenYu网关的异常请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | 收集ShenYu网关的异常请求数量 | #### 指标集合:process_cpu_seconds_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| value | 无 | 用户和系统CPU总计所用的秒数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 用户和系统CPU总计所用的秒数 | #### 指标集合:process_open_fds -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- 
|------|--------------| -| value | 无 | 打开的文件描述符的数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------| +| value | 无 | 打开的文件描述符的数量 | #### 指标集合:process_max_fds -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------------| -| value | 无 | 打开的文件描述符的最大数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------| +| value | 无 | 打开的文件描述符的最大数量 | #### 指标集合:jvm_info -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-----------| -| runtime | 无 | JVM 版本信息 | -| vendor | 无 | JVM 版本信息 | -| version | 无 | JVM 版本信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| runtime | 无 | JVM 版本信息 | +| vendor | 无 | JVM 版本信息 | +| version | 无 | JVM 版本信息 | #### 指标集合:jvm_memory_bytes_used -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| area | 无 | JVM 内存区域 | -| value | MB | 给定 JVM 内存区域的已用大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| area | 无 | JVM 内存区域 | +| value | MB | 给定 JVM 内存区域的已用大小 | #### 指标集合:jvm_memory_pool_bytes_used -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-----------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已用大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的已用大小 | #### 指标集合:jvm_memory_pool_bytes_committed -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已提交大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的已提交大小 | #### 指标集合:jvm_memory_pool_bytes_max -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------| ----------- | -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的最大大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的最大大小 | #### 指标集合:jvm_threads_state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-------------| -| state | 无 | 线程状态 | -| value | 无 | 对应线程状态的线程数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------| +| 
state | 无 | 线程状态 | +| value | 无 | 对应线程状态的线程数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md index 21dcd9a88f3..5755437e80e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md @@ -13,12 +13,11 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit > 详见 https://datatracker.ietf.org/doc/html/rfc821#page-13 - **协议使用:SMTP** ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |---------|---------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️无需协议头(例如:https://、http://) | | 监控名称 | 标识此监控的名称。名称需要保持唯一 | @@ -33,9 +32,10 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit #### 指标集:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-------------------| | 响应时间 | 毫秒 | SMTP 服务器响应请求所需的时间 | | 响应状态 | | 响应状态 | | SMTP 服务器标语 | | SMTP 服务器的标语 | | helo 命令返回信息 | | helo 命令返回的响应信息 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md index 086e0a63ac8..a0695849705 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md @@ -19,6 +19,7 @@ keywords: [开源监控工具, 开源 Spring Gateway 监控工具, 监控 Spring spring-boot-starter-actuator ``` + **2. 
修改 YML 配置以暴露度量接口:** ```yaml @@ -35,56 +36,55 @@ management: ### 配置参数 -| 参数名称 | 参数描述 | -| ----------- |--------------------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 8080。 | +| 参数名称 | 参数描述 | +|----------|--------------------------------------------------------|-----------------------------------------------| +| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | +| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | +| 端口 | 数据库提供的默认端口为 8080。 | | 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | will continue only if the probe is successful -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | will continue only if the probe is successful | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | ### 采集指标 #### 指标收集: 健康状态 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|--------------------------| -| 服务状态 | 无 | 服务健康状态: UP(正常),Down(异常) | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|-------------------------| +| 服务状态 | 无 | 服务健康状态: UP(正常),Down(异常) | #### 指标收集: 环境信息 -| 指标名称 | 指标单位 | 指标描述 | -|---------|-------|----------------------------------------------| -| profile | 无 | 应用程序运行的配置环境: prod(生产环境),dev(开发环境),test(测试环境) | -| 端口号 | 无 | 应用程序暴露的端口 | -| 操作系统 | 无 | 运行操作系统 | -| 操作系统架构 | 无 | 运行操作系统的架构 | -| JDK供应商 | 无 | JDK 供应商 | -| JVM版本 | 无 | JVM 版本 | +| 指标名称 | 指标单位 | 指标描述 | +|---------|------|----------------------------------------------| +| profile | 无 | 应用程序运行的配置环境: prod(生产环境),dev(开发环境),test(测试环境) | +| 端口号 | 无 | 应用程序暴露的端口 | +| 操作系统 | 无 | 运行操作系统 | +| 操作系统架构 | 无 | 运行操作系统的架构 | +| JDK供应商 | 无 | JDK 供应商 | +| JVM版本 | 无 | JVM 版本 | #### 指标收集: 线程信息 -| 指标名称 | 指标单位 | 指标描述 | -|-------------|------------|-------------| -| 状态 | 无 | 线程状态 | -| 数量 | 无 | 线程状态对应的线程数量 | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|-------------| +| 状态 | 无 | 线程状态 | +| 数量 | 无 | 
线程状态对应的线程数量 | #### 指标收集: 内存使用情况 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|-------------| -| 内存空间 | 无 | 内存空间名称 | -| 内存占用 | MB | 此空间占用的内存大小 | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|------------| +| 内存空间 | 无 | 内存空间名称 | +| 内存占用 | MB | 此空间占用的内存大小 | #### 指标收集: 路由信息 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|----------| -| 路由id | 无 | 路由 ID | -| 匹配规则 | 无 | 路由匹配规则 | -| 资源标识符 | 无 | 服务资源标识符 | -| 优先级 | 无 | 此路由的优先级 | - +| 指标名称 | 指标单位 | 指标描述 | +|-------|------|---------| +| 路由id | 无 | 路由 ID | +| 匹配规则 | 无 | 路由匹配规则 | +| 资源标识符 | 无 | 服务资源标识符 | +| 优先级 | 无 | 此路由的优先级 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md index 280c6cb6b06..e66d4237a13 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md @@ -7,7 +7,6 @@ keywords: [开源监控系统, 开源消息中间件监控, SpringBoot2.0 监控 > 对SpringBoot2.0 actuator 暴露的通用性能指标进行采集监控。 - ## 监控前操作 如果想要通过此监控类型监控 `SpringBoot` 中的信息,则需要您的SpringBoot应用集成并开启SpringBoot Actuator。 @@ -20,6 +19,7 @@ keywords: [开源监控系统, 开源消息中间件监控, SpringBoot2.0 监控 spring-boot-starter-actuator ``` + **2、修改YML配置暴露指标接口:** ```yaml @@ -30,7 +30,9 @@ management: include: '*' enabled-by-default: on ``` + *注意:如果你的项目里还引入了认证相关的依赖,比如springboot-security,那么SpringBoot Actuator暴露出的接口可能会被拦截,此时需要你手动放开这些接口,以springboot-security为例,需要在SecurityConfig配置类中加入以下代码:* + ```java public class SecurityConfig extends WebSecurityConfigurerAdapter{ @Override @@ -46,48 +48,50 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ } } ``` + ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 监控Host 
| 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 应用服务对外提供的端口,默认为8080。 | | 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | | Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- |--------------------------------| -| status | 无 | 服务健康状态: UP,Down | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|-----------------| +| status | 无 | 服务健康状态: UP,Down | #### 指标集合:environment -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------| -------- |----------------------------| -| profile | 无 | 应用运行profile: prod,dev,test | -| port | 无 | 应用暴露端口 | -| os | 无 | 运行所在操作系统 | -| os_arch | 无 | 运行所在操作系统架构 | -| jdk_vendor | 无 | jdk vendor | -| jvm_version | 无 | jvm version | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|----------------------------| +| profile | 无 | 应用运行profile: prod,dev,test | +| port | 无 | 应用暴露端口 | +| os | 无 | 运行所在操作系统 | +| os_arch | 无 | 运行所在操作系统架构 | +| jdk_vendor | 无 | jdk vendor | +| jvm_version | 无 | jvm version | #### 指标集合:threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- |------|--------------------| -| state | 无 | 线程状态 | -| number | 无 | 此线程状态对应的线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------------| +| state | 无 | 线程状态 | +| number | 无 | 此线程状态对应的线程数量 | #### 指标集合:memory_used -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------| -| space | 无 | 内存空间名称 | -| mem_used | MB | 此空间占用内存大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|-----------| +| space | 无 | 内存空间名称 | +| mem_used | MB | 此空间占用内存大小 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md index 5dc66e27cfc..22a5a50ddd8 
100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md @@ -9,68 +9,68 @@ keywords: [开源监控系统, 开源数据库监控, SqlServer数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1433。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为1433。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| machine_name | 无 | 运行服务器实例的 Windows 计算机名称 | -| server_name | 无 | 与Windows实例关联的服务器和实例信息SQL Server | -| version | 无 | 实例的版本,SQL Server,格式为"major.minor.build.revision" | -| edition | 无 | 已安装的 实例的产品SQL Server版本 | -| start_time | 无 | 数据库启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------------------------------------------------| +| machine_name | 无 | 运行服务器实例的 Windows 计算机名称 | +| server_name | 无 | 与Windows实例关联的服务器和实例信息SQL Server | +| version | 无 | 实例的版本,SQL Server,格式为"major.minor.build.revision" | +| edition | 无 | 已安装的 实例的产品SQL Server版本 | +| start_time | 无 | 数据库启动时间 | #### 指标集合:performance_counters -| 指标名称 | 指标单位 | 指标帮助描述 | -| 
----------- | ----------- | ----------- | -| database_pages | 无 | Database pages, 已获得的页面数(缓冲池) | -| target_pages | 无 | Target pages, 缓冲池必须的理想页面数 | -| page_life_expectancy | s,秒 | Page life expectancy, 数据页在缓冲池中驻留的时间,这个时间一般会大于 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, 数据库缓冲池高速缓冲命中率,被请求的数据在缓冲池中被找到的概率,一般会大于 80% 才算正常,否则可能是缓冲池容量太小 | -| checkpoint_pages_sec | 无 | Checkpoint pages/sec, 检查点每秒写入磁盘的脏页个数,如果数据过高,证明缺少内存容量 | -| page_reads_sec | 无 | Page reads/sec, 缓存池中每秒读的页数 | -| page_writes_sec | 无 | Page writes/sec, 缓存池中每秒写的页数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------|------|-------------------------------------------------------------------------------------| +| database_pages | 无 | Database pages, 已获得的页面数(缓冲池) | +| target_pages | 无 | Target pages, 缓冲池必须的理想页面数 | +| page_life_expectancy | s,秒 | Page life expectancy, 数据页在缓冲池中驻留的时间,这个时间一般会大于 300 | +| buffer_cache_hit_ratio | % | Buffer cache hit ratio, 数据库缓冲池高速缓冲命中率,被请求的数据在缓冲池中被找到的概率,一般会大于 80% 才算正常,否则可能是缓冲池容量太小 | +| checkpoint_pages_sec | 无 | Checkpoint pages/sec, 检查点每秒写入磁盘的脏页个数,如果数据过高,证明缺少内存容量 | +| page_reads_sec | 无 | Page reads/sec, 缓存池中每秒读的页数 | +| page_writes_sec | 无 | Page writes/sec, 缓存池中每秒写的页数 | #### 指标集合:connection -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| user_connection | 无 | 已连接的会话数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------| +| user_connection | 无 | 已连接的会话数 | -### 常见问题 +### 常见问题 -1. SSL连接问题修复 +1. SSL连接问题修复 jdk版本:jdk11 问题描述:SQL Server2019使用SA用户连接报错 -错误信息: +错误信息: + ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". 
ClientConnectionId:xxxxxxxxxxxxxxxxx ``` + 问题截图: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) 解决方案: -添加`SqlServer`监控时使用高级设置,自定义JDBC URL,拼接的jdbc url后面加上参数配置,```;encrypt=true;trustServerCertificate=true;```这个参数true表示无条件信任server端返回的任何根证书。 +添加`SqlServer`监控时使用高级设置,自定义JDBC URL,拼接的jdbc url后面加上参数配置,```;encrypt=true;trustServerCertificate=true;```这个参数true表示无条件信任server端返回的任何根证书。 -样例:```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` +样例:```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` -参考文档:[microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building-failed-unable-to-find-valid-certification/ba-p/2591304) +参考文档:[microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building-failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md index ce0084f7e95..73957e31fb8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md @@ -5,29 +5,30 @@ sidebar_label: SSL证书监控 keywords: [开源监控系统, 开源网站监控, SSL证书监控监控] --- -> 对网站的SSL证书过期时间,响应时间等指标进行监测 +> 对网站的SSL证书过期时间,响应时间等指标进行监测 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 
更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:certificate +#### 指标集合:certificate + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|----------| +| subject | 无 | 证书名称 | +| expired | 无 | 是否过期 | +| start_time | 无 | 有效期开始时间 | +| start_timestamp | ms毫秒 | 有效期开始时间戳 | +| end_time | 无 | 过期时间 | +| end_timestamp | ms毫秒 | 过期时间戳 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------| -| subject | 无 | 证书名称 | -| expired | 无 | 是否过期 | -| start_time | 无 | 有效期开始时间 | -| start_timestamp | ms毫秒 | 有效期开始时间戳 | -| end_time | 无 | 过期时间 | -| end_timestamp | ms毫秒 | 过期时间戳 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md index c306bebc550..b366ee3c2ac 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md @@ -11,67 +11,65 @@ keywords: [开源监控系统, 开源网站监控, Tomcat监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Tomcat连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 
| +| 查询超时时间 | 设置Tomcat连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | ### Tomcat开启JMX协议步骤 -1. 
搭建好tomcat后,进入tomcat下的bin目录,修改catalina.sh文件 注意⚠️替换IP地址 +1. 搭建好tomcat后,进入tomcat下的bin目录,修改catalina.sh文件 注意⚠️替换IP地址 -2. vim catalina.sh +2. vim catalina.sh ```aidl CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` -参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 +参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md index e5d1be3a140..3ec51e5464a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, Ubuntu监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| 
uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| 
available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md index 5dbb2f2c7c6..8efe5262612 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md @@ -5,26 +5,26 @@ sidebar_label: 网站监测 keywords: [开源监控系统, 开源网站监控] --- -> 对网站是否可用,响应时间等指标进行监测 +> 对网站是否可用,响应时间等指标进行监测 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:summary +#### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md index 6a1c79b9ede..41447469e61 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md @@ -6,38 +6,39 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 --- > 通过SNMP协议对Windows操作系统的通用性能指标进行采集监控。 -> 注意⚠️ Windows服务器需开启SNMP服务 +> 注意⚠️ Windows服务器需开启SNMP服务 参考资料: [什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) [什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) [Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) +[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Windows SNMP服务对外提供的端口,默认为 161。 | -| SNMP 版本 | SNMP协议版本 V1 V2c V3 | +| 参数名称 | 参数帮助描述 | +|----------|----------------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Windows SNMP服务对外提供的端口,默认为 161。 | +| SNMP 版本 | SNMP协议版本 V1 V2c V3 | | SNMP 团体字 | SNMP 协议团体名(Community Name),用于实现SNMP网络管理员访问SNMP管理代理时的身份验证。类似于密码,默认值为 public | -| 超时时间 | 协议连接超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 超时时间 | 协议连接超时时间 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:system -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- 
| ----------- | -| name | 无 | 主机名称 | -| descr | 无 | 操作系统描述 | -| uptime | 无 | 系统运行时间 | -| numUsers | 个数 | 当前用户数 | -| services | 个数 | 当前服务数量 | -| processes | 个数 | 当前进程数量 | -| responseTime | ms | 采集响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| name | 无 | 主机名称 | +| descr | 无 | 操作系统描述 | +| uptime | 无 | 系统运行时间 | +| numUsers | 个数 | 当前用户数 | +| services | 个数 | 当前服务数量 | +| processes | 个数 | 当前进程数量 | +| responseTime | ms | 采集响应时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md index 0783ceaf3fb..14d50c3c90d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md @@ -12,7 +12,7 @@ keywords: [开源监控系统, Zookeeper监控监控] > 监控 zookeeper 目前的实现方案使用的是 zookeeper 提供的四字命令 + netcat 来收集指标数据 > 需要用户自己将zookeeper的四字命令加入白名单 -1. 加白名单步骤 +1. 加白名单步骤 > 1.找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` > @@ -25,76 +25,76 @@ keywords: [开源监控系统, Zookeeper监控监控] # 将所有命令添加到白名单中 4lw.commands.whitelist=* ``` + > 3.重启服务 -```shell +```shell zkServer.sh restart ``` -2. netcat 协议 +2. netcat 协议 目前实现方案需要我们部署zookeeper的linux服务器,安装netcat的命令环境 > netcat安装步骤 -```shell -yum install -y nc -``` +> +> ```shell +> yum install -y nc +> ``` 如果终端显示以下信息则说明安装成功 + ```shell Complete! 
``` - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Zookeeper的Linux服务器SSH端口。 | -| 查询超时时间 | 设置Zookeeper连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | Zookeeper所在Linux连接用户名 | -| 密码 | Zookeeper所在Linux连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Zookeeper的Linux服务器SSH端口。 | +| 查询超时时间 | 设置Zookeeper连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | Zookeeper所在Linux连接用户名 | +| 密码 | Zookeeper所在Linux连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:conf -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| clientPort | 无 | 端口 | -| dataDir | 无 | 数据快照文件目录,默认10万次操作生成一次快照 | -| dataDirSize | kb | 数据快照文件大小 | -| dataLogDir | 无 | 事务日志文件目录,生产环境放在独立磁盘上 | -| dataLogSize | kb | 事务日志文件大小 | -| tickTime | ms | 服务器之间或客户端与服务器之间维持心跳的时间间隔 | -| minSessionTimeout | ms| 最小session超时时间 心跳时间x2 指定时间小于该时间默认使用此时间 | -| maxSessionTimeout | ms |最大session超时时间 心跳时间x20 指定时间大于该时间默认使用此时间 | -| serverId | 无 | 服务器编号 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|----------------------------------------| +| clientPort | 无 | 端口 | +| dataDir | 无 | 数据快照文件目录,默认10万次操作生成一次快照 | +| dataDirSize | kb | 数据快照文件大小 | +| dataLogDir | 无 | 事务日志文件目录,生产环境放在独立磁盘上 | +| dataLogSize | kb | 事务日志文件大小 | +| tickTime | ms | 服务器之间或客户端与服务器之间维持心跳的时间间隔 | +| minSessionTimeout | ms | 最小session超时时间 心跳时间x2 指定时间小于该时间默认使用此时间 | +| maxSessionTimeout | ms | 最大session超时时间 心跳时间x20 指定时间大于该时间默认使用此时间 | +| serverId | 无 | 服务器编号 | #### 指标集合:stats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| zk_version | 无 | 服务器版本 | -| 
zk_server_state | 无 | 服务器角色 | -| zk_num_alive_connections | 个 | 连接数 | -| zk_avg_latency | ms | 平均延时 | -| zk_outstanding_requests | 个 | 堆积请求数 | -| zk_znode_count | 个 | znode结点数量 | -| zk_packets_sent | 个 | 发包数 | -| zk_packets_received | 个 | 收包数 | -| zk_watch_count | 个 | watch数量 | -| zk_max_file_descriptor_count | 个 | 最大文件描述符数量 | -| zk_approximate_data_size | kb | 数据大小 | -| zk_open_file_descriptor_count | 个 | 打开的文件描述符数量 | -| zk_max_latency | ms | 最大延时 | -| zk_ephemerals_count | 个 | 临时节点数 | -| zk_min_latency | ms | 最小延时 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------------|------|------------| +| zk_version | 无 | 服务器版本 | +| zk_server_state | 无 | 服务器角色 | +| zk_num_alive_connections | 个 | 连接数 | +| zk_avg_latency | ms | 平均延时 | +| zk_outstanding_requests | 个 | 堆积请求数 | +| zk_znode_count | 个 | znode结点数量 | +| zk_packets_sent | 个 | 发包数 | +| zk_packets_received | 个 | 收包数 | +| zk_watch_count | 个 | watch数量 | +| zk_max_file_descriptor_count | 个 | 最大文件描述符数量 | +| zk_approximate_data_size | kb | 数据大小 | +| zk_open_file_descriptor_count | 个 | 打开的文件描述符数量 | +| zk_max_latency | ms | 最大延时 | +| zk_ephemerals_count | 个 | 临时节点数 | +| zk_min_latency | ms | 最小延时 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md index b9767924b38..aa9097c814c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md @@ -16,7 +16,6 @@ slug: / [![QQ](https://img.shields.io/badge/QQ-630061200-orange)](https://qm.qq.com/q/FltGGGIX2m) [![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCri75zfWX0GHqJFPENEbLow?logo=youtube&label=YouTube%20Channel)](https://www.youtube.com/channel/UCri75zfWX0GHqJFPENEbLow) - ## 🎡 介绍 [HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 @@ -31,7 +30,7 @@ 
slug: / - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 - 提供强大的状态页构建能力,轻松向用户传达您产品服务的实时状态。 -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ---- @@ -48,7 +47,6 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![hertzbeat](/img/home/9.png) - ### 内置监控类型 **官方内置了大量的监控模版类型,方便用户直接在页面添加使用,一款监控类型对应一个YML监控模版** @@ -113,11 +111,11 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 高性能集群 -> 当监控数量指数级上升,采集性能下降或者环境不稳定容易造成采集器单点故障时,这时我们的采集器集群就出场了。 +> 当监控数量指数级上升,采集性能下降或者环境不稳定容易造成采集器单点故障时,这时我们的采集器集群就出场了。 -- `HertzBeat` 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 -- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 -- 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 +- `HertzBeat` 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 +- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 +- 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 ![hertzbeat](/img/docs/cluster-arch.png) @@ -134,10 +132,10 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 易用友好 -- 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 -- 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 -- 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 -- 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 +- 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 +- 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 +- 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 +- 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 - 阈值告警通知友好,基于表达式阈值配置,多种告警通知渠道,支持告警静默,时段标签告警级别过滤等。 ### 完全开源 @@ -151,8 +149,7 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![cncf](/img/home/cncf-landscape-left-logo.svg) ------ - +--- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ----- @@ -269,7 +266,6 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/13.png) - ### 告警静默 - 当通过阈值规则判断触发告警后,会进入到告警静默,告警静默会根据规则对特定一次性时间段或周期性时候段的告警消息屏蔽静默,此时间段不发送告警消息。 @@ -301,8 +297,7 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/9.png) - ------ +--- **还有更多强大的功能快去探索呀。Have Fun!** diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md index 480ba184e49..974009005cd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md @@ -1,8 +1,9 @@ --- id: contact title: 交流联系 -sidebar_label: 交流联系 +sidebar_label: 交流联系 --- + > 如果您在使用过程有任何需要帮助或者想交流建议,可以通过 群 ISSUE 讨论交流。 [GITHUB ISSUES](https://github.com/apache/hertzbeat/issues) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md index b14e75bd68f..d36fb538d92 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md @@ -59,23 +59,31 @@ sidebar_label: 贡献者指南 1. 首先您需要 Fork 目标仓库 [hertzbeat repository](https://github.com/apache/hertzbeat). 2. 然后 用git命令 将代码下载到本地: + ```shell git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended ``` + 3. 下载完成后,请参考目标仓库的入门指南或者 README 文件对项目进行初始化。 4. 接着,您可以参考如下命令进行代码的提交, 切换新的分支, 进行开发: + ```shell git checkout -b a-feature-branch #Recommended ``` + 5. 提交 commit , commit 描述信息需要符合约定格式: [module name or type name]feature or bugfix or doc: custom message. + ```shell git add git commit -m '[docs]feature: necessary instructions' #Recommended ``` + 6. 推送到远程仓库 + ```shell git push origin a-feature-branch ``` + 7. 
然后您就可以在 GitHub 上发起新的 PR (Pull Request)。 请注意 PR 的标题需要符合我们的规范,并且在 PR 中写上必要的说明,来方便 Committer 和其他贡献者进行代码审查。 @@ -120,12 +128,14 @@ git pull upstream master ### 模块 - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** 提供监控管理,系统管理基础服务 + > 提供对监控的管理,监控应用配置的管理,系统用户租户后台管理等。 -- **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 +> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 > 使用通用协议远程采集获取对端指标数据。 -- **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 +> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 > 采集指标结果数据管理,数据落盘,查询,计算统计。 -- **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** 提供告警服务 +> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** 提供告警服务 > 告警计算触发,任务状态联动,告警配置,告警通知。 -- **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 +> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 > 监控告警系统可视化控制台前端 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md index 12069809575..08bd383d97d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md @@ -1,13 +1,13 @@ --- id: design title: 设计文档 -sidebar_label: 设计文档 +sidebar_label: 设计文档 --- -### HertzBeat架构 +### HertzBeat架构 -![architecture](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/docs/hertzbeat-arch.svg) +![architecture](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/docs/hertzbeat-arch.svg) -### TanCloud架构 +### TanCloud架构 -TanCloud是基于HertzBeat的公网SAAS集群版本,采用多集群,多租户的架构模式。 +TanCloud是基于HertzBeat的公网SAAS集群版本,采用多集群,多租户的架构模式。 diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md index f40f3bd9efd..0b503032d0f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md @@ -1,10 +1,10 @@ --- -id: developer -title: 开发者们 +id: developer +title: 开发者们 sidebar_label: 开发者们 --- -## ✨ HertzBeat的成员们 +## ✨ HertzBeat的成员们 @@ -30,13 +30,14 @@ sidebar_label: 开发者们 ## ✨ HertzBeat的开发者们 -Thanks these wonderful people, welcome to join us: [贡献者指南](contributing) +Thanks these wonderful people, welcome to join us: [贡献者指南](contributing) cert +
@@ -260,4 +261,3 @@ Thanks these wonderful people, welcome to join us: [贡献者指南](contributin - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md index b934a5c2ca3..9f1d408fc79 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md @@ -1,26 +1,23 @@ --- id: huaweicloud title: HertzBeat & HuaweiCloud -sidebar_label: HertzBeat & HuaweiCloud +sidebar_label: HertzBeat & HuaweiCloud --- -### HertzBeat 与 HuaweiCloud 的开源合作需求Issue +### HertzBeat 与 HuaweiCloud 的开源合作需求Issue > 欢迎大家对感兴趣的Issue领取贡献。 -- [Task] support using Huawei Cloud OBS to store custom define yml file [#841](https://github.com/apache/hertzbeat/issues/841) +- [Task] support using Huawei Cloud OBS to store custom define yml file [#841](https://github.com/apache/hertzbeat/issues/841) - [Task] support Huawei Cloud CCE metrics monitoring [#839](https://github.com/apache/hertzbeat/issues/839) - [Task] support EulerOS metrics monitoring [#838](https://github.com/apache/hertzbeat/issues/838) - [Task] support using Huawei Cloud SMN send alarm notification message [#837](https://github.com/apache/hertzbeat/issues/837) - [Task] support using GaussDB For Influx store history metrics data [#836](https://github.com/apache/hertzbeat/issues/836) - - - -### 关于 HuaweiCloud 开源活动 +### 关于 HuaweiCloud 开源活动 HuaweiCloud 华为云将面向开源软件工具链与环境、开源应用构建和开源生态组件构建这三大重点场景,提供技术支持、奖金支持、活动支持,邀请更多的开发者,携手构建开源for HuaweiCloud。 -开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx for HuaweiCloud”。 +开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx for HuaweiCloud”。 感兴趣的开发者可以查看:华为云开源项目仓库 https://gitee.com/HuaweiCloudDeveloper/huaweicloud-cloud-native-plugins-kits 了解更多。 diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md index 782dbe83c05..ff350e763e0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md @@ -1,10 +1,9 @@ --- id: images-deploy title: HertzBeat 华为云镜像部署 -sidebar_label: HertzBeat 华为云镜像部署快速指引 +sidebar_label: HertzBeat 华为云镜像部署快速指引 --- - > 易用友好的开源实时监控告警工具,无需Agent,强大自定义监控能力。 [![discord](https://img.shields.io/badge/chat-on%20discord-brightgreen)](https://discord.gg/Fb6M73htGr) @@ -20,20 +19,18 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 ![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/custom-monitor.svg) ![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/alert.svg) - ## 🎡 介绍 > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 > 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 > 更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? - +> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 > 当然我们也提供了对应的 **[SAAS版本监控云](https://console.tancloud.cn)**,中小团队和个人无需再为了监控自己的网站资源,而去部署学习一套繁琐的监控系统,**[登录即可免费开始](https://console.tancloud.cn)**。 - ---- ![hertzbeat](/img/home/1.png) @@ -64,9 +61,9 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 - 和更多的自定义监控。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 -## 镜像部署 +## 镜像部署 -> HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 +> HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 1. 开通服务器时选用 HertzBeat 镜像 2. 
启动服务器 @@ -78,7 +75,6 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](../start/tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](../start/iotdb-init) - 4. 配置用户配置文件(可选,自定义配置用户密码) HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 @@ -86,10 +82,10 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 5. 部署启动 执行位于安装目录/opt/hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - ``` + + ``` $ ./startup.sh ``` - 6. 开始探索HertzBeat 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 @@ -101,11 +97,14 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 1. **按照流程部署,访问 http://ip:1157/ 无界面** 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**监控历史图表长时间都一直无数据** + > 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 +> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md index f87ee49393b..502cfb8429e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md @@ -1,22 +1,22 @@ --- id: resource title: 相关资源 -sidebar_label: 相关资源 +sidebar_label: 相关资源 --- ## HertzBeat PDF介绍文档 下载: [PDF](http://cdn.hertzbeat.com/hertzbeat.pdf) -## 图标资源 +## 图标资源 -### HertzBeat LOGO +### HertzBeat LOGO -![logo](/img/hertzbeat-logo.svg) +![logo](/img/hertzbeat-logo.svg) -下载: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.png) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.jpg) +下载: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.png) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.jpg) -### HertzBeat Brand LOGO +### HertzBeat Brand LOGO ![logo](/img/hertzbeat-brand.svg) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md index 0e6a876da32..fcf927d68ed 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md @@ -1,17 +1,13 @@ --- id: 
sponsor title: 赞助我们 -sidebar_label: 赞助我们 +sidebar_label: 赞助我们 --- -**HertzBeat对个人或企业完全免费,我们也在全职做这件事情,如果您喜欢这个项目并且愿意提供帮助,请我们喝杯咖啡吧** +**HertzBeat对个人或企业完全免费,我们也在全职做这件事情,如果您喜欢这个项目并且愿意提供帮助,请我们喝杯咖啡吧** planet - 感谢[吉实信息(构建全新的微波+光交易网络)](https://www.flarespeed.com) 赞助服务器采集节点 -感谢[蓝易云(全新智慧上云)](https://www.tsyvps.com/aff/BZBEGYLX) 赞助服务器采集节点 - - - +感谢[蓝易云(全新智慧上云)](https://www.tsyvps.com/aff/BZBEGYLX) 赞助服务器采集节点 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md index 8436a317158..328447b586f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md @@ -1,13 +1,13 @@ --- id: account-modify title: 配置修改账户密码 -sidebar_label: 配置修改账户密码 +sidebar_label: 配置修改账户密码 --- HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 -配置文件内容参考 项目仓库[/script/sureness.yml](https://github.com/apache/hertzbeat/blob/master/script/sureness.yml) +配置文件内容参考 项目仓库[/script/sureness.yml](https://github.com/apache/hertzbeat/blob/master/script/sureness.yml) ```yaml @@ -125,4 +125,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** +**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md index 7dde7ec14e9..01380784169 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md @@ -1,23 +1,25 @@ --- id: custom-config 
title: 常见参数配置 -sidebar_label: 常见参数配置 +sidebar_label: 常见参数配置 --- 这里描述了如果配置短信服务器,内置可用性告警触发次数等。 -**`hertzbeat`的配置文件`application.yml`** +**`hertzbeat`的配置文件`application.yml`** -### 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 - 安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 +### 配置HertzBeat的配置文件 + +修改位于 `hertzbeat/config/application.yml` 的配置文件 +注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 +安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 1. 配置短信发送服务器 -> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 +> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 + +在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) -在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) ```yaml common: sms: @@ -28,15 +30,17 @@ common: sign-name: 赫兹跳动 template-id: 1343434 ``` + 1.1 腾讯云短信创建签名(sign-name) ![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) 1.2 腾讯云短信创建正文模板(template-id) + ``` 监控:{1},告警级别:{2}。内容:{3} ``` -![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) +![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) 1.3 腾讯云短信创建应用(app-id) ![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) @@ -44,8 +48,7 @@ common: 1.4 腾讯云访问管理(secret-id、secret-key) ![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) - -2. 配置告警自定义参数 +2. 配置告警自定义参数 ```yaml alerter: @@ -53,11 +56,12 @@ alerter: console-url: https://console.tancloud.io ``` -3. 使用外置redis代替内存存储实时指标数据 +3. 
使用外置redis代替内存存储实时指标数据 -> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 +> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 + +注意⚠️ `memory.enabled: false, redis.enabled: true` -注意⚠️ `memory.enabled: false, redis.enabled: true` ```yaml warehouse: store: @@ -70,3 +74,4 @@ warehouse: port: 6379 password: 123456 ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md index c0b963c69e7..3eb90180a89 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md @@ -1,14 +1,14 @@ --- id: docker-deploy title: 通过 Docker 方式安装 HertzBeat -sidebar_label: Docker方式部署 +sidebar_label: Docker方式部署 --- -> 推荐使用Docker部署HertzBeat +> 推荐使用Docker部署HertzBeat 1. 下载安装Docker环境 Docker 工具自身的下载请参考以下资料: - [Docker官网文档](https://docs.docker.com/get-docker/) + [Docker官网文档](https://docs.docker.com/get-docker/) [菜鸟教程-Docker教程](https://www.runoob.com/docker/docker-tutorial.html) 安装完毕后终端查看Docker版本是否正常输出。 @@ -16,7 +16,6 @@ sidebar_label: Docker方式部署 $ docker -v Docker version 20.10.12, build e91ed57 ``` - 2. 拉取HertzBeat Docker镜像 镜像版本TAG可查看 [dockerhub 官方镜像仓库](https://hub.docker.com/r/apache/hertzbeat/tags) 或者使用 [quay.io 镜像仓库](https://quay.io/repository/apache/hertzbeat) @@ -25,12 +24,13 @@ sidebar_label: Docker方式部署 $ docker pull apache/hertzbeat $ docker pull apache/hertzbeat-collector ``` + 若网络超时或者使用 + ```shell $ docker pull quay.io/tancloud/hertzbeat $ docker pull quay.io/tancloud/hertzbeat-collector ``` - 3. 部署HertzBeat您可能需要掌握的几条命令 ```shell @@ -46,25 +46,22 @@ sidebar_label: Docker方式部署 ctrl+d或者 $ exit ``` - 4. 
挂载并配置HertzBeat的配置文件(可选) 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml - 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 或 [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) + 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 或 [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) - 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数 - - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) - + - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) + - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) + - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) 5. 挂载并配置HertzBeat用户配置文件,自定义用户密码(可选) HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) 或 [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) - 具体修改步骤参考 [配置修改账户密码](account-modify) - -6. 启动HertzBeat Docker容器 + 具体修改步骤参考 [配置修改账户密码](account-modify) +6. 
启动HertzBeat Docker容器 -```shell +```shell $ docker run -d -p 1157:1157 -p 1158:1158 \ -e LANG=zh_CN.UTF-8 \ -e TZ=Asia/Shanghai \ @@ -76,34 +73,31 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ --name hertzbeat apache/hertzbeat ``` - 这条命令启动一个运行HertzBeat的Docker容器,并且将容器的1157端口映射到宿主机的1157端口上。若宿主机已有进程占用该端口,则需要修改主机映射端口。 - - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 - - `-e LANG=zh_CN.UTF-8` : 设置系统语言 - - `-e TZ=Asia/Shanghai` : 设置系统时区 - - `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口,请注意,前面是宿主机的端口号,后面是容器的端口号。1157是WEB端口,1158是集群端口。 - - `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 - - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 - - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选,不需要可删除)挂载上上一步修改的本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。我们需要修改此配置文件的MYSQL,TDengine配置信息来连接外部服务。 - - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选,不需要可删除)挂载上一步修改的账户配置文件到容器中,若无修改账户需求可删除此命令参数。 - - - 注意⚠️ 挂载文件时,前面参数为你自定义本地文件地址,后面参数为docker容器内文件地址(固定) - - - `--name hertzbeat` : 命名容器名称 hertzbeat - - - `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 - - ```shell - $ docker update --restart=always hertzbeat - ``` - - - `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** + 这条命令启动一个运行HertzBeat的Docker容器,并且将容器的1157端口映射到宿主机的1157端口上。若宿主机已有进程占用该端口,则需要修改主机映射端口。 + +- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 +- `-e LANG=zh_CN.UTF-8` : 设置系统语言 +- `-e TZ=Asia/Shanghai` : 设置系统时区 +- `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口,请注意,前面是宿主机的端口号,后面是容器的端口号。1157是WEB端口,1158是集群端口。 +- `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 +- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 +- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选,不需要可删除)挂载上上一步修改的本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。我们需要修改此配置文件的MYSQL,TDengine配置信息来连接外部服务。 +- `-v 
$(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选,不需要可删除)挂载上一步修改的账户配置文件到容器中,若无修改账户需求可删除此命令参数。 +- 注意⚠️ 挂载文件时,前面参数为你自定义本地文件地址,后面参数为docker容器内文件地址(固定) +- `--name hertzbeat` : 命名容器名称 hertzbeat +- `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 + + ```shell + $ docker update --restart=always hertzbeat + ``` +- `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** 7. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 8. 部署采集器集群(可选) -```shell +```shell $ docker run -d \ -e IDENTITY=custom-collector-name \ -e MODE=public \ @@ -112,52 +106,57 @@ $ docker run -d \ --name hertzbeat-collector apache/hertzbeat-collector ``` - 这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 - - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 - - `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 - - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 - - `-e MANAGER_HOST=127.0.0.1` : 重要⚠️ 设置连接的主HertzBeat服务地址IP。 - - `-e MANAGER_PORT=1158` : (可选) 设置连接的主HertzBeat服务地址端口,默认 1158. - - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 - - `--name hertzbeat-collector` : 命名容器名称 hertzbeat-collector - - `apache/hertzbeat-collector` : 使用拉取最新的的HertzBeat采集器官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat-collector`代替。** +这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 +- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 +- `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 +- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 +- `-e MANAGER_HOST=127.0.0.1` : 重要⚠️ 设置连接的主HertzBeat服务地址IP。 +- `-e MANAGER_PORT=1158` : (可选) 设置连接的主HertzBeat服务地址端口,默认 1158. 
+- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 +- `--name hertzbeat-collector` : 命名容器名称 hertzbeat-collector +- `apache/hertzbeat-collector` : 使用拉取最新的的HertzBeat采集器官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat-collector`代替。** -8. 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 +8. 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 -**HAVE FUN** +**HAVE FUN** -### Docker部署常见问题 +### Docker部署常见问题 **最多的问题就是网络问题,请先提前排查** 1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** -此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` +> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. **按照流程部署,访问 http://ip:1157/ 无界面** -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 -3. **日志报错TDengine连接或插入SQL失败** +3. **日志报错TDengine连接或插入SQL失败** + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + +4. **监控历史图表长时间都一直无数据** -4. **监控历史图表长时间都一直无数据** > 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 > 二:Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB或Tdengine IP账户密码等配置是否正确 +> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB或Tdengine IP账户密码等配置是否正确 5. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - -> 安装初始化此数据库参考 [TDengine安装初始化](tdengine-init) 或 [IoTDB安装初始化](iotdb-init) +> 安装初始化此数据库参考 [TDengine安装初始化](tdengine-init) 或 [IoTDB安装初始化](iotdb-init) 6. 
安装配置了时序数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 请检查配置参数是否正确 > iot-db 或td-engine enable 是否设置为true > 注意⚠️若hertzbeat和IotDB,TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md index 4c98f15722d..81d950a392c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md @@ -1,7 +1,7 @@ --- id: greptime-init title: 依赖时序数据库服务GreptimeDB安装初始化 -sidebar_label: 使用GreptimeDB存储指标数据(可选) +sidebar_label: 使用GreptimeDB存储指标数据(可选) --- HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) @@ -10,18 +10,21 @@ HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始 GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** + +### 通过Docker方式安装GreptimeDB -### 通过Docker方式安装GreptimeDB > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装GreptimeDB +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装GreptimeDB ```shell $ docker run -p 4000-4004:4000-4004 \ @@ -32,16 +35,16 @@ $ docker run -p 4000-4004:4000-4004 \ --rpc-addr 0.0.0.0:4001 ``` - `-v /opt/greptimedb:/tmp/greptimedb` 为greptimedb数据目录本地持久化挂载,需将`/opt/greptimedb`替换为实际本地存在的目录 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/greptimedb:/tmp/greptimedb` 为greptimedb数据目录本地持久化挂载,需将`/opt/greptimedb`替换为实际本地存在的目录 +使用```$ docker ps```查看数据库是否启动成功 -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -56,8 +59,9 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 
时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md index f98fb837438..9f19a733b2e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md @@ -1,35 +1,38 @@ --- id: influxdb-init title: 依赖时序数据库服务InfluxDB安装初始化 -sidebar_label: 使用InfluxDB存储指标数据(可选) +sidebar_label: 使用InfluxDB存储指标数据(可选) --- HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) 我们推荐使用并长期支持VictoriaMetrics -InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 +InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** ### 1. 直接使用华为云服务 GaussDB For Influx -> 开通使用[华为云云数据库 GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) - +> 开通使用[华为云云数据库 GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) +> > 获取云数据库对外暴露连接地址,账户密码即可 ⚠️注意云数据库默认开启了SSL,云数据库地址应使用 `https:` -### 2. 通过Docker方式安装InfluxDB +### 2. 通过Docker方式安装InfluxDB + > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装InfluxDB 1.x +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装InfluxDB 1.x ```shell $ docker run -p 8086:8086 \ @@ -37,17 +40,16 @@ $ docker run -p 8086:8086 \ influxdb:1.8 ``` - `-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 +使用```$ docker ps```查看数据库是否启动成功 - -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -66,10 +68,9 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 -1. 时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 +1. 
时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md index bc37b31491a..c26e8feb7b0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md @@ -1,7 +1,7 @@ --- id: iotdb-init title: 依赖时序数据库服务IoTDB安装初始化 -sidebar_label: 使用IoTDB存储指标数据(可选) +sidebar_label: 使用IoTDB存储指标数据(可选) --- HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) @@ -10,21 +10,23 @@ HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始 Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据的软件系统,我们使用其存储分析采集到的监控指标历史数据。支持V0.12 - V0.13版本,推荐使用V0.13.*版本。 **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有IoTDB环境,可直接跳到YML配置那一步。 +### 通过Docker方式安装IoTDB -### 通过Docker方式安装IoTDB > 可参考官方网站[安装教程](https://iotdb.apache.org/zh/UserGuide/V0.13.x/QuickStart/WayToGetIoTDB.html) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装IoTDB +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Docker安装IoTDB ```shell $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ @@ -33,16 +35,16 @@ $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ apache/iotdb:0.13.3-node ``` - `-v /opt/iotdb/data:/iotdb/data` 为IoTDB数据目录本地持久化挂载,需将`/iotdb/data`替换为实际本地存在的目录 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/iotdb/data:/iotdb/data` 为IoTDB数据目录本地持久化挂载,需将`/iotdb/data`替换为实际本地存在的目录 +使用```$ docker ps```查看数据库是否启动成功 -3. 在hertzbeat的`application.yml`配置文件配置IoTDB数据库连接 +3. 
在hertzbeat的`application.yml`配置文件配置IoTDB数据库连接 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`** ```yaml warehouse: @@ -67,16 +69,20 @@ warehouse: 4. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 -3. 安装配置了IotDB数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] +3. 安装配置了IotDB数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 请检查配置参数是否正确 > iot-db enable是否设置为true > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md index 8db668fb634..fdf25643f1d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md @@ -1,23 +1,27 @@ --- id: mysql-change title: 关系型数据库使用 Mysql 替换依赖的 H2 存储系统元数据 -sidebar_label: 元数据使用Mysql存储(可选) +sidebar_label: 元数据使用Mysql存储(可选) --- -MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默认内置的H2数据库外,还可以切换为使用MYSQL存储监控信息,告警信息,配置信息等结构化关系数据。 + +MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默认内置的H2数据库外,还可以切换为使用MYSQL存储监控信息,告警信息,配置信息等结构化关系数据。 注意⚠️ 使用外置Mysql数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 -> 如果您已有MYSQL环境,可直接跳到数据库创建那一步。 +> 如果您已有MYSQL环境,可直接跳到数据库创建那一步。 + +### 通过Docker方式安装MYSQL -### 通过Docker方式安装MYSQL 1. 
下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 + 安装完毕后终端查看Docker版本是否正常输出。 + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. Docker安装MYSQl +2. Docker安装MYSQl + ``` $ docker run -d --name mysql \ -p 3306:3306 \ @@ -26,26 +30,29 @@ MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默 --restart=always \ mysql:5.7 ``` + `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 -### 数据库创建 +### 数据库创建 + 1. 进入MYSQL或使用客户端连接MYSQL服务 - `mysql -uroot -p123456` + `mysql -uroot -p123456` 2. 创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 查看hertzbeat数据库是否创建成功 `show databases;` -### 修改hertzbeat的配置文件application.yml切换数据源 +### 修改hertzbeat的配置文件application.yml切换数据源 1. 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 - ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) - -需修改部分原参数: + ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) + +需修改部分原参数: + ```yaml spring: datasource: @@ -54,7 +61,9 @@ spring: password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` -具体替换参数如下,需根据mysql环境配置账户密码IP: + +具体替换参数如下,需根据mysql环境配置账户密码IP: + ```yaml spring: datasource: @@ -66,10 +75,9 @@ spring: 2. 通过docker启动时,需要修改host为宿主机的外网Ip,包括mysql连接字符串和redis。 +**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** - -### 常见问题 +### 常见问题 1. 
缺少hibernate的mysql方言,导致启动异常 Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set @@ -85,3 +93,4 @@ spring: hibernate: dialect: org.hibernate.dialect.MySQL5InnoDBDialect ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md index 4965e19bbfd..bbb44f546b3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md @@ -1,22 +1,24 @@ --- id: package-deploy title: 通过安装包安装 HertzBeat -sidebar_label: 安装包方式部署 +sidebar_label: 安装包方式部署 --- + > HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 - + 1. 下载HertzBeat安装包 下载您系统环境对应的安装包 `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` - 从[GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) 仓库下载 - 从[Download](https://hertzbeat.apache.org/docs/download) 仓库下载 - 2. 配置HertzBeat的配置文件(可选) - 解压安装包到主机 eg: /opt/hertzbeat - ``` + 解压安装包到主机 eg: /opt/hertzbeat + + ``` $ tar zxvf hertzbeat-xx.tar.gz or $ unzip -o hertzbeat-xx.zip ``` + 修改位于 `hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 - 若需使用邮件发送告警,需替换`application.yml`里面的邮件服务器参数 - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) @@ -27,20 +29,20 @@ sidebar_label: 安装包方式部署 3. 配置用户配置文件(可选,自定义配置用户密码) HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过修改位于 `hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 - 具体参考 [配置修改账户密码](account-modify) + 具体参考 [配置修改账户密码](account-modify) 4. 部署启动 - 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - ``` + 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat + + ``` $ ./startup.sh ``` - 5. 
开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 - + 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 6. 部署采集器集群(可选) - 下载解压您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` + ```yaml collector: dispatch: @@ -57,7 +59,6 @@ sidebar_label: 安装包方式部署 **HAVE FUN** - ### 安装包部署常见问题 **最多的问题就是网络环境问题,请先提前排查** @@ -68,25 +69,30 @@ sidebar_label: 安装包方式部署 要求:JAVA11环境 下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) 安装后命令行检查是否成功安装 - ``` - $ java -version - java version "11.0.12" - Java(TM) SE Runtime Environment 18.9 (build 11.0.12+8-LTS-237) - Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) - ``` +``` +$ java -version +java version "11.0.12" +Java(TM) SE Runtime Environment 18.9 (build 11.0.12+8-LTS-237) +Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) + +``` 2. **按照流程部署,访问 http://ip:1157/ 无界面** 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. 
**监控历史图表长时间都一直无数据** + > 一:时序数据库是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 +> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md index b09f969e4da..6f2d7cdf7bc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md @@ -3,42 +3,43 @@ id: postgresql-change title: 关系型数据库使用 PostgreSQL 替换依赖的 H2 存储系统元数据 sidebar_label: 元数据使用PostgreSQL存储(可选) --- -PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBMS)。HertzBeat除了支持使用默认内置的H2数据库外,还可以切换为使用PostgreSQL存储监控信息,告警信息,配置信息等结构化关系数据。 -注意⚠️ 使用外置PostgreSQL数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 +PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBMS)。HertzBeat除了支持使用默认内置的H2数据库外,还可以切换为使用PostgreSQL存储监控信息,告警信息,配置信息等结构化关系数据。 -> 如果您已有PostgreSQL环境,可直接跳到数据库创建那一步。 +注意⚠️ 使用外置PostgreSQL数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 +> 如果您已有PostgreSQL环境,可直接跳到数据库创建那一步。 -### 通过Docker方式安装PostgreSQL +### 通过Docker方式安装PostgreSQL 1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` - 2. Docker安装 PostgreSQL + ``` $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` + 使用```$ docker ps```查看数据库是否启动成功 3. Create database in container manually or with [script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgresql-iotdb/conf/sql/schema.sql). -### 数据库创建 +### 数据库创建 + +1. 进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 -1. 
进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 ``` su - postgres psql ``` - 2. 创建名称为hertzbeat的数据库 `CREATE DATABASE hertzbeat;` - 3. 查看hertzbeat数据库是否创建成功 `\l` @@ -58,7 +59,9 @@ spring: password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` + 具体替换参数如下,需根据 PostgreSQL 环境配置账户密码IP: + ```yaml spring: config: @@ -81,4 +84,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md index ac8d65db395..f520ba5f0a4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md @@ -1,18 +1,18 @@ --- id: quickstart title: 快速开始 -sidebar_label: 快速开始 +sidebar_label: 快速开始 --- ### 🐕 开始使用 - 如果您是想将HertzBeat部署到本地搭建监控系统,请参考下面的部署文档进行操作。 +### 🍞 HertzBeat安装 -### 🍞 HertzBeat安装 > HertzBeat支持通过源码安装启动,Docker容器运行和安装包方式安装部署,CPU架构支持X86/ARM64。 -#### 方式一:Docker方式快速安装 +#### 方式一:Docker方式快速安装 1. `docker` 环境仅需一条命令即可开始 @@ -29,14 +29,15 @@ sidebar_label: 快速开始 ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 -更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) +更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) -#### 方式二:通过安装包安装 +#### 方式二:通过安装包安装 1. 
下载您系统环境对应的安装包`hertzbeat-xx.tar.gz` [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) 2. 配置 HertzBeat 的配置文件 `hertzbeat/config/application.yml`(可选) @@ -45,6 +46,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 5. 部署采集器集群 - 下载您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` + ```yaml collector: dispatch: @@ -59,9 +61,9 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 -更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) +更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) -#### 方式三:本地代码启动 +#### 方式三:本地代码启动 1. 此为前后端分离项目,本地代码调试需要分别启动后端工程`manager`和前端工程`web-app` 2. 
后端:需要`maven3+`, `java11`和`lombok`环境,修改`YML`配置信息并启动`manager`服务 @@ -74,7 +76,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 通过 [docker-compose部署脚本](https://github.com/apache/hertzbeat/tree/master/script/docker-compose) 一次性把 mysql 数据库, iotdb/tdengine 时序数据库和 hertzbeat 安装部署。 -详细步骤参考 [docker-compose部署方案](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/README.md) +详细步骤参考 [docker-compose部署方案](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/README.md) ##### 方式五:Kubernetes Helm Charts 部署 hertzbeat+collector+mysql+iotdb @@ -121,4 +123,4 @@ $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ apache/iotdb:0.13.3-node ``` -详细步骤参考 [使用时序数据库IoTDB存储指标数据(可选)](iotdb-init) +详细步骤参考 [使用时序数据库IoTDB存储指标数据(可选)](iotdb-init) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md index 0a55beb17a7..83afd21fc92 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md @@ -1,7 +1,7 @@ --- id: rainbond-deploy title: 使用 Rainbond 部署 HertzBeat -sidebar_label: Rainbond方式部署 +sidebar_label: Rainbond方式部署 --- 如果你不熟悉 Kubernetes,想在 Kubernetes 中安装 HertzBeat,可以使用 Rainbond 来部署。Rainbond 是一个基于 Kubernetes 构建的云原生应用管理平台,可以很简单的将你的应用部署到 Kubernetes中。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md index 9731621b4e9..fbcbab39578 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md @@ -4,7 +4,6 @@ title: SSL证书过期监控使用案例 sidebar_label: 使用案例 --- - 现在大部分网站都默认支持 
HTTPS,我们申请的证书一般是3个月或者1年,很容易随着时间的流逝SSL证书过期了我们却没有第一时间发现,或者在过期之前没有及时更新证书。 这篇文章介绍如果使用 hertzbeat 监控系统来检测我们网站的SSL证书有效期,当证书过期时或证书快过期前几天,给我们发告警消息。 @@ -34,10 +33,8 @@ gitee: https://gitee.com/hertzbeat/hertzbeat > 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 - ![](/img/docs/start/ssl_1.png) - 2. 配置监控网站 > 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md index 1b70a487d41..c09e1daf15e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md @@ -1,30 +1,32 @@ --- id: tdengine-init title: 依赖时序数据库服务TDengine安装初始化 -sidebar_label: 使用TDengine存储指标数据(可选) +sidebar_label: 使用TDengine存储指标数据(可选) --- HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) 我们推荐使用并长期支持VictoriaMetrics -TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 +TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有TDengine环境,可直接跳到创建数据库实例那一步。 +### 通过Docker方式安装TDengine -### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装TDengine +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装TDengine ```shell $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ @@ -33,23 +35,23 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ tdengine/tdengine:3.0.4.0 ``` - `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 - `-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 +`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 +使用```$ docker ps```查看数据库是否启动成功 + +### 创建数据库实例 -### 创建数据库实例 +> [TDengine CLI 小技巧](https://docs.taosdata.com/develop/model/) -> [TDengine CLI 小技巧](https://docs.taosdata.com/develop/model/) +1. 进入数据库Docker容器 -1. 进入数据库Docker容器 ``` $ docker exec -it tdengine /bin/bash ``` - 2. 修改账户密码 > 建议您修改密码。TDengine默认的账户密码是 root/taosdata - > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: + > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: ``` root@tdengine-server:~/TDengine-server# taos @@ -57,6 +59,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> ``` + > 在 TDengine CLI 中使用 alter user 命令可以修改用户密码,缺省密码为 taosdata 3. 创建名称为hertzbeat的数据库 @@ -76,24 +79,23 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ taos> show databases; taos> use hertzbeat; ``` - 5. 退出TDengine CLI ``` 输入 q 或 quit 或 exit 回车 ``` -**注意⚠️若是安装包安装的TDengine** +**注意⚠️若是安装包安装的TDengine** > 除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -111,19 +113,24 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 -2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] +2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 -3. 监控详情历史图片不展示或无数据,已经配置了TDengine +3. 监控详情历史图片不展示或无数据,已经配置了TDengine + > 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 4. 安装配置了TDengine数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 请检查配置参数是否正确 > td-engine enable是否设置为true > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 +> 可根据logs目录下启动日志排查 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md index f70019c1575..ed85c460e76 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md @@ -1,7 +1,7 @@ --- id: upgrade title: HertzBeat 新版本更新指引 -sidebar_label: 版本更新指引 +sidebar_label: 版本更新指引 --- **HertzBeat 的发布版本列表** @@ -15,8 +15,6 @@ HertzBeat 的元数据信息保存在 H2 或 Mysql, PostgreSQL 关系型数据 **升级前您需要保存备份好数据库的数据文件和监控模版文件** - - ### Docker部署方式的升级 1. 
若使用了自定义监控模版 @@ -24,32 +22,26 @@ HertzBeat 的元数据信息保存在 H2 或 Mysql, PostgreSQL 关系型数据 - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - 然后在后续升级启动 docker 容器的时候需要挂载上这个 define 目录,`-v $(pwd)/define:/opt/hertzbeat/define` - `-v $(pwd)/define:/opt/hertzbeat/define` - -2. 若使用内置默认 H2 数据库 +2. 若使用内置默认 H2 数据库 - 需挂载或备份 `-v $(pwd)/data:/opt/hertzbeat/data` 容器内的数据库文件目录 `/opt/hertzbeat/data` - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,注意需要将数据库文件目录挂载 `-v $(pwd)/data:/opt/hertzbeat/data` - 3. 若使用外置关系型数据库 Mysql, PostgreSQL - 无需挂载备份容器内的数据库文件目录 - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,`application.yml`配置数据库连接即可 - ### 安装包部署方式的升级 1. 若使用内置默认 H2 数据库 - - 备份安装包下的数据库文件目录 `/opt/hertzbeat/data` + - 备份安装包下的数据库文件目录 `/opt/hertzbeat/data` - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动 - 2. 若使用外置关系型数据库 Mysql, PostgreSQL - 无需备份安装包下的数据库文件目录 - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动,`application.yml`配置数据库连接即可 - - -**HAVE FUN** +**HAVE FUN** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md index 8c9ae087058..d1608031078 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md @@ -1,7 +1,7 @@ --- id: victoria-metrics-init title: 依赖时序数据库服务VictoriaMetrics安装初始化 -sidebar_label: 使用VictoriaMetrics存储指标数据(推荐) +sidebar_label: 使用VictoriaMetrics存储指标数据(推荐) --- HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) @@ -10,21 +10,23 @@ HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始 VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决方案和时序数据库,兼容 Prometheus 
生态。推荐版本(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 +### 通过Docker方式安装VictoriaMetrics -### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装VictoriaMetrics +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Docker安装VictoriaMetrics ```shell $ docker run -d -p 8428:8428 \ @@ -33,16 +35,16 @@ $ docker run -d -p 8428:8428 \ victoriametrics/victoria-metrics:v1.95.1 ``` - `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 - 使用```$ docker ps```查看数据库是否启动成功 +`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 +使用```$ docker ps```查看数据库是否启动成功 -3. 在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 +3. 在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** ```yaml warehouse: @@ -60,8 +62,9 @@ warehouse: 4. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 
时序数据库是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md index c3d507a11f8..16205107eaa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md @@ -4,27 +4,27 @@ title: 监控模版中心 sidebar_label: 监控模版 --- -> Hertzbeat 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 - +> Hertzbeat 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? -这是它的架构原理: +这是它的架构原理: ![hertzBeat](/img/docs/hertzbeat-arch.png) -**我们将所有监控采集类型(mysql,website,jvm,k8s)都定义为yml模版,用户可以导入这些模版到hertzbeat系统中,使其支持对应类型的监控,非常方便!** +**我们将所有监控采集类型(mysql,website,jvm,k8s)都定义为yml模版,用户可以导入这些模版到hertzbeat系统中,使其支持对应类型的监控,非常方便!** ![](/img/docs/advanced/extend-point-1.png) **欢迎大家一起贡献你使用过程中自定义的通用监控类型YML模版,可用的模板如下:** -### 应用服务监控模版 +### 应用服务监控模版  👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
+ 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
+ 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
+ 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
@@ -32,7 +32,7 @@ sidebar_label: 监控模版  👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
-### 数据库监控模版 +### 数据库监控模版  👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
@@ -49,7 +49,7 @@ sidebar_label: 监控模版  👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
-### 操作系统监控模版 +### 操作系统监控模版  👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
@@ -64,7 +64,6 @@ sidebar_label: 监控模版  👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- ### 中间件监控模版  👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
@@ -78,13 +77,12 @@ sidebar_label: 监控模版  👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- ### 云原生监控模版  👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
-### 网络监控模版 +### 网络监控模版  👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md index 31fe0ac98f7..79bed896359 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md @@ -3,14 +3,17 @@ id: extend-http-default title: HTTP协议系统默认解析方式 sidebar_label: 系统默认解析方式 --- -> HTTP接口调用获取响应数据后,用 Apache HertzBeat (incubating) 默认的解析方式去解析响应数据。 -**此需接口响应数据结构符合HertzBeat指定的数据结构规则** +> HTTP接口调用获取响应数据后,用 Apache HertzBeat (incubating) 默认的解析方式去解析响应数据。 -### HertzBeat数据格式规范 -注意⚠️ 响应数据为JSON +**此需接口响应数据结构符合HertzBeat指定的数据结构规则** + +### HertzBeat数据格式规范 + +注意⚠️ 响应数据为JSON 单层格式:key-value + ```json { "metricName1": "metricValue", @@ -19,7 +22,9 @@ sidebar_label: 系统默认解析方式 "metricName4": "metricValue" } ``` + 多层格式:数组里面套key-value + ```json [ { @@ -36,9 +41,11 @@ sidebar_label: 系统默认解析方式 } ] ``` + 样例: 查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 -若只有一台虚拟机,其单层格式为: +若只有一台虚拟机,其单层格式为: + ```json { "hostname": "linux-1", @@ -48,7 +55,9 @@ sidebar_label: 系统默认解析方式 "runningTime": 100 } ``` -若有多台虚拟机,其多层格式为: + +若有多台虚拟机,其多层格式为: + ```json [ { @@ -75,7 +84,7 @@ sidebar_label: 系统默认解析方式 ] ``` -**对应的监控模版YML可以配置为如下** +**对应的监控模版YML可以配置为如下** ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -148,3 +157,4 @@ metrics: # 这里使用HertzBeat默认解析 parseType: default ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md index 160adf031c9..03602131897 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md @@ -1,19 +1,17 @@ --- id: extend-http-example-hertzbeat title: 教程一:适配一款基于HTTP协议的监控类型 -sidebar_label: 教程一:适配一款HTTP协议监控 +sidebar_label: 教程一:适配一款HTTP协议监控 --- -通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 监控系统下新增适配一款基于http协议的监控类型。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 监控系统下新增适配一款基于http协议的监控类型。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 ### HTTP协议解析通用响应结构体,获取指标数据 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - ``` { "code": 200, @@ -22,6 +20,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 } ``` + 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: @@ -58,7 +57,6 @@ sidebar_label: 教程一:适配一款HTTP协议监控 **我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - ### 新增自定义监控模版YML **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -188,32 +186,24 @@ metrics: **新增完毕,现在我们重启hertzbeat系统。我们可以看到系统页面已经多了一个`hertzbeat`监控类型了。** - ![](/img/docs/advanced/extend-http-example-1.png) - ### 系统页面添加对`hertzbeat`监控类型的监控 > 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > 过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### 设置阈值告警通知 > 接下来我们就可以正常的设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ---- #### 完! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md index 94f49c01f61..bebcc99244a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md @@ -6,15 +6,15 @@ sidebar_label: 教程二:获取TOKEN后续认证使用 通过此教程我们一步一步描述如何在教程一的基础上改造,新增一个监控指标,先调用认证接口获取TOKEN后,使用TOKEN作为参数供后面的监控指标采集认证使用。 -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 -### 请求流程 +### 请求流程 -【**认证信息监控指标(优先级最高)**】【**HTTP接口携带账户密码调用**】->【**响应数据解析**】->【**解析签发TOKEN-accessToken作为指标**】->【**将accessToken作为变量参数赋值给其他采集监控指标**】 +【**认证信息监控指标(优先级最高)**】【**HTTP接口携带账户密码调用**】->【**响应数据解析**】->【**解析签发TOKEN-accessToken作为指标**】->【**将accessToken作为变量参数赋值给其他采集监控指标**】 > 这里我们依然用教程一的hertzbeat监控举例!hertzbeat后台接口不仅仅支持教程一使用的basic直接账户密码认证,也支持token认证。 -**我们需要`POST`调用登录接口`/api/account/auth/form`获取`accessToken`,请求body(json格式)如下**: +**我们需要`POST`调用登录接口`/api/account/auth/form`获取`accessToken`,请求body(json格式)如下**: ```json { @@ -22,7 +22,8 @@ sidebar_label: 教程二:获取TOKEN后续认证使用 "identifier": "admin" } ``` -**响应结构数据如下**: + +**响应结构数据如下**: ```json { @@ -99,9 +100,9 @@ metrics: ...... ``` -### 定义监控指标`auth`登录请求获取`token` +### 定义监控指标`auth`登录请求获取`token` -1. 在`app-hertzbeat_token.yml`新增一个监控指标定义 `auth`, 设置采集优先级为最高0,采集指标 `token`. +1. 在`app-hertzbeat_token.yml`新增一个监控指标定义 `auth`, 设置采集优先级为最高0,采集指标 `token`. 
```yaml @@ -206,16 +207,13 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) - **新增成功后我们就可以在详情页面看到我们采集的 `token`, `refreshToken`指标数据。** ![](/img/docs/advanced/extend-http-example-6.png) ![](/img/docs/advanced/extend-http-example-7.png) - - -### 将`token`作为变量参数给后面的监控指标采集使用 +### 将`token`作为变量参数给后面的监控指标采集使用 **在`app-hertzbeat_token.yml`新增一个监控指标定义 `summary` 同教程一中的`summary`相同, 设置采集优先级为1** **设置此监控指标的HTTP协议配置中认证方式为 `Bearer Token` 将上一个监控指标`auth`采集的指标`token`作为参数给其赋值,使用`^o^`作为内部替换符标识,即`^o^token^o^`。如下:** @@ -231,7 +229,7 @@ metrics: bearerTokenToken: ^o^token^o^ ``` -**最终`app-hertzbeat_token.yml`定义如下:** +**最终`app-hertzbeat_token.yml`定义如下:** ```yaml @@ -368,9 +366,9 @@ metrics: ``` -**配置完成后,再次重启 `hertzbeat` 系统,查看监控详情页面** +**配置完成后,再次重启 `hertzbeat` 系统,查看监控详情页面** -![](/img/docs/advanced/extend-http-example-8.png) +![](/img/docs/advanced/extend-http-example-8.png) ![](/img/docs/advanced/extend-http-example-9.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md index ae29f265574..befd1db351f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md @@ -3,16 +3,18 @@ id: extend-http-jsonpath title: HTTP协议JsonPath解析方式 sidebar_label: JsonPath解析方式 --- + > HTTP接口调用获取响应数据后,用JsonPath脚本解析的解析方式去解析响应数据。 注意⚠️ 响应数据为JSON格式 -**使用JsonPath脚本将响应数据解析成符合 Apache HertzBeat (incubating) 指定的数据结构规则的数据** +**使用JsonPath脚本将响应数据解析成符合 Apache HertzBeat (incubating) 指定的数据结构规则的数据** + +#### JsonPath操作符 -#### JsonPath操作符 -[JSONPath在线验证](https://www.jsonpath.cn) +[JSONPath在线验证](https://www.jsonpath.cn) -| JSONPATH | 帮助描述 | +| JSONPATH | 帮助描述 | |------------------|-----------------------------------| | $ | 根对象或元素 | | @ | 当前对象或元素 | @@ -25,8 +27,10 @@ sidebar_label: JsonPath解析方式 | ?() | 过滤器(脚本)表达式. 
| | () | 脚本表达式. | -#### HertzBeat数据格式规范 +#### HertzBeat数据格式规范 + 单层格式:key-value + ```json { "metricName1": "metricValue", @@ -35,7 +39,9 @@ sidebar_label: JsonPath解析方式 "metricName4": "metricValue" } ``` + 多层格式:数组里面套key-value + ```json [ { @@ -53,10 +59,11 @@ sidebar_label: JsonPath解析方式 ] ``` -#### 样例 +#### 样例 查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 -接口返回的原始数据如下: +接口返回的原始数据如下: + ```json { "firstName": "John", @@ -80,7 +87,8 @@ sidebar_label: JsonPath解析方式 } ``` -我们使用JsonPath脚本解析,对应的脚本为: `$.number[*]` ,解析后的数据结构如下: +我们使用JsonPath脚本解析,对应的脚本为: `$.number[*]` ,解析后的数据结构如下: + ```json [ { @@ -93,9 +101,10 @@ sidebar_label: JsonPath解析方式 } ] ``` -此数据结构符合HertzBeat的数据格式规范,成功提取指标`type,num`值。 -**对应的监控模版YML可以配置为如下** +此数据结构符合HertzBeat的数据格式规范,成功提取指标`type,num`值。 + +**对应的监控模版YML可以配置为如下** ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -166,3 +175,4 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md index 21b8015da77..1a4a132c073 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md @@ -1,21 +1,22 @@ --- id: extend-http title: HTTP协议自定义监控 -sidebar_label: HTTP协议自定义监控 +sidebar_label: HTTP协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用HTTP协议自定义指标监控。 -### HTTP协议采集流程 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用HTTP协议自定义指标监控。 + +### HTTP协议采集流程 + 【**HTTP接口调用**】->【**响应校验**】->【**响应数据解析**】->【**默认方式解析|JsonPath脚本解析 | XmlPath解析(todo) | Prometheus解析**】->【**指标数据提取**】 由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 -HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 +HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 
**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) -**JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) - +**JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -23,15 +24,14 @@ HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数, ------- -下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 +下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 -### 监控模版YML +### 监控模版YML > 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 - +样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -204,3 +204,4 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md index 6ff4b9bbed1..bb946d8ce1c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md @@ -1,29 +1,34 @@ --- id: extend-jdbc title: JDBC协议自定义监控 -sidebar_label: JDBC协议自定义监控 +sidebar_label: JDBC协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JDBC(目前支持mysql,mariadb,postgresql,sqlserver)自定义指标监控。 -> JDBC协议自定义监控可以让我们很方便的通过写SQL查询语句就能监控到我们想监控的指标 -### JDBC协议采集流程 -【**系统直连MYSQL**】->【**运行SQL查询语句**】->【**响应数据解析:oneRow, multiRow, columns**】->【**指标数据提取**】 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JDBC(目前支持mysql,mariadb,postgresql,sqlserver)自定义指标监控。 +> 
JDBC协议自定义监控可以让我们很方便的通过写SQL查询语句就能监控到我们想监控的指标 + +### JDBC协议采集流程 + +【**系统直连MYSQL**】->【**运行SQL查询语句**】->【**响应数据解析:oneRow, multiRow, columns**】->【**指标数据提取**】 由流程可见,我们自定义一个JDBC协议的监控类型,需要配置JDBC请求参数,配置获取哪些指标,配置查询SQL语句。 -### 数据解析方式 +### 数据解析方式 + SQL查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有三种:oneRow, multiRow, columns -#### **oneRow** -> 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 +#### **oneRow** + +> 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book limit 1; -这里指标字段就能和响应数据一一映射为一行采集数据。 +这里指标字段就能和响应数据一一映射为一行采集数据。 #### **multiRow** -> 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 + +> 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 例如: 查询的指标字段为:one tow three four @@ -31,33 +36,34 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 这里指标字段就能和响应数据一一映射为多行采集数据。 #### **columns** -> 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 + +> 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 例如: 查询字段:one tow three four 查询SQL:select key, value from book; -SQL响应数据: +SQL响应数据: -| key | value | -|----------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | +| key | value | +|-------|-------| +| one | 243 | +| two | 435 | +| three | 332 | +| four | 643 | -这里指标字段就能和响应数据的key映射,获取对应的value为其采集监控数据。 +这里指标字段就能和响应数据的key映射,获取对应的value为其采集监控数据。 -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) - ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 -### 监控模版YML +下面详细介绍下文件的配置用法,请注意看使用注释。 + +### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 @@ -237,3 +243,4 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md index cca1bcfb726..71bb06ba2b2 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md @@ -1,38 +1,38 @@ --- id: extend-jmx title: JMX协议自定义监控 -sidebar_label: JMX协议自定义监控 +sidebar_label: JMX协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 -> JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 +> JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 + +### JMX协议采集流程 -### JMX协议采集流程 -【**对端JAVA应用暴露JMX服务**】->【**HertzBeat直连对端JMX服务**】->【**获取配置的 Mbean Object 数据**】->【**指标数据提取**】 +【**对端JAVA应用暴露JMX服务**】->【**HertzBeat直连对端JMX服务**】->【**获取配置的 Mbean Object 数据**】->【**指标数据提取**】 由流程可见,我们自定义一个JMX协议的监控类型,需要配置JMX请求参数,配置获取哪些指标,配置查询Object信息。 -### 数据解析方式 +### 数据解析方式 通过配置监控模版YML的指标`field`, `aliasFields`, `jmx` 协议的 `objectName` 来和对端系统暴露的 `Mbean`对象信息映射解析。 - - -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下监控模版的配置用法,请注意看使用注释。 + +下面详细介绍下监控模版的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 - +样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -236,3 +236,4 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md index b5685eb3b16..34514b3f2bb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md @@ -20,11 +20,11 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 > `filterName`: 过滤属性名称(可选) > `filterValue`: 过滤属性值(可选) -例如: +例如: - online_meta_count#SHOW HOSTS META#Status#ONLINE - 对 `SHOW HOSTS META` 返回的结果中统计滤Status==ONLINE的数量 +对 `SHOW HOSTS META` 返回的结果中统计滤Status==ONLINE的数量 - online_meta_count#SHOW HOSTS META## - 统计 `SHOW HOSTS META` 返回的行数 +统计 `SHOW HOSTS META` 返回的行数 #### **oneRow** @@ -72,6 +72,7 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 ![](/img/docs/advanced/extend-point-1.png) ------- + 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML @@ -169,3 +170,4 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md index c951f1bc190..a59d9b1898a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md @@ -1,12 +1,13 @@ --- id: extend-point title: 自定义监控 -sidebar_label: 自定义监控 +sidebar_label: 自定义监控 --- + > HertzBeat拥有自定义监控能力,您只需配置监控模版YML就能适配一款自定义的监控类型。 -> 目前自定义监控支持[HTTP协议](extend-http),[JDBC协议](extend-jdbc),[SSH协议](extend-ssh),[JMX协议](extend-jmx),[SNMP协议](extend-snmp),后续会支持更多通用协议。 +> 目前自定义监控支持[HTTP协议](extend-http),[JDBC协议](extend-jdbc),[SSH协议](extend-ssh),[JMX协议](extend-jmx),[SNMP协议](extend-snmp),后续会支持更多通用协议。 -### 自定义流程 +### 自定义流程 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** @@ -14,13 +15,13 @@ sidebar_label: 自定义监控 ------- -### 监控模版YML +### 监控模版YML **HertzBeat的设计是一个监控模版对应一个监控类型,所有监控类型都是由监控模版来定义的**。 -> 监控模版YML定义了 *监控类型的名称(国际化), 配置参数映射, 采集指标信息, 采集协议配置* 等。 +> 监控模版YML定义了 *监控类型的名称(国际化), 配置参数映射, 采集指标信息, 采集协议配置* 等。 -下面使用样例详细介绍下这监控模版YML的配置用法。 +下面使用样例详细介绍下这监控模版YML的配置用法。 样例:自定义一个 `app` 名称为 `example2` 
的自定义监控类型,其使用HTTP协议采集指标数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md index 3ff65d60a17..387d67c5987 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md @@ -1,38 +1,38 @@ --- id: extend-snmp title: SNMP协议自定义监控 -sidebar_label: SNMP协议自定义监控 +sidebar_label: SNMP协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 -> SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 +> SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 + +### SNMP协议采集流程 -### SNMP协议采集流程 -【**对端开启SNMP服务**】->【**HertzBeat直连对端SNMP服务**】->【**根据配置抓取对端OID指标信息**】->【**指标数据提取**】 +【**对端开启SNMP服务**】->【**HertzBeat直连对端SNMP服务**】->【**根据配置抓取对端OID指标信息**】->【**指标数据提取**】 由流程可见,我们自定义一个SNMP协议的监控类型,需要配置SNMP请求参数,配置获取哪些指标,配置查询OID信息。 -### 数据解析方式 +### 数据解析方式 通过配置监控模版YML的指标`field`, `aliasFields`, `snmp` 协议下的 `oids`来抓取对端指定的数据并解析映射。 - - -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 + +下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 - +样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 ```yaml # The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring @@ -207,3 +207,4 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md index 8284726e661..0f643f153f8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md @@ -1,21 +1,25 @@ --- id: extend-ssh title: SSH协议自定义监控 -sidebar_label: SSH协议自定义监控 +sidebar_label: SSH协议自定义监控 --- + > 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 -> SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 +> SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 + +### SSH协议采集流程 -### SSH协议采集流程 -【**系统直连Linux**】->【**运行SHELL命令脚本语句**】->【**响应数据解析:oneRow, multiRow**】->【**指标数据提取**】 +【**系统直连Linux**】->【**运行SHELL命令脚本语句**】->【**响应数据解析:oneRow, multiRow**】->【**指标数据提取**】 由流程可见,我们自定义一个SSH协议的监控类型,需要配置SSH请求参数,配置获取哪些指标,配置查询脚本语句。 -### 数据解析方式 +### 数据解析方式 + SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有两种:oneRow, multiRow,能满足绝大部分指标需求。 -#### **oneRow** -> 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 +#### **oneRow** + +> 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 例如: 需要查询Linux的指标 hostname-主机名称,uptime-启动时间 @@ -23,31 +27,37 @@ SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` 则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): `hostname; uptime | awk -F "," '{print $1}'` -终端响应的数据为: +终端响应的数据为: + ``` tombook 14:00:15 up 72 days -``` +``` + 则最后采集到的指标数据一一映射为: hostname值为 `tombook` -uptime值为 `14:00:15 up 72 days` +uptime值为 `14:00:15 up 72 days` -这里指标字段就能和响应数据一一映射为一行采集数据。 +这里指标字段就能和响应数据一一映射为一行采集数据。 #### **multiRow** -> 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 + +> 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 例如: 查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 -内存指标原始查询命令为:`free -m`, 控制台响应: +内存指标原始查询命令为:`free -m`, 控制台响应: + ```shell total used free shared buff/cache available Mem: 7962 4065 333 1 3562 3593 Swap: 8191 33 8158 ``` + 在hertzbeat中multiRow格式解析需要响应数据列名称和指标值一一映射,则对应的查询SHELL脚本为: `free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache 
available"} {print $2,$3,$4,$6,$7}'` -控制台响应为: +控制台响应为: + ```shell total used free buff_cache available 7962 4066 331 3564 3592 @@ -55,22 +65,22 @@ total used free buff_cache available 这里指标字段就能和响应数据一一映射为采集数据。 -### 自定义步骤 +### 自定义步骤 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** ![](/img/docs/advanced/extend-point-1.png) ------- -下面详细介绍下文件的配置用法,请注意看使用注释。 + +下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML > 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 -样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 - +样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 ```yaml # 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 @@ -206,3 +216,4 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md index c536fabf6c4..369210ee6be 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md @@ -4,16 +4,14 @@ title: 自定义适配一款基于HTTP协议的新监控类型 sidebar_label: 教程案例 --- -通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 系统下自定义新增适配一款基于 http 协议的监控类型。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 +通过此教程我们一步一步描述如何在 Apache HertzBeat (incubating) 系统下自定义新增适配一款基于 http 协议的监控类型。 +阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 ### HTTP协议解析通用响应结构体,获取指标数据 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - ``` { "code": 200, @@ -22,6 +20,7 @@ sidebar_label: 教程案例 } ``` + 
如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: @@ -58,12 +57,11 @@ sidebar_label: 教程案例 **我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - ### 新增配置监控模版YML **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -定义我们在页面上需要输入哪些参数,一般的HTTP协议参数主要有ip, port, headers, params, uri, 账户密码等,我们直接复用 `api`监控模版 里面的参数定义内容,删除其中的我们不需要输入的uri参数和keyword关键字等参数即可。 +定义我们在页面上需要输入哪些参数,一般的HTTP协议参数主要有ip, port, headers, params, uri, 账户密码等,我们直接复用 `api`监控模版 里面的参数定义内容,删除其中的我们不需要输入的uri参数和keyword关键字等参数即可。 定义采集类型是啥,需要用哪种协议采集方式,采集的指标是啥,协议的配置参数等。我们直接复用 `api`监控模版 里面的定义内容,修改为我们当前的监控类型`hertzbeat`配置参数即可,如下:注意⚠️我们这次获取接口响应数据中的`category`,`app`,`status`,`size`,`availableSize`等字段作为指标数据。 @@ -221,32 +219,24 @@ metrics: **点击保存并应用。我们可以看到系统页面的自定义监控菜单已经多了一个`hertzbeat`监控类型了。** - ![](/img/docs/advanced/extend-http-example-1.png) - ### 页面添加对`hertzbeat`监控类型的监控 > 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > 过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### 设置阈值告警通知 > 接下来我们就可以正常设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ---- #### 完! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md index c91cdca7171..c30a850a3c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md @@ -5,23 +5,22 @@ sidebar_position: 2 --- - ## 成为 Apache HertzBeat™ 的 Committer 任何支持社区并在 CoPDoC 领域中工作的人都可以成为 Apache HertzBeat 的 Committer。CoPDoC 是 ASF 的缩写,用来描述我们如何不仅仅通过代码来认识到您的贡献。 @@ -58,3 +57,4 @@ Committer 的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md index e64661b7595..39cf1da9123 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md @@ -5,20 +5,20 @@ sidebar_position: 3 --- ## 成为 Apache HertzBeat™ 的 PMC 成员 @@ -45,6 +45,7 @@ PMC 成员的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - +3 个月的高度活动和参与。 ### 贡献的质量 + - 对项目有深入的理解。 - 经过充分测试、良好设计、遵循 Apache HertzBeat 的编码标准,及简单的修复补丁。 - 井井有条的面向用户的详细文档。 @@ -56,3 +57,4 @@ PMC 成员的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md index a9a6f54d474..1cad8c3add1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -5,41 +5,38 @@ sidebar_position: 3 --- - ## 1 拉取请求与变更规则 1. `ISSUE`/`PR`(拉取请求) 的引导和命名 - - 新建 `PR` 后需要在 `PR` 页面的 Github Development 按钮处关联已存在的对应 `ISSUE`(若无建议新建对应ISSUE) + - 新建 `PR` 后需要在 `PR` 页面的 Github Development 按钮处关联已存在的对应 `ISSUE`(若无建议新建对应ISSUE) - - 标题命名格式(英文,小写) - `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` + - 标题命名格式(英文,小写) + `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` 2. 添加描述信息 - - 新建 `PR` 时请仔细描述此贡献,描述文档和代码同样重要。审阅者可以从描述中,而不仅仅是从代码中,了解问题和解决方案。 - - 勾选是否完成了对应的 Checklist。 - + - 新建 `PR` 时请仔细描述此贡献,描述文档和代码同样重要。审阅者可以从描述中,而不仅仅是从代码中,了解问题和解决方案。 + - 勾选是否完成了对应的 Checklist。 3. 建议一次 `PR` 只包含一个功能/一种修复/一类改进/一种重构/一次清理/一类文档等 - 4. 提交消息(英文,小写,无特殊字符) - 消息的提交应遵循与 `PR` 类似的模式:`[feature/bugfix/doc/improve/refactor/bug/cleanup] title` + 消息的提交应遵循与 `PR` 类似的模式:`[feature/bugfix/doc/improve/refactor/bug/cleanup] title` ## 2 代码检查样式 @@ -69,81 +66,79 @@ sidebar_position: 3 ### 3.1 命名风格 1. 优先为变量命名选择名词,这样更容易区分`变量`或`方法`。 + ```java - Cache publicKeyCache; + Cache publicKeyCache; ``` - 2. 变量的拼音缩写是禁止的(排除地名等名词),例如chengdu。 - 3. 推荐的变量名以 `类型` 结尾。 对于 `Collection/List` 类型的变量,取 `xxxx` (复数表示多个元素)或以 `xxxList` (特定类型)结束。 对于 `map` 类型的变量,清晰地描述 `key` 和 `value`: + ```java - Map idUserMap; - Map userIdNameMap; + Map idUserMap; + Map userIdNameMap; ``` - 4. 通过其名称直观地知道变量的类型和含义。 方法名称应首先以动词开始,如下所示: + ```java - void computeVcores(Object parameter1); + void computeVcores(Object parameter1); ``` - > 注意:在 `Builder` 工具类中不必严格遵循这项规则。 - + > 注意:在 `Builder` 工具类中不必严格遵循这项规则。 ### 3.2 常量变量定义 1. 
多余的字符串应提取为常量 - >如果一个常量被硬编码两次或多次,请直接提取它为常量并更改相应的引用。 - 通常,`log` 中的常量可以忽略提取。 - - - 负面示例: - - ```java - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put("status", "success"); - resp.put("code", ResponseCode.CODE_SUCCESS); - resp.put("data", data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put("status", "error"); - resp.put("code", ResponseCode.CODE_FAIL); - resp.put("data", null); - return resp; - } - ``` - - - 正面示例: - - > 字符串提取为常量引用。 - - ```java - public static final String STATUS = "status"; - public static final String CODE = "code"; - public static final String DATA = "data"; - - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "success"); - resp.put(CODE, ResponseCode.CODE_SUCCESS); - resp.put(DATA, data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "error"); - resp.put(CODE, ResponseCode.CODE_FAIL); - resp.put(DATA, null); - return resp; - } - ``` + > 如果一个常量被硬编码两次或多次,请直接提取它为常量并更改相应的引用。 + > 通常,`log` 中的常量可以忽略提取。 + + - 负面示例: + + ```java + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put("status", "success"); + resp.put("code", ResponseCode.CODE_SUCCESS); + resp.put("data", data); + return resp; + } + + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put("status", "error"); + resp.put("code", ResponseCode.CODE_FAIL); + resp.put("data", null); + return resp; + } + ``` + - 正面示例: + + > 字符串提取为常量引用。 + + ```java + public static final String STATUS = "status"; + public static final String CODE = "code"; + public static final String DATA = "data"; + + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "success"); + resp.put(CODE, ResponseCode.CODE_SUCCESS); + 
resp.put(DATA, data); + return resp; + } + + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "error"); + resp.put(CODE, ResponseCode.CODE_FAIL); + resp.put(DATA, null); + return resp; + } + ``` 2. 确保代码的可读性和直观性 - `annotation` 符号中的字符串不需要提取为常量。 @@ -155,9 +150,9 @@ sidebar_position: 3 4. 关于 `constant/variable` 行的排序顺序 按以下顺序对类中的变量行进行排序: - 1. `public static final V`, `static final V`,`protected static final V`, `private static final V` - 2. `public static v`, `static v`,`protected static v`, `private static v` - 3. `public v`, `v`, `protected v`, `private v` + 1. `public static final V`, `static final V`,`protected static final V`, `private static final V` + 2. `public static v`, `static v`,`protected static v`, `private static v` + 3. `public v`, `v`, `protected v`, `private v` ### 3.3 方法规则 @@ -174,32 +169,28 @@ sidebar_position: 3 3. 如果方法中的代码行数太多,请尝试在适当的点上使用多个子方法来分段方法体。 一般来说,需要坚持以下原则: - - 便于测试 - - 有好的语义 - - 易于阅读 + - 便于测试 + - 有好的语义 + - 易于阅读 此外,还需要考虑在组件、逻辑、抽象和场景等方面的切割是否合理。 > 然而,目前还没有明确的演示定义。在演变过程中,我们将为开发者提供更多的示例,以便他们有更清晰的参考和理解。 - ### 3.4 集合规则 1. 对于返回的 `collection` 值,除非有特殊的 `concurrent` (如线程安全),总是返回 `interface`,例如: - - - 如果使用 `ArrayList`,则返回 List - - 如果使用 `HashMap`,则返回 Map - - 如果使用 `HashSet`,则返回 Set - + - 如果使用 `ArrayList`,则返回 List + - 如果使用 `HashMap`,则返回 Map + - 如果使用 `HashSet`,则返回 Set 2. 如果存在多线程,可以使用以下声明或返回类型: - ```java - private CurrentHashMap map; - public CurrentHashMap funName(); - ``` +```java +private CurrentHashMap map; +public CurrentHashMap funName(); +``` 3. 使用 `isEmpty()` 而不是 `length() == 0` 或者 `size() == 0` - - 负面示例: ```java @@ -207,7 +198,6 @@ sidebar_position: 3 return; } ``` - - 正面示例: ```java @@ -227,9 +217,8 @@ sidebar_position: 3 ### 3.6 控制/条件语句 1. 避免因不合理的 `条件/控制` 分支顺序导致: - - - 多个代码行的 `深度` 为 `n+1` - - 多余的行 + - 多个代码行的 `深度` 为 `n+1` + - 多余的行 一般来说,如果一个方法的代码行深度由于连续嵌套的 `if... 
else..` 超过了 `2+ Tabs`,那么应该考虑试图 - `合并分支`, @@ -238,77 +227,85 @@ sidebar_position: 3 以减少代码行深度并提高可读性,例如: - 联合或将逻辑合并到下一级调用中 - - 负面示例: - ```java - if (isInsert) { - save(platform); - } else { - updateById(platform); - } - ``` - - 正面示例: - ```java - saveOrUpdate(platform); - ``` +- 负面示例: + +```java +if (isInsert) { +save(platform); +} else { +updateById(platform); +} +``` + +- 正面示例: + +```java +saveOrUpdate(platform); +``` + - 合并条件 - - 负面示例: - ```java - if (expression1) { - if(expression2) { - ...... - } - } - ``` - - 正面示例: - ```java - if (expression1 && expression2) { - ...... - } - ``` +- 负面示例: + +```java +if (expression1) { +if(expression2) { +...... +} +} + +``` + +- 正面示例: + + ```java + if (expression1 && expression2) { + ...... + } + ``` - 反转条件 - - 负面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (condition1) { - ... - } else { - ... - } - } - ``` - - - 正面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` +- 负面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (condition1) { + ... + } else { + ... + } + } + ``` +- 正面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (!condition1) { + ... + return; + } + // ... + } + ``` - 使用单一变量或方法减少复杂的条件表达式 - - 负面示例: - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` - - - 正面示例: - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // containsSqlServer的定义 - ``` +- 负面示例: + + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + ... + } + ``` +- 正面示例: + + ```java + if (containsSqlServer(dbType)) { + .... + } + //..... + // containsSqlServer的定义 + ``` > 在未来,使用 `sonarlint` 和 `better highlights` 检查代码深度看起来是个不错的选择。 @@ -316,20 +313,20 @@ sidebar_position: 3 1. 
方法缺少注释: - - `When`:该方法何时可以被调用 - - `How`:如何使用此方法以及如何传递参数等 - - `What`:此方法实现了哪些功能 - - `Note`:在调用此方法时开发人员应注意什么 +- `When`:该方法何时可以被调用 +- `How`:如何使用此方法以及如何传递参数等 +- `What`:此方法实现了哪些功能 +- `Note`:在调用此方法时开发人员应注意什么 2. 缺少必要的类头部描述注释。 - 添加 `What`,`Note` 等,如上述 `1` 中提到的。 +添加 `What`,`Note` 等,如上述 `1` 中提到的。 3. 在接口中的方法声明必须被注释。 - - 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 +- 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 - - 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 +- 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 4. 在注释行中的第一个词需要大写,如 `param` 行,`return` 行。 如果特殊引用作为主题不需要大写,需要注意特殊符号,例如引号。 @@ -339,31 +336,29 @@ sidebar_position: 3 1. 更倾向于使用 `non-capturing` lambda(不包含对外部范围的引用的lambda)。 Capturing lambda 在每次调用时都需要创建一个新的对象实例。`Non-capturing` lambda 可以为每次调用使用相同的实例。 - - 负面示例: +- 负面示例: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` - - - 正面示例: + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` +- 正面示例: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. 考虑使用方法引用而不是内联lambda - - 负面示例: - - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` +- 负面示例: - - 正面示例: + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` +- 正面示例: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -381,186 +376,180 @@ sidebar_position: 3 1. 使用 `StringUtils.isBlank` 而不是 `StringUtils.isEmpty` - - 负面示例: - - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` +- 负面示例: - - 正面示例: + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` +- 正面示例: - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. 
使用 `StringUtils.isNotBlank` 而不是 `StringUtils.isNotEmpty` - - 负面示例: - - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` +- 负面示例: - - 正面示例: + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` +- 正面示例: - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. 使用 `StringUtils.isAllBlank` 而不是 `StringUtils.isAllEmpty` - - 负面示例: - - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` +- 负面示例: - - 正面示例: + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` +- 正面示例: - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` 类 1. 枚举值比较 - - 负面示例: +- 负面示例: - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` - - - 正面示例: + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` +- 正面示例: - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. 枚举类不需要实现 Serializable - - 负面示例: - - ```java - public enum JobStatus implements Serializable { - ... - } - ``` +- 负面示例: - - 正面示例: + ```java + public enum JobStatus implements Serializable { + ... + } + ``` +- 正面示例: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + ... + } + ``` 3. 使用 `Enum.name()` 而不是 `Enum.toString()` - - 负面示例: - - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` +- 负面示例: - - 正面示例: + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` +- 正面示例: - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. 枚举类名称统一使用 Enum 后缀 - - 负面示例: +- 负面示例: - ```java - public enum JobStatus { - ... - } - ``` - - - 正面示例: + ```java + public enum JobStatus { + ... + } + ``` +- 正面示例: - ```java - public enum JobStatusEnum { - ... 
- } - ``` + ```java + public enum JobStatusEnum { + ... + } + ``` ### 3.13 `Deprecated` 注解 - - 负面示例: +- 负面示例: - ```java - @deprecated - public void process(String input) { - ... - } - ``` +```java +@deprecated +public void process(String input) { + ... +} +``` - - 正面示例: +- 正面示例: - ```java - @Deprecated - public void process(String input) { - ... - } - ``` +```java +@Deprecated +public void process(String input) { + ... +} +``` ## 4 日志 1. 使用 `占位符` 进行日志输出: - - 负面示例 - ```java - log.info("Deploy cluster request " + deployRequest); - ``` - - 正面示例 - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` +- 负面示例 -2. 打印日志时,注意选择 `日志级别` + ```java + log.info("Deploy cluster request " + deployRequest); + ``` +- 正面示例 - 当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` - - 负面示例: +2. 打印日志时,注意选择 `日志级别` - 假设当前日志级别为 `INFO`: +当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` +- 负面示例: - - 正面示例: + 假设当前日志级别为 `INFO`: - 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` +- 正面示例: - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: + + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 测试 @@ -568,10 +557,15 @@ sidebar_position: 3 2. 
实现的接口需在`e2e`模块下编写`e2e`测试用例脚本。 - ## 参考资料 + - https://site.mockito.org/ - https://alibaba.github.io/p3c/ - https://rules.sonarsource.com/java/ - https://junit.org/junit5/ - https://streampark.apache.org/ + +``` + +``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contact.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contact.md index 6167d13aa5f..0c574937517 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contact.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contact.md @@ -1,8 +1,9 @@ --- id: contact title: 交流联系 -sidebar_label: 交流联系 +sidebar_label: 交流联系 --- + > 如果您在使用过程有任何需要帮助或者想交流建议,可以通过 群 ISSUE 讨论交流。 [订阅邮件列表](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) : 发送邮件至 `dev-subscribe@hertzbeat.apache.org` 来订阅邮件列表. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md index 6df2d242220..55993958797 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md @@ -5,20 +5,20 @@ sidebar_position: 0 --- > 非常欢迎参与项目贡献,我们致力于维护一个互相帮助的快乐社区。 @@ -87,23 +87,31 @@ sidebar_position: 0 1. 首先您需要 Fork 目标仓库 [hertzbeat repository](https://github.com/apache/hertzbeat). 2. 然后 用git命令 将代码下载到本地: + ```shell git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended ``` + 3. 下载完成后,请参考目标仓库的入门指南或者 README 文件对项目进行初始化。 4. 接着,您可以参考如下命令进行代码的提交, 切换新的分支, 进行开发: + ```shell git checkout -b a-feature-branch #Recommended ``` + 5. 提交 commit , commit 描述信息需要符合约定格式: [module name or type name]feature or bugfix or doc: custom message. + ```shell git add git commit -m '[docs]feature: necessary instructions' #Recommended ``` + 6. 推送到远程仓库 + ```shell git push origin a-feature-branch ``` + 7. 
然后您就可以在 GitHub 上发起新的 PR (Pull Request)。 请注意 PR 的标题需要符合我们的规范,并且在 PR 中写上必要的说明,来方便 Committer 和其他贡献者进行代码审查。 @@ -148,14 +156,15 @@ git pull upstream master ### 模块 - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** 提供监控管理,系统管理基础服务 + > 提供对监控的管理,监控应用配置的管理,系统用户租户后台管理等。 -- **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 +> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 > 使用通用协议远程采集获取对端指标数据。 -- **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 +> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 > 采集指标结果数据管理,数据落盘,查询,计算统计。 -- **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** 提供告警服务 +> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** 提供告警服务 > 告警计算触发,任务状态联动,告警配置,告警通知。 -- **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 +> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 > 监控告警系统可视化控制台前端 ![hertzBeat](/img/docs/hertzbeat-arch.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md index 2bfebd21d2f..c9ed4b45859 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md @@ -1,7 +1,7 @@ --- id: development title: 如何将 HertzBeat 运行编译? -sidebar_label: 运行编译 +sidebar_label: 运行编译 --- ## 让 HertzBeat 运行起来 @@ -9,7 +9,6 @@ sidebar_label: 运行编译 > 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 > 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 - ### 后端启动 1. 
需要 `maven3+`, `java17` 和 `lombok` 环境 @@ -38,7 +37,7 @@ sidebar_label: 运行编译 ## 生成二进制包 -> 需要 `maven3+`, `java17`, `node` 和 `yarn` 环境. +> 需要 `maven3+`, `java17`, `node` 和 `yarn` 环境. ### 前端打包 @@ -52,7 +51,6 @@ sidebar_label: 运行编译 5. 打包: `yarn package` - ### 后端打包 1. 需要 `maven3+`, `java17` 环境 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md index e696b3c35e4..4cf56e0137b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- 对于任何类型的软件来说,良好的文档都是至关重要的。任何能够改进 HertzBeat 文档的贡献都是受欢迎的。 @@ -93,3 +93,4 @@ css 和其他样式文件放在 `src/css` 目录中。 ### 页面内容修改 > 所有页面文档都可以通过底部的'编辑此页面'按钮直接跳转到相应的 github 资源修改页面。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md index 541444473d2..2cccecd681c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md @@ -12,7 +12,7 @@ sidebar_position: 4 - JDK 17 - Node18 Yarn -- Apache Maven 3.x +- Apache Maven 3.x - GnuPG 2.x - Git - SVN (Apache使用svn来托管项目发布) @@ -22,11 +22,11 @@ sidebar_position: 4 ## 2. 
准备发布 > 首先整理帐户信息以更好地了解操作过程,稍后会多次使用。 -- apache id: `muchunjin (APACHE LDAP 用户名)` -- apache passphrase: `APACHE LDAP 密钥` -- apache email: `muchunjin@apache.org` -- gpg real name: `muchunjin (任何名称均可用, 在这里我将其设置为与apache id相同的名称)` -- gpg key passphrase: `创建gpg密钥时设置的密码,你需要记住此密码` +> - apache id: `muchunjin (APACHE LDAP 用户名)` +> - apache passphrase: `APACHE LDAP 密钥` +> - apache email: `muchunjin@apache.org` +> - gpg real name: `muchunjin (任何名称均可用, 在这里我将其设置为与apache id相同的名称)` +> - gpg key passphrase: `创建gpg密钥时设置的密码,你需要记住此密码` ### 2.1 生成密钥 @@ -130,7 +130,6 @@ gpg: unchanged: 1 或者进入 https://keyserver.ubuntu.com/ 网址,输入密钥的名称,然后点击'Search key' 按钮,查看是否有对应名称的密钥。 - #### 2.4 将 gpg 公钥添加到 Apache SVN 项目仓库的 KEYS 文件中 - Apache HertzBeat Dev 分支 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat @@ -230,9 +229,9 @@ release-1.6.0-rc1 #### 3.5 对二进制和源码包进行签名 -将上步骤生成的三个文件包放到`dist`目录下(若无则新建目录),然后对文件包进行签名和SHA512校验和生成。 +将上步骤生成的三个文件包放到`dist`目录下(若无则新建目录),然后对文件包进行签名和SHA512校验和生成。 -> 其中 `gpg -u 33545C76` 的 `33545C76` 是你的 GPG 密钥 ID,可以通过 `gpg --keyid-format SHORT --list-keys` 查看。 +> 其中 `gpg -u 33545C76` 的 `33545C76` 是你的 GPG 密钥 ID,可以通过 `gpg --keyid-format SHORT --list-keys` 查看。 ```shell cd dist @@ -341,7 +340,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" > `Send to`: dev@hertzbeat.apache.org
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
-> `Body`: +> `Body`: ``` Hello HertzBeat Community: @@ -404,8 +403,7 @@ Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" I'm happy to announce the vote has passed: - - +--- 4 binding +1, from: - cc @@ -413,24 +411,20 @@ I'm happy to announce the vote has passed: 1 non-binding +1, from: - Roc Marshal - - +--- no 0 or -1 votes. Vote thread: https://lists.apache.org/thread/t01b2lbtqzyt7j4dsbdp5qjc3gngjsdq - - +--- Thank you to everyone who helped us to verify and vote for this release. We will move to the ASF Incubator voting shortly. - - +--- Best, ChunJin Mu ``` 邮件内容中的一项是`Vote thread`,在 https://lists.apache.org/list.html?dev@hertzbeat.apache.org 查看获取 - #### 3.2 发送孵化社区投票邮件 发送孵化社区投票邮件需要至少三个`+1`,且无`-1`。 @@ -476,17 +470,14 @@ More detailed checklist please refer: Steps to validate the release, Please refer to: • https://www.apache.org/info/verification.html • https://hertzbeat.apache.org/docs/community/how_to_verify_release - - +--- How to Build: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package - - +--- Thanks, On behalf of Apache HertzBeat (incubating) community - - +--- Best, ChunJin Mu ``` @@ -562,11 +553,14 @@ https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugi 然后输入发版标题和描述 -- 发版标题: +- 发版标题: + ``` v1.6.0 ``` + - 描述: + ``` xxx release note: xxx @@ -597,8 +591,7 @@ Release Notes: https://github.com/apache/hertzbeat/releases/tag/v1.6.0 HertzBeat Resources: - Issue: https://github.com/apache/hertzbeat/issues - Mailing list: dev@hertzbeat.apache.org - - +--- Apache HertzBeat Team Best, @@ -607,6 +600,6 @@ ChunJin Mu 该版本的发布顺利结束。 ----- +--- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md index ee4f9563c83..9904af1967b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md @@ -3,6 +3,7 @@ id: how_to_verify_release title: 版本物料的验证 sidebar_position: 4 --- + # 验证候选版本 详细检查列表请参考官方的[check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) @@ -21,12 +22,10 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio ``` - ## 2. 验证上传的版本是否合规 > 开始验证环节,验证包含但不局限于以下内容和形式 - ### 2.1 查看发布包是否完整 > 上传到dist的包必须包含源码包,二进制包可选 @@ -36,7 +35,6 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio 3. 是否包含源码包的sha512 4. 如果上传了二进制包,则同样检查(2)-(4)所列的内容 - ### 2.2 检查gpg签名 首先导入发布人公钥。从svn仓库导入KEYS到本地环境。(发布版本的人不需要再导入,帮助做验证的人需要导入,用户名填发版人的即可) @@ -47,6 +45,7 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio $ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS $ gpg --import KEYS # 导入KEYS到本地 ``` + #### 2.2.2 信任公钥 > 信任此次版本所使用的KEY @@ -75,6 +74,7 @@ Do you really want to set this key to ultimate trust? (y/N) y #选择y gpg> ``` + #### 2.2.3 检查签名 ```shell @@ -82,6 +82,7 @@ for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i ; done ``` 检查结果 + > 出现类似以下内容则说明签名正确,关键字:**`Good signature`** ```shell @@ -95,7 +96,6 @@ gpg: Good signature from "xxx @apache.org>" > 本地计算sha512哈希后,验证是否与dist上的一致,如果上传二进制包,则同样需要检查二进制包的sha512哈希 - ```shell for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` @@ -123,7 +123,6 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz 参考: https://apache.org/legal/resolved.html - ### 2.5. 
源码编译验证 解压缩 `apache-hertzbeat-${release_version}-incubating-src.tar.gz` @@ -132,7 +131,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz cd apache-hertzbeat-${release_version}-incubating-src ``` -编译源码: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package +编译源码: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package 进行如下检查: @@ -148,7 +147,6 @@ cd apache-hertzbeat-${release_version}-incubating-src 参考: https://apache.org/legal/resolved.html - ## 3. 邮件回复 如果发起了发布投票,验证后,可以参照此回复示例进行邮件回复 @@ -169,11 +167,11 @@ IPMC 在 general@incubator.apache.org incubator 社区投票,请带上 binding Forward my +1 from dev@hertzbeat (non-binding) Copy my +1 from hertzbeat DEV ML (non-binding) ``` -::: - +::: 非PPMC/IPMC成员 + ```html +1 (non-binding) I checked: @@ -185,6 +183,7 @@ I checked: ``` PPMC/IPMC成员 + ```html //incubator社区 投票时,只有IPMC成员才具有约束性 binding +1 (binding) @@ -196,7 +195,6 @@ I checked: 5. .... ``` - --- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md index 57de5409834..922cbfe9a6a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- [开发者邮件列表](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) 是社区推荐的沟通和获取最新信息的方式。 @@ -32,19 +32,17 @@ sidebar_position: 1 - 使用此列表提出您对 HertzBeat 的问题 - 由 HertzBeat 贡献者用来讨论 HertzBeat 的开发 - -| 列表名称 | 地址 | 订阅 | 退订 | 归档 | 
-|--------------|------------------------------|------------------------------------------------------|---------------------------------------------------------|------------------------------------------------------------------------| -| **开发者列表** | dev@hertzbeat.apache.org | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | - +| 列表名称 | 地址 | 订阅 | 退订 | 归档 | +|-----------|--------------------------|-------------------------------------------------|---------------------------------------------------|-------------------------------------------------------------------| +| **开发者列表** | dev@hertzbeat.apache.org | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### 通知列表 - 关于 HertzBeat 代码库的更改的通知,频繁通知 -| 列表名称 | 地址 | 订阅 | 退订 | 归档 | -|-----------|------------------------------|-------------------------------------------------------------|--------------------------------------------------------------|-----------------------------------------------------------------------------| -| **通知列表** | notifications@hertzbeat.apache.org | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| 列表名称 | 地址 | 订阅 | 退订 | 归档 | +|----------|------------------------------------|-----------------------------------------------------------|-------------------------------------------------------------|-----------------------------------------------------------------------------| +| **通知列表** | notifications@hertzbeat.apache.org | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | 
[归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## 订阅步骤 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md index 188b68b3688..489215891e7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md @@ -5,20 +5,20 @@ sidebar_position: 4 --- [官方指南](https://community.apache.org/newcommitter.html#new-committer-process) @@ -50,7 +50,6 @@ sidebar_position: 4 - 等待root告诉我们已经完成 - PMC主席开启svn和其他访问权限 - 在JIRA和CWiki中将Committer添加到适当的组中 - - 通知Committer完成 参见 **Committer完成模板** @@ -243,3 +242,4 @@ you can now help fix that. A PPMC member will announce your election to the dev list soon. ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md index 4488f1fcfea..d7e144bb52b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md @@ -5,20 +5,20 @@ sidebar_position: 5 --- 这个指南是基于 [apache newcommitter](https://community.apache.org/newcommitter.html#new-committer-process) 的。 @@ -82,7 +82,6 @@ ${Work list}[1] [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache 参考投票指南](https://community.apache.org/newcommitter.html) - ### Close Vote Template ```text @@ -284,3 +283,4 @@ A PPMC member helps manage and guide the direction of the project. 
Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md index ee553bb6e1c..8940571f71c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md @@ -5,72 +5,64 @@ sidebar_position: 2 --- - * 首先从远程仓库 将代码的一份副本 fork 到您自己的仓库 * 远程仓库开发合并分支:`master` * 将您fork仓库clone到您的本地设备 - ```shell - git clone git@github.com:<您的账户名>/hertzbeat.git - ``` - + ```shell + git clone git@github.com:<您的账户名>/hertzbeat.git + ``` * 添加远程仓库地址,命名为 upstream - ```shell - git remote add upstream git@github.com:apache/hertzbeat.git - ``` - + ```shell + git remote add upstream git@github.com:apache/hertzbeat.git + ``` * 查看仓库 - ```shell - git remote -v - ``` + ```shell + git remote -v + ``` > 此时会有两个仓库:origin(您自己的仓库)和 upstream(远程仓库) * 获取/更新远程仓库代码 - ```shell - git fetch upstream - ``` - + ```shell + git fetch upstream + ``` * 将远程仓库代码同步到本地仓库 - ```shell - git checkout origin/master - git merge --no-ff upstream/master - ``` - + ```shell + git checkout origin/master + git merge --no-ff upstream/master + ``` * **⚠️注意一定要新建分支开发特性 `git checkout -b feature-xxx`,不建议使用master分支直接开发** - * 在本地修改代码后,提交到自己的仓库: - **注意提交信息为英文,不包含特殊字符** - ```shell - git commit -m '[docs]necessary instructions' - git push - ``` + **注意提交信息为英文,不包含特殊字符** + ```shell + git commit -m '[docs]necessary instructions' + git push + ``` * 将更改提交到远程仓库后,您可以在您的仓库页面上看到一个绿色的按钮“Compare & pull request”,点击它。 - * 这会弹出新建 Pull Request 页面,您需要这里仔细填写信息(英文),描述和代码同样重要,然后点击“Create pull request”按钮。 - * 然后社区的 Committers 将进行 CodeReview,并与您讨论一些细节(包括设计、实现、性能等),之后您可以根据建议直接在这个分支更新代码(无需新建PR)。当社区 Committer approve之后,提交将被合并到 master 分支。 - * 最后,恭喜您,您已经成为 HertzBeat 的官方贡献者,您会被加在贡献者墙上,您可以联系社区获取贡献者证书! 
+ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md index 7ce72412d0b..11455bfa3d1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md @@ -18,11 +18,9 @@ sidebar_label: Download 以前版本的 HertzBeat 可能会受到安全问题的影响,请考虑使用最新版本。 ::: - -| 版本 | 日期 | 下载 | Release Notes | -|---------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| -| v1.6.0 | 2024.06.10 | [apache-hertzbeat-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz) ( 
[signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-1.6.0-incubating-src.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512) ) | [release note](https://github.com/apache/hertzbeat/releases/tag/v1.6.0) | - +| 版本 | 日期 | 下载 | Release Notes | +|--------|------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| +| v1.6.0 | 2024.06.10 | [apache-hertzbeat-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz) ( 
[signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-1.6.0-incubating-src.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512) ) | [release note](https://github.com/apache/hertzbeat/releases/tag/v1.6.0) | ## 归档版本 @@ -34,3 +32,4 @@ sidebar_label: Download - HertzBeat https://hub.docker.com/r/apache/hertzbeat - HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md index b2d8f1489f5..29d5478158a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md @@ -9,7 +9,7 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 **使用协议:JMX** -### 监控前操作 +### 监控前操作 > 您需要在 ActiveMQ 开启 `JMX` 服务,HertzBeat 使用 JMX 协议对 ActiveMQ 进行指标采集。 @@ -23,9 +23,10 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 ``` -2. 修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 +2. 
修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 + +将如下原配置信息 -将如下原配置信息 ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" @@ -34,7 +35,9 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ``` -更新为如下配置,⚠️注意修改`本机对外IP` + +更新为如下配置,⚠️注意修改`本机对外IP` + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" @@ -49,25 +52,25 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" 3. 重启 ACTIVEMQ 服务,在 HertzBeat 添加对应 ActiveMQ 监控即可,参数使用 JMX 配置的 IP 端口。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -|-------------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| JMX端口 | JMX 对外提供的HTTP端口,默认为 11099。 | -| JMX URL | 可选,自定义 JMX URL 连接 | -| 用户名 | 认证时使用的用户名 | -| 密码 | 认证时使用的密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| JMX端口 | JMX 对外提供的HTTP端口,默认为 11099。 | +| JMX URL | 可选,自定义 JMX URL 连接 | +| 用户名 | 认证时使用的用户名 | +| 密码 | 认证时使用的密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 #### 指标集合:broker -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------------------|------|-----------------------------------------------------------------------| | BrokerName | 无 | The name of 
the broker. | | BrokerVersion | 无 | The version of the broker. | @@ -88,57 +91,56 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | MaxMessageSize | 无 | Max message size on this broker | | MinMessageSize | 无 | Min message size on this broker | -#### 指标集合:topic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-------------------------------------------------------------------------------------------| -| Name | 无 | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | 无 | The percentage of the memory limit used | -| ProducerCount | 无 | Number of producers attached to this destination | -| ConsumerCount | 无 | Number of consumers subscribed to this destination. | -| EnqueueCount | 无 | Number of messages that have been sent to the destination. | -| DequeueCount | 无 | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | 无 | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | 无 | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | 无 | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | 无 | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - +#### 指标集合:topic + +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|-------------------------------------------------------------------------------------------| +| Name | 无 | Name of this destination. | +| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | +| MemoryPercentUsage | 无 | The percentage of the memory limit used | +| ProducerCount | 无 | Number of producers attached to this destination | +| ConsumerCount | 无 | Number of consumers subscribed to this destination. | +| EnqueueCount | 无 | Number of messages that have been sent to the destination. | +| DequeueCount | 无 | Number of messages that has been acknowledged (and removed) from the destination. | +| ForwardCount | 无 | Number of messages that have been forwarded (to a networked broker) from the destination. | +| InFlightCount | 无 | Number of messages that have been dispatched to, but not acknowledged by, consumers. | +| DispatchCount | 无 | Number of messages that has been delivered to consumers, including those not acknowledged | +| ExpiredCount | 无 | Number of messages that have been expired. | +| StoreMessageSize | B | The memory size of all messages in this destination's store. | +| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| +| MaxEnqueueTime | ms | The longest time a message was held on this destination | +| MinEnqueueTime | ms | The shortest time a message was held on this destination | +| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | +| AverageMessageSize | B | Average message size on this destination | +| MaxMessageSize | B | Max message size on this destination | +| MinMessageSize | B | Min message size on this destination | #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md index 5323ede8110..52367155d89 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md @@ -9,33 +9,31 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8080 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| HTTPS | 是否启用HTTPS | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:airflow_health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | -------------------- | -| metadatabase | 无 | metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|------------------| +| metadatabase | 无 | metadatabase健康情况 | +| scheduler | 无 | scheduler健康情况 | +| triggerer | 无 | triggerer健康情况 | #### 指标集合:airflow_version -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | --------------- | -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------| +| value | 无 | Airflow版本 | +| git_version | 无 | Airflow git版本 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_console.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_console.md index 68cf7339eae..5198b961b66 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_console.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_console.md @@ -6,13 +6,13 @@ sidebar_label: 告警模板登录台地址 > 阈值触发后发送告警信息,通过钉钉/企业微信/飞书机器人通知或者使用邮箱通知的时候,告警内容中有登录控制台的详情链接 - ### 自定义设置 在我们的启动配置文件application.yml中,找到下面的配置 + ```yml alerter: console-url: #这里就是我们的自定义控制台地址 ``` -默认值是赫兹跳动的官方控制台地址 \ No newline at end of file +默认值是赫兹跳动的官方控制台地址 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md index aec6342f7d3..9d0ee3b088f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md @@ -5,15 +5,15 @@ sidebar_label: 告警钉钉机器人通知 keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过钉钉机器人通知到接收人。 +> 阈值触发后发送告警信息,通过钉钉机器人通知到接收人。 -### 操作步骤 +### 操作步骤 1. **【钉钉桌面客户端】-> 【群设置】-> 【智能群助手】-> 【添加新建机器人-选自定义】-> 【设置机器人名称头像】-> 【注意⚠️设置自定义关键字: HertzBeat】 ->【添加成功后复制其WebHook地址】** -> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 +> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 -![email](/img/docs/help/alert-notice-8.png) +![email](/img/docs/help/alert-notice-8.png) 2. **【保存机器人的WebHook地址access_token值】** @@ -24,18 +24,18 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/alert-notice-9.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 钉钉机器人通知常见问题 -### 钉钉机器人通知常见问题 +1. 钉钉群未收到机器人告警通知 -1. 
钉钉群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查钉钉机器人是否配置了安全自定义关键字:HertzBeat > 请排查是否配置正确机器人ACCESS_TOKEN,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md index d6c4879a2ba..9694126d0dd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md @@ -5,68 +5,66 @@ sidebar_label: 告警 Discord 机器人通知 keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Discord 机器人通知到接收人。 +> 阈值触发后发送告警信息,通过 Discord 机器人通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 -### 在 Discord 创建应用, 应用下创建机器人, 获取机器人 Token +### 在 Discord 创建应用, 应用下创建机器人, 获取机器人 Token -1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 +1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 ![bot](/img/docs/help/discord-bot-1.png) -2. 在应用下创建机器人,获取机器人 Token +2. 在应用下创建机器人,获取机器人 Token ![bot](/img/docs/help/discord-bot-2.png) ![bot](/img/docs/help/discord-bot-3.png) -3. 授权机器人到聊天服务器 +3. 授权机器人到聊天服务器 -> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` +> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` ![bot](/img/docs/help/discord-bot-4.png) > 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 -4. 查看您的聊天服务器是否已经加入机器人成员 +4. 查看您的聊天服务器是否已经加入机器人成员 ![bot](/img/docs/help/discord-bot-5.png) -### 开启开发者模式,获取频道 Channel ID +### 开启开发者模式,获取频道 Channel ID -1. 个人设置 -> 高级设置 -> 开启开发者模式 +1. 个人设置 -> 高级设置 -> 开启开发者模式 ![bot](/img/docs/help/discord-bot-6.png) -2. 获取频道 Channel ID +2. 
获取频道 Channel ID -> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID +> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID ![bot](/img/docs/help/discord-bot-7.png) - -### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot +### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot 1. **【告警通知】->【新增接收人】 ->【选择 Discord 机器人通知方式】->【设置机器人Token和ChannelId】-> 【确定】** ![email](/img/docs/help/discord-bot-8.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -![email](/img/docs/help/alert-notice-policy.png) +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +![email](/img/docs/help/alert-notice-policy.png) -### Discord 机器人通知常见问题 +### Discord 机器人通知常见问题 -1. Discord 未收到机器人告警通知 +1. Discord 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 > 请排查机器人是否被 Discord聊天服务器正确赋权 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md index 7033f6036d6..d4dc218c591 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md @@ -5,34 +5,35 @@ sidebar_label: 告警邮件通知 keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过邮件通知到接收人。 +> 阈值触发后发送告警信息,通过邮件通知到接收人。 -### 操作步骤 +### 操作步骤 -1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** +1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** -![email](/img/docs/help/alert-notice-1.png) +![email](/img/docs/help/alert-notice-1.png) 2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** -![email](/img/docs/help/alert-notice-2.png) + ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) +![email](/img/docs/help/alert-notice-3.png) -3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. 
** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 邮件通知常见问题 -### 邮件通知常见问题 +1. 自己内网部署的HertzBeat无法接收到邮件通知 -1. 自己内网部署的HertzBeat无法接收到邮件通知 -> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 +> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 + +2. 云环境TanCloud无法接收到邮件通知 -2. 云环境TanCloud无法接收到邮件通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确邮箱,是否已配置告警策略关联 -> 请查询邮箱的垃圾箱里是否把告警邮件拦截 +> 请查询邮箱的垃圾箱里是否把告警邮件拦截 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md index 3f1c5a2b9c1..b70c8b10c40 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md @@ -5,28 +5,28 @@ sidebar_label: 企业微信应用告警通知 keywords: [开源告警系统, 开源监控告警系统, 企业微信应用告警通知] --- -> 阈值触发后发送告警信息,通过企业微信应用通知到接收人. +> 阈值触发后发送告警信息,通过企业微信应用通知到接收人. -### Operation steps +### Operation steps 1. **【企业微信后台管理】-> 【App管理】-> 【创建一个新的应用】-> 【设置应用信息】->【添加成功后复制应用的AgentId和Secret】** -![email](/img/docs/help/alert-wechat-1.jpg) +![email](/img/docs/help/alert-wechat-1.jpg) 2. **【告警通知】->【新增接收人】 ->【选择企业微信应用通知方式】->【设置企业ID,企业应用id和应用的secret 】-> 【确定】** ![email](/img/docs/help/alert-wechat-2.jpg) -3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** -![email](/img/docs/help/alert-wechat-3.jpg) +![email](/img/docs/help/alert-wechat-3.jpg) +### 企业微信应用通知常见问题 -### 企业微信应用通知常见问题 +1. 
企业微信应用未收到告警通知. -1. 企业微信应用未收到告警通知. > 请检查用户是否具有应用程序权限. > 请检查企业应用程序回调地址设置是否正常. > 请检查服务器IP是否在企业应用程序白名单上. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md index 448e70de223..604eff34fdc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md @@ -5,30 +5,30 @@ sidebar_label: 告警飞书机器人通知 keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过飞书机器人通知到接收人。 +> 阈值触发后发送告警信息,通过飞书机器人通知到接收人。 -### 操作步骤 +### 操作步骤 1. **【飞书客户端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -2. **【保存机器人的WebHook地址的KEY值】** +2. **【保存机器人的WebHook地址的KEY值】** > 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 飞书机器人通知常见问题 -### 飞书机器人通知常见问题 +1. 飞书群未收到机器人告警通知 -1. 飞书群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 +> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md index e137d07e9e2..c81f5608674 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md @@ -5,34 +5,33 @@ sidebar_label: 告警 Slack Webhook 通知 keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Slack Webhook 通知到接收人。 +> 阈值触发后发送告警信息,通过 Slack Webhook 通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 -### 在 Slack 开启 Webhook, 获取 Webhook URL +### 在 Slack 开启 Webhook, 获取 Webhook URL -参考官网文档 [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) +参考官网文档 [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) -### 在 HertzBeat 新增告警通知人,通知方式为 Slack Webhook +### 在 HertzBeat 新增告警通知人,通知方式为 Slack Webhook 1. **【告警通知】->【新增接收人】 ->【选择 Slack Webhook 通知方式】->【设置 Webhook URL】-> 【确定】** ![email](/img/docs/help/slack-bot-1.png) -2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) +![email](/img/docs/help/alert-notice-policy.png) +### Slack 机器人通知常见问题 -### Slack 机器人通知常见问题 - -1. Slack 未收到机器人告警通知 +1. Slack 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 +> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md index 6730aa19dcb..df609e66b50 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md @@ -5,22 +5,22 @@ sidebar_label: 告警 Telegram 机器人通知 keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过 Telegram 机器人通知到接收人。 +> 阈值触发后发送告警信息,通过 Telegram 机器人通知到接收人。 -## 操作步骤 +## 操作步骤 -> 部署网络本身需支持科学上网,不支持设置代理 +> 部署网络本身需支持科学上网,不支持设置代理 ### 在 Telegram 创建机器人, 获取 Bot Token 和 UserId -1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` +1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` ![telegram-bot](/img/docs/help/telegram-bot-1.png) -2. 获取接收人的 `User ID` +2. 获取接收人的 `User ID` -**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, -访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` +**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, +访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` ```json { @@ -42,27 +42,26 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] } ``` -3. 记录保存我们获得的 `Token` 和 `User Id` +3. 记录保存我们获得的 `Token` 和 `User Id` -### 在 HertzBeat 新增告警通知人,通知方式为 Telegram Bot +### 在 HertzBeat 新增告警通知人,通知方式为 Telegram Bot 1. **【告警通知】->【新增接收人】 ->【选择 Telegram 机器人通知方式】->【设置机器人Token和UserId】-> 【确定】** ![email](/img/docs/help/telegram-bot-2.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) +![email](/img/docs/help/alert-notice-policy.png) +### Telegram 机器人通知常见问题 -### Telegram 机器人通知常见问题 - -1. Telegram 未收到机器人告警通知 +1. Telegram 未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 -> UserId 应为消息接收对象的UserId +> UserId 应为消息接收对象的UserId -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold.md index 401a3d04d45..a41ae531e28 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold.md @@ -3,6 +3,7 @@ id: alert_threshold title: 阈值告警配置 sidebar_label: 阈值告警配置 --- + > 对监控指标配置告警阈值(警告告警,严重告警,紧急告警),系统根据阈值配置和采集指标数据计算触发告警。 ## 操作步骤 @@ -20,6 +21,7 @@ sidebar_label: 阈值告警配置 如上图所示,新增标签,这里我们设置标签为: linux:dev (开发环境使用Linux) #### 配置标签 + TODO 图片名称更新 ![threshold](/img/docs/help/alert-threshold-3.png) @@ -47,7 +49,6 @@ TODO 图片名称更新 - **恢复通知**:告警触发后是否发送恢复通知,默认不发送。 - **启用告警**:此告警阈值配置开启生效或关闭 - **阈值告警配置完毕,已经被成功触发的告警信息可以在【告警中心】看到。** **若需要将告警信息邮件,微信,钉钉飞书通知给相关人员,可以在【告警通知】配置。** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold_expr.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold_expr.md index 5924d8965f8..37a9fb29d1b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold_expr.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold_expr.md @@ -3,11 +3,11 @@ id: alert_threshold_expr title: 阈值触发表达式 sidebar_label: 阈值触发表达式 --- + > 在我们配置阈值告警时,需要配置阈值触发表达式,系统根据表达式和监控指标值计算触发是否告警,这里详细介绍下表达式使用。 #### 
表达式支持的操作符 - | 运算符(可视化配置) | 运算符(表达式配置) | 支持类型 | 说明 | | | ------------------ | -------------------- | ----------------------- | ------------------------ | - | | 等于 | equals(str1,str2) | 字符型 | 判断字符串是否相等 | | @@ -27,22 +27,21 @@ sidebar_label: 阈值触发表达式 #### 表达式函数库列表 - -| 支持函数库 | 说明 | -| -------------------------------------------- | ------------------------------------------------------------------ | -| condition ? trueExpression : falseExpression | 三元运算符 | -| toDouble(str) | 将字符串转换为Double类型 | -| toBoolean(str) | 将字符串转换为Boolean类型 | -| toInteger(str) | 将字符串转换为Integer类型 | -| array[n] | 取数组第n个元素 | -| * | 算法乘 | -| / | 算法除 | -| % | 求余 | -| ( 和 ) | 括号用于控制运算的优先级,确保在逻辑或数学表达式中某些部分先被计算 | -| + | 加号用于表示数学中的加法运算、字符串拼接 | -| - | 减号用于表示数学中的减法运算 | -| && | 逻辑AND操作符 | -| \|\| | 逻辑OR操作符 | +| 支持函数库 | 说明 | +|----------------------------------------------|-----------------------------------| +| condition ? trueExpression : falseExpression | 三元运算符 | +| toDouble(str) | 将字符串转换为Double类型 | +| toBoolean(str) | 将字符串转换为Boolean类型 | +| toInteger(str) | 将字符串转换为Integer类型 | +| array[n] | 取数组第n个元素 | +| * | 算法乘 | +| / | 算法除 | +| % | 求余 | +| ( 和 ) | 括号用于控制运算的优先级,确保在逻辑或数学表达式中某些部分先被计算 | +| + | 加号用于表示数学中的加法运算、字符串拼接 | +| - | 减号用于表示数学中的减法运算 | +| && | 逻辑AND操作符 | +| \|\| | 逻辑OR操作符 | #### 支持的环境变量 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md index 61ed1dc99b5..022cd50f07e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md @@ -5,23 +5,24 @@ sidebar_label: 告警 Webhook 回调通知 keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过post请求方式调用WebHook接口通知到接收人。 +> 阈值触发后发送告警信息,通过post请求方式调用WebHook接口通知到接收人。 -## 操作步骤 +## 操作步骤 -1. **【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** +1. 
**【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** ![email](/img/docs/help/alert-notice-5.png) -2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) -### WebHook回调POST请求体BODY内容 +### WebHook回调POST请求体BODY内容 + +内容格式:JSON -内容格式:JSON ```json { "alarmId": 76456, @@ -43,22 +44,23 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 } ``` -| | | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | -| tags | example: {key1:value1} | +| | | 
+|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | +| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | +| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | +| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | +| content | string title: The actual content of the alarm notification 告警通知实际内容 | +| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | +| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | +| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | +| tags | example: {key1:value1} | + +### webhook通知常见问题 -### webhook通知常见问题 +1. WebHook回调未生效 -1. WebHook回调未生效 > 请查看告警中心是否已经产生此条告警信息 > 请排查配置的WebHook回调地址是否正确 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md index ce73c131d00..e0dbabf1a70 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md @@ -5,34 +5,34 @@ sidebar_label: 告警企业微信通知 keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系统] --- -> 阈值触发后发送告警信息,通过企业微信机器人通知到接收人。 +> 阈值触发后发送告警信息,通过企业微信机器人通知到接收人。 -### 操作步骤 +### 操作步骤 -1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** +1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -![email](/img/docs/help/alert-notice-6.jpg) +![email](/img/docs/help/alert-notice-6.jpg) -2. **【保存机器人的WebHook地址的KEY值】** +2. **【保存机器人的WebHook地址的KEY值】** > 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** +3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** ![email](/img/docs/help/alert-notice-7.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### 企业微信机器人通知常见问题 -### 企业微信机器人通知常见问题 +1. 企业微信群未收到机器人告警通知 -1. 企业微信群未收到机器人告警通知 > 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 +> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 -其它问题可以通过交流群ISSUE反馈哦! +其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md index 64f15c3777d..391005c080c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md @@ -4,114 +4,105 @@ title: 监控:AlmaLinux操作系统监控 sidebar_label: AlmaLinux操作系统 keywords: [开源监控系统, 开源操作系统监控, AlmaLinux操作系统监控] --- + > 对AlmaLinux操作系统的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- | ------------------------------------------------------------------------- | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -| 密钥 | 连接服务器所需密钥 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 密钥 | 连接服务器所需密钥 | ### 采集指标 #### 指标集合:系统基本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------ | -| Host Name | 无 | 主机名称 | -| System Version | 无 | 操作系统版本 | -| Uptime | 无 | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Host Name | 无 | 主机名称 | +| System Version | 无 | 操作系统版本 | +| Uptime | 无 | 启动时间 | #### 指标集合:CPU 信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | --------------------------- | -| info | 无 | CPU型号 | -| cores 
| 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:内存信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | ---------------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:磁盘信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------- | -------- | ------------------ | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:网卡信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:文件系统 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | -------------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | #### 指标集合:Top10 CPU进程 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| cpu_usage | % | CPU占用率 | -| mem_usage | % | 内存占用率 | -| command | 无 | 执行命令 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| cpu_usage | % | CPU占用率 | +| mem_usage | % | 内存占用率 | +| command | 无 | 执行命令 | #### 指标集合:Top10 内存进程 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| mem_usage | % | 内存占用率 | +| cpu_usage | % | CPU占用率 | +| command | 无 | 执行命令 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| mem_usage | % | 内存占用率 | -| cpu_usage | % | CPU占用率 | -| command | 无 | 执行命令 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md index 9b80fc35828..89f3cd701bc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md @@ -5,34 +5,33 @@ sidebar_label: HTTP API keywords: [开源监控系统, 开源网站监控, HTTP API监控] --- -> 调用HTTP API接口,查看接口是否可用,对其响应时间等指标进行监测 +> 调用HTTP API接口,查看接口是否可用,对其响应时间等指标进行监测 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 请求方式 | 设置接口调用的请求方式:GET,POST,PUT,DELETE。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 用户名 | 接口Basic认证或Digest认证时使用的用户名 | -| 密码 | 接口Basic认证或Digest认证时使用的密码 | -| 请求Headers | HTTP 请求头 | -| 查询Params | 
HTTP查询参数,支持[时间表达式](time_expression) | -| Content-Type | 设置携带BODY请求体数据请求时的资源类型 | -| 请求BODY | 设置携带BODY请求体数据,PUT POST请求方式时有效 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 请求方式 | 设置接口调用的请求方式:GET,POST,PUT,DELETE。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 用户名 | 接口Basic认证或Digest认证时使用的用户名 | +| 密码 | 接口Basic认证或Digest认证时使用的密码 | +| 请求Headers | HTTP 请求头 | +| 查询Params | HTTP查询参数,支持[时间表达式](time_expression) | +| Content-Type | 设置携带BODY请求体数据请求时的资源类型 | +| 请求BODY | 设置携带BODY请求体数据,PUT POST请求方式时有效 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md index 03c514bc7b8..3d0654db3b5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, CentOS操作系统监 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 
新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 
| 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md index 1c5adeeccc2..955c87b4e4f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md @@ -4,96 +4,93 @@ title: 监控:Clickhouse数据库监控 sidebar_label: Clickhouse数据库 keywords: [开源监控系统, 开源数据库监控, Clickhouse数据库监控] --- + > 对Clickhouse数据库的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------------------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为8123。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 
描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为8123。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:ping 可用性 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | ------------ | -| responseTime | 无 | 响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | 无 | 响应时间 | #### 指标集合:system.metrics表的数据 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | -------------------------------------- | -| Query | 无 | 正在执行的查询的数量 | -| Merge | 无 | 正在执行的后台合并的数量 | -| Move | 无 | 正在执行的后台移动的数量 | -| PartMutation | 无 | 表变更的次数 | -| ReplicatedFetch | 无 | 从副本收集的数据块数量 | -| ReplicatedSend | 无 | 发送到副本的数量块数量 | -| ReplicatedChecks | 无 | 检查一致性的数据块数量 | -| QueryPreempted | 无 | 停止或等待的查询数量 | -| TCPConnection | 无 | TCP连接数量 | -| HTTPConnection | 无 | HTTP连接数量 | -| OpenFileForRead | 无 | 打开的可读文件的数量 | -| OpenFileForWrite | 无 | 打开的可写文件的数量 | -| QueryThread | 无 | 查询处理的线程数量 | -| ReadonlyReplica | 无 | 处于只读状态的 Replicated table 的数量 | -| EphemeralNode | 无 | ZooKeeper 中保存的临时节点数 | -| ZooKeeperWatch | 无 | ZooKeeper事件订阅数 | -| StorageBufferBytes | Bytes | Buffer tables 中的字节数 | -| VersionInteger | 无 | ClickHouse 版本号 | -| RWLockWaitingReaders | 无 | 等待读表的读写锁的线程数量 | -| RWLockWaitingWriters | 无 | 等待写表的读写锁的线程数量 | -| RWLockActiveReaders | 无 | 在一个表的读写锁中持有读锁的线程数 | -| RWLockActiveWriters | 无 | 在一个表的读写锁中持有写锁的线程数 | -| GlobalThread | 无 | 全局线程池中的线程数 | -| GlobalThreadActive | 无 | 全局线程池中活跃的线程数 | -| LocalThread | 无 | 本地线程池中的线程数 | -| LocalThreadActive | 无 | 本地线程池中活跃的线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|-------|------------------------------| +| Query | 无 | 正在执行的查询的数量 | +| Merge | 无 | 
正在执行的后台合并的数量 | +| Move | 无 | 正在执行的后台移动的数量 | +| PartMutation | 无 | 表变更的次数 | +| ReplicatedFetch | 无 | 从副本收集的数据块数量 | +| ReplicatedSend | 无 | 发送到副本的数量块数量 | +| ReplicatedChecks | 无 | 检查一致性的数据块数量 | +| QueryPreempted | 无 | 停止或等待的查询数量 | +| TCPConnection | 无 | TCP连接数量 | +| HTTPConnection | 无 | HTTP连接数量 | +| OpenFileForRead | 无 | 打开的可读文件的数量 | +| OpenFileForWrite | 无 | 打开的可写文件的数量 | +| QueryThread | 无 | 查询处理的线程数量 | +| ReadonlyReplica | 无 | 处于只读状态的 Replicated table 的数量 | +| EphemeralNode | 无 | ZooKeeper 中保存的临时节点数 | +| ZooKeeperWatch | 无 | ZooKeeper事件订阅数 | +| StorageBufferBytes | Bytes | Buffer tables 中的字节数 | +| VersionInteger | 无 | ClickHouse 版本号 | +| RWLockWaitingReaders | 无 | 等待读表的读写锁的线程数量 | +| RWLockWaitingWriters | 无 | 等待写表的读写锁的线程数量 | +| RWLockActiveReaders | 无 | 在一个表的读写锁中持有读锁的线程数 | +| RWLockActiveWriters | 无 | 在一个表的读写锁中持有写锁的线程数 | +| GlobalThread | 无 | 全局线程池中的线程数 | +| GlobalThreadActive | 无 | 全局线程池中活跃的线程数 | +| LocalThread | 无 | 本地线程池中的线程数 | +| LocalThreadActive | 无 | 本地线程池中活跃的线程数 | #### 指标集合:system.events表的数据 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------------------------- |-------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------------|-------|------------------------------------------------------------------------------------------------| | Query | 无 | 要解释和可能执行的查询数量。 不包括由于 AST 大小限制、配额限制或同时运行的查询数量限制而无法解析或被拒绝的查询。 可能包括 ClickHouse 本身发起的内部查询。 不计算子查询。 | -| SelectQuery | 无 | 可能执行的 Select 查询数 | -| InsertQuery | 无 | 可能执行的 Insert 查询数 | -| InsertedRows | 无 | 被插入到所有表中的行数 | -| InsertedBytes | Bytes | 被插入到所有表中的字节数 | -| FailedQuery | 无 | 执行失败的查询数量 | -| FailedSelectQuery | 无 | 执行失败的 Select 查询数量 | -| FileOpen | 无 | 文件打开数 | -| MergeTreeDataWriterRows | 无 | 写入 MergeTree 表的数据行数 | -| MergeTreeDataWriterCompressedBytes | Bytes | 压缩写入 MergeTree 表的数据字节数 | +| SelectQuery | 无 | 可能执行的 Select 查询数 | +| InsertQuery | 无 | 
可能执行的 Insert 查询数 | +| InsertedRows | 无 | 被插入到所有表中的行数 | +| InsertedBytes | Bytes | 被插入到所有表中的字节数 | +| FailedQuery | 无 | 执行失败的查询数量 | +| FailedSelectQuery | 无 | 执行失败的 Select 查询数量 | +| FileOpen | 无 | 文件打开数 | +| MergeTreeDataWriterRows | 无 | 写入 MergeTree 表的数据行数 | +| MergeTreeDataWriterCompressedBytes | Bytes | 压缩写入 MergeTree 表的数据字节数 | #### 指标集合:system.asynchronous_metrics表的数据 +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------------------|------|----------------------------------| +| AsynchronousMetricsCalculationTimeSpent | 无 | 异步指标计算花费的时间(秒) | +| jemalloc.arenas.all.muzzy_purged | 无 | 被清除的模糊(muzzy)页的数量 | +| jemalloc.arenas.all.dirty_purged | 无 | 被清除的脏 (dirty)页的数量 | +| BlockReadBytes_ram1 | 无 | ram1 块读取的字节数 | +| jemalloc.background_thread.run_intervals | 无 | jemalloc 后台线程的运行间隔数 | +| BlockQueueTime_nbd13 | 无 | nbd13 块队列等待时间 | +| jemalloc.background_thread.num_threads | 无 | jemalloc 后台线程的数量 | +| jemalloc.resident | 无 | jemalloc 分配器占用的物理内存大小(字节) | +| InterserverThreads | 无 | Interserver 线程数 | +| BlockWriteMerges_nbd7 | 无 | nbd7 块写合并数量 | +| MarkCacheBytes | 无 | StorageMergeTree 的 marks 的缓存大小 | +| MarkCacheFiles | 无 | StorageMergeTree 的 marks 的缓存文件数量 | +| MaxPartCountForPartition | 无 | partitions 中最大的活跃数据块的数量 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------------------------------- | ------ | ----------------------------------------- | -| AsynchronousMetricsCalculationTimeSpent | 无 | 异步指标计算花费的时间(秒) | -| jemalloc.arenas.all.muzzy_purged | 无 | 被清除的模糊(muzzy)页的数量 | -| jemalloc.arenas.all.dirty_purged | 无 | 被清除的脏 (dirty)页的数量 | -| BlockReadBytes_ram1 | 无 | ram1 块读取的字节数 | -| jemalloc.background_thread.run_intervals | 无 | jemalloc 后台线程的运行间隔数 | -| BlockQueueTime_nbd13 | 无 | nbd13 块队列等待时间 | -| jemalloc.background_thread.num_threads | 无 | jemalloc 后台线程的数量 | -| jemalloc.resident | 无 | jemalloc 分配器占用的物理内存大小(字节) | -| InterserverThreads | 无 | Interserver 线程数 | -| BlockWriteMerges_nbd7 | 无 | nbd7 块写合并数量 | -| MarkCacheBytes | 无 | StorageMergeTree 的 marks 的缓存大小 | -| MarkCacheFiles | 无 | 
StorageMergeTree 的 marks 的缓存文件数量 | -| MaxPartCountForPartition | 无 | partitions 中最大的活跃数据块的数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md index dcda89ee2b6..6b353bafd0b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md @@ -4,71 +4,67 @@ title: 监控:debian 系统监控 sidebar_label: Debian keywords: [开源监控系统, 操作系统监控, Debian监控] --- + > 对Debian系统的通用性能指标进行采集监控 ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- | ------------------------------------------------------------------------- | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Debian系统的ssh端口,默认: 22 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | -| 用户名 | 服务器用户名 | -| 密码 | 服务器密码 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -| 密钥 | 连接服务器所需密钥 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Debian系统的ssh端口,默认: 22 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | +| 用户名 | 服务器用户名 | +| 密码 | 服务器密码 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 密钥 | 连接服务器所需密钥 | ### 采集指标 #### 指标集合:系统基本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------ | -| Host Name | 无 | 主机名称 | -| System Version | 无 | 操作系统版本 | -| Uptime | 无 | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Host Name | 无 | 主机名称 | +| System Version | 无 | 操作系统版本 | +| Uptime | 无 | 启动时间 | #### 指标集合:CPU 信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | ------------ | -| Info | 无 | 
型号 | -| Cores | 无 | 核数 | -| Interrupt | 无 | 中断数 | -| Load | 无 | 负载 | -| Context Switch | 无 | 上下文切换 | -| Usage | % | 使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Info | 无 | 型号 | +| Cores | 无 | 核数 | +| Interrupt | 无 | 中断数 | +| Load | 无 | 负载 | +| Context Switch | 无 | 上下文切换 | +| Usage | % | 使用率 | #### 指标集合:内存信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------- | -------- | -------------- | -| Total Memory | Mb | 总内存容量 | -| User Program Memory | Mb | 用户程序内存量 | -| Free Memory | Mb | 空闲内存容量 | -| Buff Cache Memory | Mb | 缓存占用内存 | -| Available Memory | Mb | 剩余可用内存 | -| Memory Usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|---------| +| Total Memory | Mb | 总内存容量 | +| User Program Memory | Mb | 用户程序内存量 | +| Free Memory | Mb | 空闲内存容量 | +| Buff Cache Memory | Mb | 缓存占用内存 | +| Available Memory | Mb | 剩余可用内存 | +| Memory Usage | % | 内存使用率 | #### 指标集合:磁盘信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------- | -------- | ------------ | -| Disk Num | 无 | 磁盘总数 | -| Partition Num | 无 | 分区总数 | -| Block Write | 无 | 写磁盘块数 | -| Block Read | 无 | 读磁盘块数 | -| Write Rate | iops | 磁盘写速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|--------| +| Disk Num | 无 | 磁盘总数 | +| Partition Num | 无 | 分区总数 | +| Block Write | 无 | 写磁盘块数 | +| Block Read | 无 | 读磁盘块数 | +| Write Rate | iops | 磁盘写速率 | #### 指标集合:网卡信息 @@ -103,3 +99,4 @@ keywords: [开源监控系统, 操作系统监控, Debian监控] - 内存占用率:% - CPU占用率:% + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md index cdd9f6f6ee5..ea4a376c049 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md @@ -9,41 +9,41 @@ keywords: [开源监控系统, 开源数据库监控, 达梦数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, 
http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5236。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5236。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | ------------------ | -| PORT_NUM | 无 | 数据库暴露服务端口 | -| CTL_PATH | 无 | 控制文件路径 | -| MAX_SESSIONS | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|-----------| +| PORT_NUM | 无 | 数据库暴露服务端口 | +| CTL_PATH | 无 | 控制文件路径 | +| MAX_SESSIONS | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------ | -| status$ | 无 | DM数据库的开闭状态 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|------------| +| status$ | 无 | DM数据库的开闭状态 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | ---------------------------------------------- | -| dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | -| dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | -| dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------------------------| +| dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | +| dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | +| dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md index 67ea72c177e..303ac47444f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md @@ -13,24 +13,24 @@ keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | -|-----------|------------------------------------------------| -| DNS服务器IP | 被监控的IPv4、IPv6。注意⚠️不包含协议头(例如:https://,http://)。 | -| 监控名称 | 标识此监控的名称,名称需要是唯一的。 | -| 端口 | DNS服务对外提供的端口,默认为53。 | -| 域名解析的地址 | 域名解析的地址。 | -| 连接超时时间 | 设置连接DNS服务器的超时时间,单位ms毫秒,默认6000毫秒。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------------------| +| DNS服务器IP | 被监控的IPv4、IPv6。注意⚠️不包含协议头(例如:https://,http://)。 | +| 监控名称 | 标识此监控的名称,名称需要是唯一的。 | +| 端口 | DNS服务对外提供的端口,默认为53。 | +| 域名解析的地址 | 域名解析的地址。 | +| 连接超时时间 | 设置连接DNS服务器的超时时间,单位ms毫秒,默认6000毫秒。 | | 查询类别 | DNS查询的类别. 可选的值包括 `IN`, `CHAOS`, `HESIOD`, `NONE`, 和 `ANY`。默认值:IN | -| 是否使用tcp协议 | 设置DNS查询是否使用tcp协议。 | -| 采集间隔 | 监控周期性数据采集的时间间隔,单位:秒,最小可设置为30秒。 | -| 绑定标签 | 用于对监控资源进行分类管理。 | -| 描述备注 | 用于更多关于标识和描述此监控的信息,用户可以在此处添加备注信息。 | +| 是否使用tcp协议 | 设置DNS查询是否使用tcp协议。 | +| 采集间隔 | 监控周期性数据采集的时间间隔,单位:秒,最小可设置为30秒。 | +| 绑定标签 | 用于对监控资源进行分类管理。 | +| 描述备注 | 用于更多关于标识和描述此监控的信息,用户可以在此处添加备注信息。 | ### 采集指标 #### 指标集:Header -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|-----------------| | 响应时间 | 毫秒 | DNS服务器响应请求所需的时间 | | 操作码 | 无 | 当前消息的类型 | @@ -43,27 +43,31 @@ keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] ### 指标集: Question -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|-----------------------------------| | Section | 无 | 请求记录信息,其中包含查询的域名,资源类型,资源记录类,附加信息。 | ### 指标集: Answer -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|----------------------------------------| | Section0 | 无 | 请求响应信息,其中包含查询的域名,TTL,资源记录类,资源类型,查询的结果。 | + > Answer 指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 ### 指标集: Authority -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 
| 指标帮助描述 | |----------|------|------------------------------------------------------------------| | Section0 | 无 | 域名的SOA(Start of Authority,权威区域起始)记录,其中包含查询的域名,TTL,资源类型,资源记录类等信息。 | + > Authority 指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 ### 指标集: Additional -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|-------------| | Section0 | 无 | DNS查询的附加信息。 | + > Additional 指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md index 221776b2426..c546b46fd2c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md @@ -7,7 +7,6 @@ keywords: [开源监控系统, 开源容器监控, Docker容器监控] > 对Docker容器的通用性能指标进行采集监控。 - ## 监控前操作 如果想要监控 `Docker` 中的容器信息,则需要按照一下步骤打开端口,让采集请求获取到对应的信息。 @@ -44,63 +43,60 @@ firewall-cmd --zone=public --add-port=2375/tcp --permanent firewall-cmd --reload ``` - - - - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为2375。 | -| 查询超时时间 | 设置获取Docker服务器API接口时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 器名称 | 一般是监控所有运行中的容器信息。 | -| 用户名 | 连接用户名,可选 | -| 密码 | 连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为2375。 | +| 查询超时时间 | 设置获取Docker服务器API接口时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 器名称 | 一般是监控所有运行中的容器信息。 | +| 用户名 | 连接用户名,可选 | +| 密码 | 连接密码,可选 | +| URL | 
数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:system -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- | -------------------------------------- | -| Name | 无 | 服务器名称 | -| version | 无 | docker本版号 | -| os | 无 | 服务器版本 例如:linux x86_64 | -| root_dir | 无 | docker文件夹目录 例如:/var/lib/docker | -| containers | 无 | 容器总数(在运行+未运行) | -| containers_running | 无 | 运行中的容器数目 | -| containers_paused | 无 | 暂停中的容器数目 | -| images | 无 | 容器景象的总数目。 | -| ncpu | 无 | NCPU | -| mem_total | MB | 占用的内存总大小 | -| system_time | 无 | 系统时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------------------------------| +| Name | 无 | 服务器名称 | +| version | 无 | docker本版号 | +| os | 无 | 服务器版本 例如:linux x86_64 | +| root_dir | 无 | docker文件夹目录 例如:/var/lib/docker | +| containers | 无 | 容器总数(在运行+未运行) | +| containers_running | 无 | 运行中的容器数目 | +| containers_paused | 无 | 暂停中的容器数目 | +| images | 无 | 容器景象的总数目。 | +| ncpu | 无 | NCPU | +| mem_total | MB | 占用的内存总大小 | +| system_time | 无 | 系统时间 | #### 指标集合:containers -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------- | -| id | 无 | Docker中容器的ID | -| name | 无 | Docker容器中的容器名称 | -| image | 无 | Docker容器使用的镜像 | -| command | 无 | Docker中的默认启动命令 | -| state | 无 | Docker中容器的运行状态 | -| status | 无 | Docker容器中的更新时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| id | 无 | Docker中容器的ID | +| name | 无 | Docker容器中的容器名称 | +| image | 无 | Docker容器使用的镜像 | +| command | 无 | Docker中的默认启动命令 | +| state | 无 | Docker中容器的运行状态 | +| status | 无 | Docker容器中的更新时间 | #### 指标集合:stats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- | ---------------------------- | -| name | 无 | Docker容器中的名字 | -| available_memory | MB | Docker容器可以利用的内存大小 | -| used_memory | MB | Docker容器已经使用的内存大小 | -| memory_usage | 无 | Docker容器的内存使用率 | -| cpu_delta | 无 | Docker容器已经使用的CPU数量 | -| number_cpus | 无 | Docker容器可以使用的CPU数量 | -| 
cpu_usage | 无 | Docker容器CPU使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|--------------------| +| name | 无 | Docker容器中的名字 | +| available_memory | MB | Docker容器可以利用的内存大小 | +| used_memory | MB | Docker容器已经使用的内存大小 | +| memory_usage | 无 | Docker容器的内存使用率 | +| cpu_delta | 无 | Docker容器已经使用的CPU数量 | +| number_cpus | 无 | Docker容器可以使用的CPU数量 | +| cpu_usage | 无 | Docker容器CPU使用率 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md index 0e4803826be..10a66aa6853 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md @@ -15,118 +15,118 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库FE监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |-----------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8030 , 取值自 `http_port` 配置项的值 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8030 , 取值自 `http_port` 配置项的值 | | 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认6000毫秒 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 绑定标签 | 您可以使用标签进行监控资源的分类管理, 例如给资源分别绑定生产环境、测试环境的标签。 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 绑定标签 | 您可以使用标签进行监控资源的分类管理, 例如给资源分别绑定生产环境、测试环境的标签。 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:doris_fe_connection_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------- | -| value | 无 | 当前FE的MySQL端口连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 当前FE的MySQL端口连接数 | #### 指标集合:doris_fe_edit_log_clean 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| success | 无 
| 清理历史元数据日志成功的次数 | -| failed | 无 | 清理历史元数据日志失败的次数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| success | 无 | 清理历史元数据日志成功的次数 | +| failed | 无 | 清理历史元数据日志失败的次数 | #### 指标集合:doris_fe_edit_log -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------- | -------- | ------------------------ | -| write | 无 | 元数据日志写入次数的计数 | -| read | 无 | 元数据日志读取次数的计数 | -| current | 无 | 元数据日志当前数量 | -| accumulated_bytes | 字节 | 元数据日志写入量的累计值 | -| current_bytes | 字节 | 元数据日志当前值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|--------------| +| write | 无 | 元数据日志写入次数的计数 | +| read | 无 | 元数据日志读取次数的计数 | +| current | 无 | 元数据日志当前数量 | +| accumulated_bytes | 字节 | 元数据日志写入量的累计值 | +| current_bytes | 字节 | 元数据日志当前值 | #### 指标集合:doris_fe_image_clean 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------- | -| success | 无 | 清理历史元数据镜像文件成功的次数 | -| failed | 无 | 清理历史元数据镜像文件失败的次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|------------------| +| success | 无 | 清理历史元数据镜像文件成功的次数 | +| failed | 无 | 清理历史元数据镜像文件失败的次数 | #### 指标集合:doris_fe_image_write 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| success | 无 | 生成元数据镜像文件成功的次数 | -| failed | 无 | 生成元数据镜像文件失败的次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| success | 无 | 生成元数据镜像文件成功的次数 | +| failed | 无 | 生成元数据镜像文件失败的次数 | #### 指标集合:doris_fe_query_err -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------- | -| value | 无 | 错误查询的累积值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------| +| value | 无 | 错误查询的累积值 | #### 指标集合:doris_fe_max_journal_id -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 当前FE节点最大元数据日志ID。如果是Master FE,则是当前写入的最大ID,如果是非Master FE,则代表当前回放的元数据日志最大ID。用于观察多个FE之间的 id 是否差距过大。过大则表示元数据同步出现问题 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|-------|------|---------------------------------------------------------------------------------------------------------------| +| value | 无 | 当前FE节点最大元数据日志ID。如果是Master FE,则是当前写入的最大ID,如果是非Master FE,则代表当前回放的元数据日志最大ID。用于观察多个FE之间的 id 是否差距过大。过大则表示元数据同步出现问题 | #### 指标集合:doris_fe_max_tablet_compaction_score -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 所有BE节点中最大的 compaction score 值。该值可以观测当前集群最大的 compaction score,以判断是否过高。如过高则可能出现查询或写入延迟 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------------------------------------------------| +| value | 无 | 所有BE节点中最大的 compaction score 值。该值可以观测当前集群最大的 compaction score,以判断是否过高。如过高则可能出现查询或写入延迟 | #### 指标集合:doris_fe_qps -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------ | -| value | 无 | 当前FE每秒查询数量(仅统计查询请求) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 当前FE每秒查询数量(仅统计查询请求) | #### 指标集合:doris_fe_query_err_rate -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------- | -| value | 无 | 每秒错误查询数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------| +| value | 无 | 每秒错误查询数 | #### 指标集合:doris_fe_report_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | BE的各种定期汇报任务在FE端的队列长度,该值反映了汇报任务在 Master FE 节点上的阻塞程度,数值越大,表示FE处理能力不足 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------------------------------| +| value | 无 | BE的各种定期汇报任务在FE端的队列长度,该值反映了汇报任务在 Master FE 节点上的阻塞程度,数值越大,表示FE处理能力不足 | #### 指标集合:doris_fe_rps -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------------------------- | -| value | 无 | 当前FE每秒请求数量(包含查询以及其他各类语句) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------| +| value | 无 | 当前FE每秒请求数量(包含查询以及其他各类语句) | #### 指标集合:doris_fe_scheduled_tablet_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- 
| -------- | ------------------------------------------------------------ | -| value | 无 | Master FE节点正在调度的 tablet 数量。包括正在修复的副本和正在均衡的副本,该数值可以反映当前集群,正在迁移的 tablet 数量。如果长时间有值,说明集群不稳定 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------------------------------------------------------| +| value | 无 | Master FE节点正在调度的 tablet 数量。包括正在修复的副本和正在均衡的副本,该数值可以反映当前集群,正在迁移的 tablet 数量。如果长时间有值,说明集群不稳定 | #### 指标集合:doris_fe_txn_status 可以观测各个状态下导入事务的数量,来判断是否有堆积 -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------- | -| unknown | 无 | 未知 | -| prepare | 无 | 准备中 | -| committed | 无 | 已提交 | -| visible | 无 | 可见 | -| aborted | 无 | 已中止/已撤销 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|---------| +| unknown | 无 | 未知 | +| prepare | 无 | 准备中 | +| committed | 无 | 已提交 | +| visible | 无 | 可见 | +| aborted | 无 | 已中止/已撤销 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md index e3e143c17ed..8c2f1e290e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md @@ -7,15 +7,15 @@ keywords: [开源监控系统, 开源中间件监控, DynamicTp线程池监控] > 对DynamicTp actuator 暴露的线程池性能指标进行采集监控。 -### 前提 +### 前提 -1. 集成使用 `DynamicTp` +1. 集成使用 `DynamicTp` `DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 -集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) +集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) -2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 +2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 ```yaml management: @@ -24,7 +24,8 @@ management: exposure: include: '*' ``` -测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: + +测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: ```json [ @@ -58,45 +59,44 @@ management: ] ``` -3. 在HertzBeat中间件监控下添加DynamicTp监控即可 - +3. 
在HertzBeat中间件监控下添加DynamicTp监控即可 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 应用服务对外提供的端口,默认为8080。 | | 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | - | Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| Base Path | 暴露接口路径前缀,默认 /actuator | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:thread_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------------------| -| pool_name | 无 | 线程池名称 | -| core_pool_size | 无 | 核心线程数 | -| maximum_pool_size | 无 | 最大线程数 | -| queue_type | 无 | 任务队列类型 | -| queue_capacity | MB | 任务队列容量 | -| queue_size | 无 | 任务队列当前占用大小 | -| fair | 无 | 队列模式,SynchronousQueue会用到 | -| queue_remaining_capacity | MB | 任务队列剩余大小 | -| active_count | 无 | 活跃线程数 | -| task_count | 无 | 任务总数 | -| completed_task_count | 无 | 已完成任务数 | -| largest_pool_size | 无 | 历史最大线程数 | -| pool_size | 无 | 当前线程数 | -| wait_task_count | 无 | 等待执行任务数 | -| reject_count | 无 | 拒绝任务数 | -| reject_handler_name | 无 | 拒绝策略类型 | -| dynamic | 无 | 是否动态线程池 | -| run_timeout_count | 无 | 运行超时任务数 | -| queue_timeout_count | 无 | 等待超时任务数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|--------------------------| +| pool_name | 无 | 线程池名称 | +| core_pool_size | 无 | 核心线程数 | +| maximum_pool_size | 无 | 最大线程数 | +| queue_type | 无 | 任务队列类型 | +| queue_capacity | MB | 任务队列容量 | +| queue_size | 无 | 任务队列当前占用大小 | +| fair | 无 | 队列模式,SynchronousQueue会用到 | +| queue_remaining_capacity | MB | 任务队列剩余大小 | +| active_count 
| 无 | 活跃线程数 | +| task_count | 无 | 任务总数 | +| completed_task_count | 无 | 已完成任务数 | +| largest_pool_size | 无 | 历史最大线程数 | +| pool_size | 无 | 当前线程数 | +| wait_task_count | 无 | 等待执行任务数 | +| reject_count | 无 | 拒绝任务数 | +| reject_handler_name | 无 | 拒绝策略类型 | +| dynamic | 无 | 是否动态线程池 | +| run_timeout_count | 无 | 运行超时任务数 | +| queue_timeout_count | 无 | 等待超时任务数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md index e64d956bc77..a0b3082cc1a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |---------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -27,7 +27,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] #### 指标集合:health -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------------------|------|----------| | cluster_name | 无 | 集群名称 | | status | 无 | 集群状态 | @@ -41,7 +41,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|--------| | total | 无 | 节点数 | | successful | 无 | 在线节点数 | @@ -49,7 +49,7 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] #### 指标集合:nodes_detail -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------------|------|---------| | node_name | 无 | 节点名称 | | ip | 无 | IP地址 | @@ -61,3 +61,4 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] | disk_free | GB | 磁盘剩余容量 | | disk_total | GB | 磁盘总容量 | | disk_used_percent | % | 磁盘使用率 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md index 258f7bb36dd..6c894671cc6 
100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 无 | CPU内核数量 | @@ -47,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|----------| | total | Mb | 总内存容量 | | used | Mb | 用户程序内存量 | @@ -58,7 +58,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:磁盘信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|-----------| | disk_num | 无 | 磁盘总数 | | partition_num | 无 | 分区总数 | @@ -68,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:网卡信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | interface_name | 无 | 网卡名称 | | receive_bytes | Mb | 入站数据流量 | @@ -76,7 +76,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -88,7 +88,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -99,9 
+99,10 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md index ef260f4dc2c..177c41874fb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md @@ -9,7 +9,7 @@ keywords: [开源监控系统, 开源 Flink 监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -26,7 +26,7 @@ keywords: [开源监控系统, 开源 Flink 监控] #### 指标集合:overview -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|----------| | slots_total | 个 | 插槽总数 | | slots_used | 个 | 已用插槽数 | @@ -34,5 +34,3 @@ keywords: [开源监控系统, 开源 Flink 监控] | jobs_running | 个 | 正在运行的任务数 | | jobs_failed | 个 | 已经失败的任务数 | - - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md index 53f7eceb7c3..01313bd7ae6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md @@ -7,10 +7,9 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 > 对FreeBSD操作系统的通用性能指标 (系统信息、CPU、内存、磁盘、网卡、文件系统、TOP资源进程等) 进行采集监控。 - ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -29,7 +28,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, 
FreeBSD操作系统监 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -37,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 核数 | CPU内核数量 | @@ -48,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|---------| | physmem | Mb | 物理内存 | | usermem | Mb | 用户程序内存量 | @@ -57,7 +56,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -69,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -80,7 +79,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 内存占用率 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md index c86dfb14a7b..d421b6a78eb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md @@ -11,7 +11,7 @@ keywords: [ 开源监控系统, 开源FTP服务器监控工具, 监控FTP指标 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|--------------------------------------| | 目标Host | 被监控的IPv4、IPv6。注意⚠️不包含协议头(例如:ftp://)。 | | 监控名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,8 @@ keywords: [ 开源监控系统, 开源FTP服务器监控工具, 监控FTP指标 #### 指标集合:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 
| 指标单位 | 指标帮助描述 | |------|------|------------------| | 活动状态 | 无 | 检查目录是否存在,且具有访问权限 | | 响应时间 | ms | 连接FTP响应时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md index f7fc4c150b7..9d39da7c9e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md @@ -7,29 +7,29 @@ keywords: [开源监控系统, 开源网站监控, SiteMap监控] > 对网站的全部页面监测是否可用 > 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 -> 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 +> 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 网站地图 | 网站SiteMap地图地址的相对路径,例如:/sitemap.xml。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|---------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 网站地图 | 网站SiteMap地图地址的相对路径,例如:/sitemap.xml。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| url | 无 | 网页的URL路径 | -| statusCode | 无 | 请求此网页的响应HTTP状态码 | -| responseTime | ms毫秒 | 网站响应时间 | -| errorMsg | 无 | 请求此网站反馈的错误信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|-----------------| +| url | 无 | 网页的URL路径 | +| statusCode | 无 | 请求此网页的响应HTTP状态码 | +| responseTime | ms毫秒 | 
网站响应时间 | +| errorMsg | 无 | 请求此网站反馈的错误信息 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md index 40ad5765ca8..48dc239b69a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md @@ -1,30 +1,30 @@ --- id: guide title: 帮助中心 -sidebar_label: 帮助入门 +sidebar_label: 帮助入门 --- > 易用友好的实时监控工具,无需Agent,强大自定义监控能力。 -> 使用过程中的帮助文档,辅助信息。 +> 使用过程中的帮助文档,辅助信息。 ## 🔬 监控服务 > 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 > 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 -### 应用服务监控 +### 应用服务监控 -  👉 [网站监测](website)
-  👉 [HTTP API](api)
-  👉 [PING连通性](ping)
-  👉 [端口可用性](port)
-  👉 [全站监控](fullsite)
-  👉 [SSL证书有效期](ssl_cert)
-  👉 [JVM虚拟机](jvm)
-  👉 [SpringBoot2.0](springboot2)
-  👉 [DNS服务器](dns)
-  👉 [FTP服务器](ftp)
-  👉 [Websocket](websocket)
+ 👉 [网站监测](website)
+ 👉 [HTTP API](api)
+ 👉 [PING连通性](ping)
+ 👉 [端口可用性](port)
+ 👉 [全站监控](fullsite)
+ 👉 [SSL证书有效期](ssl_cert)
+ 👉 [JVM虚拟机](jvm)
+ 👉 [SpringBoot2.0](springboot2)
+ 👉 [DNS服务器](dns)
+ 👉 [FTP服务器](ftp)
+ 👉 [Websocket](websocket)
### 应用程序监控 @@ -34,92 +34,90 @@ sidebar_label: 帮助入门  👉 [SpringBoot3.0](springboot3)
 👉 [DynamicTp线程池](dynamic_tp)
+### 数据库监控 -### 数据库监控 - -  👉 [MYSQL数据库监控](mysql)
-  👉 [MariaDB数据库监控](mariadb)
-  👉 [PostgreSQL数据库监控](postgresql)
-  👉 [SqlServer数据库监控](sqlserver)
-  👉 [Oracle数据库监控](oracle)
-  👉 [达梦数据库监控](dm)
-  👉 [OpenGauss数据库监控](opengauss)
-  👉 [IoTDB数据库监控](iotdb)
-  👉 [TiDB数据库监控](tidb)
-  👉 [MongoDB数据库监控](mongodb)
-  👉 [NebulaGraph集群监控](nebulagraph_cluster)
+ 👉 [MYSQL数据库监控](mysql)
+ 👉 [MariaDB数据库监控](mariadb)
+ 👉 [PostgreSQL数据库监控](postgresql)
+ 👉 [SqlServer数据库监控](sqlserver)
+ 👉 [Oracle数据库监控](oracle)
+ 👉 [达梦数据库监控](dm)
+ 👉 [OpenGauss数据库监控](opengauss)
+ 👉 [IoTDB数据库监控](iotdb)
+ 👉 [TiDB数据库监控](tidb)
+ 👉 [MongoDB数据库监控](mongodb)
+ 👉 [NebulaGraph集群监控](nebulagraph_cluster)
### 缓存监控  👉 [Redis](redis)
 👉 [Memcached](memcached)
-### 操作系统监控 +### 操作系统监控 -  👉 [Linux操作系统监控](linux)
-  👉 [Windows操作系统监控](windows)
-  👉 [Ubuntu操作系统监控](ubuntu)
-  👉 [Centos操作系统监控](centos)
-  👉 [FreeBSD操作系统监控](freebsd)
-  👉 [RedHat操作系统监控](redhat)
-  👉 [RockyLinux操作系统监控](rockylinux)
-  👉 [EulerOS操作系统监控](euleros)
+ 👉 [Linux操作系统监控](linux)
+ 👉 [Windows操作系统监控](windows)
+ 👉 [Ubuntu操作系统监控](ubuntu)
+ 👉 [Centos操作系统监控](centos)
+ 👉 [FreeBSD操作系统监控](freebsd)
+ 👉 [RedHat操作系统监控](redhat)
+ 👉 [RockyLinux操作系统监控](rockylinux)
+ 👉 [EulerOS操作系统监控](euleros)
### 中间件监控 -  👉 [Zookeeper](zookeeper)
-  👉 [Kafka](kafka)
-  👉 [Tomcat](tomcat)
-  👉 [ShenYu](shenyu)
-  👉 [DynamicTp](dynamic_tp)
-  👉 [RabbitMQ](rabbitmq)
-  👉 [ActiveMQ](activemq)
-  👉 [Jetty](jetty)
-  👉 [Nacos](nacos)
+ 👉 [Zookeeper](zookeeper)
+ 👉 [Kafka](kafka)
+ 👉 [Tomcat](tomcat)
+ 👉 [ShenYu](shenyu)
+ 👉 [DynamicTp](dynamic_tp)
+ 👉 [RabbitMQ](rabbitmq)
+ 👉 [ActiveMQ](activemq)
+ 👉 [Jetty](jetty)
+ 👉 [Nacos](nacos)
### 云原生监控 -  👉 [Docker](docker)
-  👉 [Kubernetes](kubernetes)
+ 👉 [Docker](docker)
+ 👉 [Kubernetes](kubernetes)
### 大数据监控 -  👉 [Clickhouse](clickhouse)
-  👉 [ElasticSearch](elasticsearch)
-  👉 [Flink](flink)
+ 👉 [Clickhouse](clickhouse)
+ 👉 [ElasticSearch](elasticsearch)
+ 👉 [Flink](flink)
### Ai大模型监控  👉 [OpenAi](openai)
-### 网络监控 - -  👉 [华为通用交换机](huawei_switch)
+### 网络监控 + + 👉 [华为通用交换机](huawei_switch)
### 服务器监控 -## 💡 告警服务 +## 💡 告警服务 > 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 -> 告警服务的定位是阈值准确及时触发,告警通知及时可达。 +> 告警服务的定位是阈值准确及时触发,告警通知及时可达。 -### 告警中心 +### 告警中心 -> 已触发的告警信息中心,提供告警删除,告警处理,标记未处理,告警级别状态等查询过滤。 +> 已触发的告警信息中心,提供告警删除,告警处理,标记未处理,告警级别状态等查询过滤。 -### 告警配置 +### 告警配置 > 指标阈值配置,提供表达式形式的指标阈值配置,可设置告警级别,触发次数,告警通知模版和是否启用,关联监控等功能。 详见 👉 [阈值告警](alert_threshold)
-   👉 [阈值表达式](alert_threshold_expr) +   👉 [阈值表达式](alert_threshold_expr) -### 告警通知 +### 告警通知 > 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 > 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 -> 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。 - +> 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。  👉 [配置邮箱通知](alert_email)
 👉 [配置 Webhook 通知](alert_webhook)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md index fec361e2366..bda83b006e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md @@ -37,57 +37,54 @@ export HADOOP_OPTS= "$HADOOP_OPTS ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache (限JDK8及以下) -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | 
----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md index f75d5cc8e98..e732bf45fd6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md @@ -4,6 +4,7 @@ title: 监控:Hbase Master监控 sidebar_label: Apache Hbase Master keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] --- + > 对Hbase Master的通用性能指标进行采集监控 **使用协议:HTTP** @@ -14,49 +15,46 @@ keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | -------------------------------------------------------------------- | -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 端口 | hbase master的端口号,默认为16010。即:`hbase.master.info.port`参数值 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 
更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|---------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 端口 | hbase master的端口号,默认为16010。即:`hbase.master.info.port`参数值 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- |----| ---------------------------- | -| numRegionServers | 无 | 当前存活的 RegionServer 个数 | -| numDeadRegionServers | 无 | 当前Dead的 RegionServer 个数 | -| averageLoad | 无 | 集群平均负载 | -| clusterRequests | 无 | 集群请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|-------------------------| +| numRegionServers | 无 | 当前存活的 RegionServer 个数 | +| numDeadRegionServers | 无 | 当前Dead的 RegionServer 个数 | +| averageLoad | 无 | 集群平均负载 | +| clusterRequests | 无 | 集群请求数量 | #### 指标集合:Rit - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------- | ------ | ------------------- | -| ritCount | 无 | 当前的 RIT 数量 | -| ritCountOverThreshold | 无 | 超过阈值的 RIT 数量 | -| ritOldestAge | ms | 最老的RIT的持续时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|--------------| +| ritCount | 无 | 当前的 RIT 数量 | +| ritCountOverThreshold | 无 | 超过阈值的 RIT 数量 | +| ritOldestAge | ms | 最老的RIT的持续时间 | #### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------------- | ----- | ------------------------ | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|--------------------| | liveRegionServers | 无 | 当前活跃RegionServer列表 | | deadRegionServers | 无 | 当前离线RegionServer列表 | -| zookeeperQuorum | 无 | Zookeeper列表 | -| masterHostName | 无 | Master节点 | -| BalancerCluster_num_ops | 无 | 集群负载均衡次数 | -| numActiveHandler | 无 | RPC句柄数 | -| receivedBytes | MB | 集群接收数据量 | -| sentBytes | MB | 集群发送数据量(MB) | -| clusterRequests | 无 | 集群总请求数量 | +| zookeeperQuorum | 无 | Zookeeper列表 | +| masterHostName | 无 
| Master节点 | +| BalancerCluster_num_ops | 无 | 集群负载均衡次数 | +| numActiveHandler | 无 | RPC句柄数 | +| receivedBytes | MB | 集群接收数据量 | +| sentBytes | MB | 集群发送数据量(MB) | +| clusterRequests | 无 | 集群总请求数量 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md index 44d5b533932..1c1cfdf1802 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md @@ -4,6 +4,7 @@ title: 监控 Hbase RegionServer监控 sidebar_label: Apache Hbase RegionServer keywords: [开源监控系统, 开源数据库监控, RegionServer监控] --- + > 对Hbase RegionServer的通用性能指标进行采集监控 **使用协议:HTTP** @@ -14,16 +15,15 @@ keywords: [开源监控系统, 开源数据库监控, RegionServer监控] ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ |----------------------------------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 端口 | hbase regionserver的端口号,默认为16030。即:`hbase.regionserver.info.port`参数值 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|---------------------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 端口 | hbase regionserver的端口号,默认为16030。即:`hbase.regionserver.info.port`参数值 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 @@ -31,67 +31,64 @@ keywords: [开源监控系统, 开源数据库监控, RegionServer监控] #### 指标集合:server - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- |-------|------------------------------------------| -| regionCount | 无 | Region数量 
| -| readRequestCount | 无 | 重启集群后的读请求数量 | -| writeRequestCount | 无 | 重启集群后的写请求数量 | -| averageRegionSize | MB | 平均Region大小 | -| totalRequestCount | 无 | 全部请求数量 | -| ScanTime_num_ops | 无 | Scan 请求总量 | -| Append_num_ops | 无 | Append 请求量 | -| Increment_num_ops | 无 | Increment请求量 | -| Get_num_ops | 无 | Get 请求量 | -| Delete_num_ops | 无 | Delete 请求量 | -| Put_num_ops | 无 | Put 请求量 | -| ScanTime_mean | 无 | 平均 Scan 请求时间 | -| ScanTime_min | 无 | 最小 Scan 请求时间 | -| ScanTime_max | 无 | 最大 Scan 请求时间 | -| ScanSize_mean | bytes | 平均 Scan 请求大小 | -| ScanSize_min | 无 | 最小 Scan 请求大小 | -| ScanSize_max | 无 | 最大 Scan 请求大小 | -| slowPutCount | 无 | 慢操作次数/Put | -| slowGetCount | 无 | 慢操作次数/Get | -| slowAppendCount | 无 | 慢操作次数/Append | -| slowIncrementCount | 无 | 慢操作次数/Increment | -| slowDeleteCount | 无 | 慢操作次数/Delete | -| blockCacheSize | 无 | 缓存块内存占用大小 | -| blockCacheCount | 无 | 缓存块数量_Block Cache 中的 Block 数量 | -| blockCacheExpressHitPercent | 无 | 读缓存命中率 | -| memStoreSize | 无 | Memstore 大小 | -| FlushTime_num_ops | 无 | RS写磁盘次数/Memstore flush 写磁盘次数 | -| flushQueueLength | 无 | Region Flush 队列长度 | -| flushedCellsSize | 无 | flush到磁盘大小 | -| storeFileCount | 无 | Storefile 个数 | -| storeCount | 无 | Store 个数 | -| storeFileSize | 无 | Storefile 大小 | -| compactionQueueLength | 无 | Compaction 队列长度 | -| percentFilesLocal | 无 | Region 的 HFile 位于本地 HDFS Data Node的比例 | -| percentFilesLocalSecondaryRegions | 无 | Region 副本的 HFile 位于本地 HDFS Data Node的比例 | -| hlogFileCount | 无 | WAL 文件数量 | -| hlogFileSize | 无 | WAL 文件大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------------------|-------|-----------------------------------------| +| regionCount | 无 | Region数量 | +| readRequestCount | 无 | 重启集群后的读请求数量 | +| writeRequestCount | 无 | 重启集群后的写请求数量 | +| averageRegionSize | MB | 平均Region大小 | +| totalRequestCount | 无 | 全部请求数量 | +| ScanTime_num_ops | 无 | Scan 请求总量 | +| Append_num_ops | 无 | Append 请求量 | +| Increment_num_ops | 无 | Increment请求量 | +| Get_num_ops | 无 | Get 请求量 | +| Delete_num_ops | 无 | Delete 请求量 | +| Put_num_ops | 
无 | Put 请求量 | +| ScanTime_mean | 无 | 平均 Scan 请求时间 | +| ScanTime_min | 无 | 最小 Scan 请求时间 | +| ScanTime_max | 无 | 最大 Scan 请求时间 | +| ScanSize_mean | bytes | 平均 Scan 请求大小 | +| ScanSize_min | 无 | 最小 Scan 请求大小 | +| ScanSize_max | 无 | 最大 Scan 请求大小 | +| slowPutCount | 无 | 慢操作次数/Put | +| slowGetCount | 无 | 慢操作次数/Get | +| slowAppendCount | 无 | 慢操作次数/Append | +| slowIncrementCount | 无 | 慢操作次数/Increment | +| slowDeleteCount | 无 | 慢操作次数/Delete | +| blockCacheSize | 无 | 缓存块内存占用大小 | +| blockCacheCount | 无 | 缓存块数量_Block Cache 中的 Block 数量 | +| blockCacheExpressHitPercent | 无 | 读缓存命中率 | +| memStoreSize | 无 | Memstore 大小 | +| FlushTime_num_ops | 无 | RS写磁盘次数/Memstore flush 写磁盘次数 | +| flushQueueLength | 无 | Region Flush 队列长度 | +| flushedCellsSize | 无 | flush到磁盘大小 | +| storeFileCount | 无 | Storefile 个数 | +| storeCount | 无 | Store 个数 | +| storeFileSize | 无 | Storefile 大小 | +| compactionQueueLength | 无 | Compaction 队列长度 | +| percentFilesLocal | 无 | Region 的 HFile 位于本地 HDFS Data Node的比例 | +| percentFilesLocalSecondaryRegions | 无 | Region 副本的 HFile 位于本地 HDFS Data Node的比例 | +| hlogFileCount | 无 | WAL 文件数量 | +| hlogFileSize | 无 | WAL 文件大小 | #### 指标集合:IPC - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------- | ------ | ------------------- | -| numActiveHandler | 无 | 当前的 RIT 数量 | -| NotServingRegionException | 无 | 超过阈值的 RIT 数量 | -| RegionMovedException | ms | 最老的RIT的持续时间 | -| RegionTooBusyException | ms | 最老的RIT的持续时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|--------------| +| numActiveHandler | 无 | 当前的 RIT 数量 | +| NotServingRegionException | 无 | 超过阈值的 RIT 数量 | +| RegionMovedException | ms | 最老的RIT的持续时间 | +| RegionTooBusyException | ms | 最老的RIT的持续时间 | #### 指标集合:JVM - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------------- | ----- | ------------------------ | -| MemNonHeapUsedM | 无 | 当前活跃RegionServer列表 | -| MemNonHeapCommittedM | 无 | 当前离线RegionServer列表 | -| MemHeapUsedM | 无 | Zookeeper列表 | -| MemHeapCommittedM | 无 | Master节点 | -| MemHeapMaxM | 无 | 集群负载均衡次数 | -| MemMaxM | 无 | 
RPC句柄数 | -| GcCount | MB | 集群接收数据量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|--------------------| +| MemNonHeapUsedM | 无 | 当前活跃RegionServer列表 | +| MemNonHeapCommittedM | 无 | 当前离线RegionServer列表 | +| MemHeapUsedM | 无 | Zookeeper列表 | +| MemHeapCommittedM | 无 | Master节点 | +| MemHeapMaxM | 无 | 集群负载均衡次数 | +| MemMaxM | 无 | RPC句柄数 | +| GcCount | MB | 集群接收数据量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md index efb05494290..db494acbb8e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md @@ -15,42 +15,43 @@ keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS DataN ## 配置参数 -| 参数名称 | 参数帮助描述 | -| ---------------- |---------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | -| 端口 | Apache HDFS DataNode 的监控端口号,默认为50075。 | -| 查询超时时间 | 查询 Apache HDFS DataNode 的超时时间,单位毫秒,默认6000毫秒。 | -| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | -| 是否探测 | 新增监控前是否先探测检查监控可用性。 | -| 描述备注 | 此监控的更多描述和备注信息。 | +| 参数名称 | 参数帮助描述 | +|--------|----------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | Apache HDFS DataNode 的监控端口号,默认为50075。 | +| 查询超时时间 | 查询 Apache HDFS DataNode 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 是否探测 | 新增监控前是否先探测检查监控可用性。 | +| 描述备注 | 此监控的更多描述和备注信息。 | ### 采集指标 #### 指标集合:FSDatasetState -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------------- | -------- | ------------------------------------ | -| DfsUsed | GB | DataNode HDFS使用量 | -| Remaining | GB | DataNode HDFS剩余空间 | -| Capacity | GB | DataNode HDFS空间总量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|-------------------| +| DfsUsed | GB | DataNode HDFS使用量 | +| Remaining | GB | DataNode HDFS剩余空间 | +| Capacity | GB | DataNode HDFS空间总量 | #### 指标集合:JvmMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| 
------------------------ | -------- | ------------------------------------ | -| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | -| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | -| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | -| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | -| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | -| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | -| ThreadsRunnable | 个 | 处于 RUNNABLE 状态的线程数量 | -| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | -| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | -| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|-------------------------------| +| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | +| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | +| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | +| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | +| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | +| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | +| ThreadsRunnable | 个 | 处于 RUNNABLE 状态的线程数量 | +| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | +| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | +| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | #### 指标集合:runtime -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------| -------- | ----------------- | -| StartTime | | 启动时间 | \ No newline at end of file +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| StartTime | | 启动时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md index 26fd5e985af..66343c11cd2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md @@ -15,79 +15,79 @@ keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS NameN ## 配置参数 -| 参数名称 | 参数帮助描述 | -| ---------------- |---------------------------------------| -| 目标Host | 
被监控的对端IPV4,IPV6或域名。不带协议头。 | -| 端口 | HDFS NameNode 的监控端口号,默认为50070。 | -| 查询超时时间 | 查询 HDFS NameNode 的超时时间,单位毫秒,默认6000毫秒。 | -| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | -| 是否探测 | 新增监控前是否先探测检查监控可用性。 | -| 描述备注 | 此监控的更多描述和备注信息。 | +| 参数名称 | 参数帮助描述 | +|--------|---------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | HDFS NameNode 的监控端口号,默认为50070。 | +| 查询超时时间 | 查询 HDFS NameNode 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 是否探测 | 新增监控前是否先探测检查监控可用性。 | +| 描述备注 | 此监控的更多描述和备注信息。 | ### 采集指标 #### 指标集合:FSNamesystem -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------------- | -------- | ------------------------------------ | -| CapacityTotal | | 集群存储总容量 | -| CapacityTotalGB | GB | 集群存储总容量 | -| CapacityUsed | | 集群存储已使用容量 | -| CapacityUsedGB | GB | 集群存储已使用容量 | -| CapacityRemaining | | 集群存储剩余容量 | -| CapacityRemainingGB | GB | 集群存储剩余容量 | -| CapacityUsedNonDFS | | 集群非 HDFS 使用容量 | -| TotalLoad | | 整个集群的客户端连接数 | -| FilesTotal | | 集群文件总数量 | -| BlocksTotal | | 总 BLOCK 数量 | -| PendingReplicationBlocks | | 等待被备份的块数量 | -| UnderReplicatedBlocks | | 副本数不够的块数量 | -| CorruptBlocks | | 坏块数量 | -| ScheduledReplicationBlocks | | 安排要备份的块数量 | -| PendingDeletionBlocks | | 等待被删除的块数量 | -| ExcessBlocks | | 多余的块数量 | -| PostponedMisreplicatedBlocks | | 被推迟处理的异常块数量 | -| NumLiveDataNodes | | 活的数据节点数量 | -| NumDeadDataNodes | | 已经标记为 Dead 状态的数据节点数量 | -| NumDecomLiveDataNodes | | 下线且 Live 的节点数量 | -| NumDecomDeadDataNodes | | 下线且 Dead 的节点数量 | -| NumDecommissioningDataNodes | | 正在下线的节点数量 | -| TransactionsSinceLastCheckpoint | | 从上次Checkpoint之后的事务数量 | -| LastCheckpointTime | | 上一次Checkpoint时间 | -| PendingDataNodeMessageCount | | DATANODE 的请求被 QUEUE 在 standby namenode 中的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|---------------------------------------------| +| CapacityTotal | | 集群存储总容量 | +| CapacityTotalGB | GB | 集群存储总容量 | +| CapacityUsed | | 集群存储已使用容量 | +| CapacityUsedGB | GB | 集群存储已使用容量 | +| CapacityRemaining | | 集群存储剩余容量 | +| 
CapacityRemainingGB | GB | 集群存储剩余容量 | +| CapacityUsedNonDFS | | 集群非 HDFS 使用容量 | +| TotalLoad | | 整个集群的客户端连接数 | +| FilesTotal | | 集群文件总数量 | +| BlocksTotal | | 总 BLOCK 数量 | +| PendingReplicationBlocks | | 等待被备份的块数量 | +| UnderReplicatedBlocks | | 副本数不够的块数量 | +| CorruptBlocks | | 坏块数量 | +| ScheduledReplicationBlocks | | 安排要备份的块数量 | +| PendingDeletionBlocks | | 等待被删除的块数量 | +| ExcessBlocks | | 多余的块数量 | +| PostponedMisreplicatedBlocks | | 被推迟处理的异常块数量 | +| NumLiveDataNodes | | 活的数据节点数量 | +| NumDeadDataNodes | | 已经标记为 Dead 状态的数据节点数量 | +| NumDecomLiveDataNodes | | 下线且 Live 的节点数量 | +| NumDecomDeadDataNodes | | 下线且 Dead 的节点数量 | +| NumDecommissioningDataNodes | | 正在下线的节点数量 | +| TransactionsSinceLastCheckpoint | | 从上次Checkpoint之后的事务数量 | +| LastCheckpointTime | | 上一次Checkpoint时间 | +| PendingDataNodeMessageCount | | DATANODE 的请求被 QUEUE 在 standby namenode 中的个数 | #### 指标集合:RPC -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------- | -------- | ---------------------- | -| ReceivedBytes | | 接收数据速率 | -| SentBytes | | 发送数据速率 | -| RpcQueueTimeNumOps | | RPC 调用速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|----------| +| ReceivedBytes | | 接收数据速率 | +| SentBytes | | 发送数据速率 | +| RpcQueueTimeNumOps | | RPC 调用速率 | #### 指标集合:runtime -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------| -------- | ----------------- | -| StartTime | | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| StartTime | | 启动时间 | #### 指标集合:JvmMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------ | -------- | ---------------- | -| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | -| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | -| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | -| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | -| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | -| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | -| GcCountParNew | 次 | 新生代GC消耗时间 | -| GcTimeMillisParNew | 毫秒 | 新生代GC消耗时间 | -| GcCountConcurrentMarkSweep | 毫秒 | 老年代GC次数 | -| GcTimeMillisConcurrentMarkSweep | 个 | 
老年代GC消耗时间 | -| GcCount | 个 | GC次数 | -| GcTimeMillis | 个 | GC消耗时间 | -| ThreadsRunnable | 个 | 处于 BLOCKED 状态的线程数量 | -| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | -| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | -| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|-------------------------------| +| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | +| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | +| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | +| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | +| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | +| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | +| GcCountParNew | 次 | 新生代GC消耗时间 | +| GcTimeMillisParNew | 毫秒 | 新生代GC消耗时间 | +| GcCountConcurrentMarkSweep | 毫秒 | 老年代GC次数 | +| GcTimeMillisConcurrentMarkSweep | 个 | 老年代GC消耗时间 | +| GcCount | 个 | GC次数 | +| GcTimeMillis | 个 | GC消耗时间 | +| ThreadsRunnable | 个 | 处于 BLOCKED 状态的线程数量 | +| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | +| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | +| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md index 2c2136f91cf..3b41d3979c6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md @@ -16,6 +16,7 @@ keywords: [开源监控工具, 开源 Apache Hive 监控工具, 监控 Apache Hi ```shell hive --service metastore & ``` + **2. 
启用 Hive Server2:** ```shell @@ -24,54 +25,53 @@ hive --service hiveserver2 & ### 配置参数 -| 参数名称 | 参数描述 | -| ---------- |--------------------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 10002。 | -| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | +| 参数名称 | 参数描述 | +|----------|--------------------------------------------------------| +| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | +| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | +| 端口 | 数据库提供的默认端口为 10002。 | +| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | ### 采集指标 #### 指标收集: 基本信息 -| 指标名称 | 指标单位 | 指标描述 | -|--------|-------|-----------------------------| -| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | -| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | -| 虚拟机版本 | 无 | 虚拟机的版本。 | -| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | +| 指标名称 | 指标单位 | 指标描述 | +|--------|------|-----------------------------| +| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | +| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | +| 虚拟机版本 | 无 | 虚拟机的版本。 | +| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | #### 指标收集: 环境信息 -| 指标名称 | 指标单位 | 指标描述 | -|------------|-------|--------------------------------| -| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | -| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | -| 操作系统版本 | 无 | 操作系统的版本。 | -| 操作系统架构 | 无 | 操作系统的架构。 | -| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | -| java运行环境版本 | 无 | Java 运行时环境的版本。 | +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|--------------------------------| +| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | +| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | +| 操作系统版本 | 无 | 操作系统的版本。 | +| 操作系统架构 | 无 | 操作系统的架构。 | +| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | +| java运行环境版本 | 无 | Java 运行时环境的版本。 | #### 指标收集: 线程信息 -| 指标名称 | 
指标单位 | 指标描述 | +| 指标名称 | 指标单位 | 指标描述 | |--------|------|------------------------------| | 线程数量 | None | HiveServer2 当前正在使用的线程数。 | -| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | +| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | | 最高线程数 | None | HiveServer2 在任何给定时间使用的最高线程数。 | | 守护线程数 | None | HiveServer2 当前活动的守护线程数。 | #### 指标收集: 代码缓存 -| 指标名称 | 指标单位 | 指标描述 | -|------------|-------------|---------------| -| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | -| 内存池初始内存 | MB | 内存池请求的初始内存量。 | -| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | -| 内存池内存使用量 | MB | 内存池已使用内存量 | - +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|---------------| +| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | +| 内存池初始内存 | MB | 内存池请求的初始内存量。 | +| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | +| 内存池内存使用量 | MB | 内存池已使用内存量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md index 9a4d4400c6c..6bd76f639e8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md @@ -11,7 +11,7 @@ keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |---------------------|-------------------------------------------------------------------------------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -25,8 +25,8 @@ keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] | SNMP privPassphrase | 用于SNMP v3,SNMP 加密密码 | | privPassword 加密方式 | 用于SNMP v3,SNMP 加密算法 | | 查询超时时间 | 设置查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | ### 采集指标 @@ -37,7 +37,7 @@ keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] #### 指标集合:huawei_core -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
|---------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | ifIndex | 无 | 接口索引 该值大于零且全局唯一。 | | ifDescr | 无 | 描述接口的字符串,应该包含制造商、产品名和接口软硬件的版本。 | @@ -47,7 +47,8 @@ keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] | ifInDiscards | 无 | 入方向的被丢弃的报文个数,即使没有错误发生。也将阻止这些报文送往上层协议。 一个可能的原因是释放buffer的空间。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | | ifInErrors | 无 | 出错而不会被送往上层协议的报文/传输单元个数。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | | ifOutOctets | octets | 该接口出方向通过的总字节数,包括分桢的数据。在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | -| ifOutDiscards | 无 | 出方向的被丢弃的报文个数,即使没有错误发生。也将阻止这些报文发送。丢弃此类报文的一个可能原因是为了释放缓冲区空间。 在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | +| ifOutDiscards | 无 | 出方向的被丢弃的报文个数,即使没有错误发生。也将阻止这些报文发送。丢弃此类报文的一个可能原因是为了释放缓冲区空间。 在管理系统的重新初始化和ifCounterDiscontinuityTime项指定的时间内,该节点的值将出现不连续的情况。 | | ifOutErrors | 无 | 对于面向数据包的接口,该节点表示由于错误而无法发送的数据包数量。对于面向字符或固定长度接口,该节点表示由于错误而无法传输的传输单元的数量。这种计数器的值可能在管理系统的重新初始化时会不连续,其他时间如ifCounterDiscontinuityTime的值。 | | ifAdminStatus | 无 | 接口的理想状态。 testing(3)状态表示没有可操作的数据包通过。 当受管系统初始化时,全部接口开始于ifAdminStatus在down(2)状态。由于明确的管理动作或被管理的系统保留的每个配置信息,ifAdminStatus然后被更改为Up(1)或testing(3)状态(或保留在down(2)状态)。 | | ifOperStatus | 无 | 当前接口的操作状态。testing(3)状态表示没有可操作的数据包可以通过。如果ifAdminStatus是down(2),则ifOperStatus应该是down(2)。 如果ifAdminStatus是改为up(1),则ifOperStatus应该更改为up(1)。如果接口准备好传输,接收网络流量; 它应该改为dormant(5)。如果接口正在等待外部动作(如串行线路等待传入连接); 它应该保持在down(2)状态,并且只有当有故障阻止它变成up(1)状态。 它应该留在notPresent(6)状态如果接口缺少(通常为硬件)组件。 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md index 8770ababdcd..bb802791dda 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md @@ -4,6 +4,7 @@ title: 监控:HugeGraph监控 sidebar_label: Apache HugeGraph keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] --- + > 对HugeGraph的通用性能指标进行采集监控 **使用协议:HTTP** @@ -14,8 +15,7 @@ keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] ## 配置参数 - -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-----------|---------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 端口 | HugeGraph restserver的端口号,默认为8080。即:`restserver_port`参数值 | @@ -30,117 +30,112 @@ keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] #### 指标集合:gauges -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------------------ | -------- | --------------------------------- | -| edge-hugegraph-capacity | 无 | 表示当前图中边的容量上限 | -| edge-hugegraph-expire | 无 | 表示边数据的过期时间 | -| edge-hugegraph-hits | 无 | 表示边数据缓存的命中次数 | -| edge-hugegraph-miss | 无 | 表示边数据缓存的未命中次数 | -| edge-hugegraph-size | 无 | 表示当前图中边的数量 | -| instances | 无 | 表示当前运行的HugeGraph实例数量| -| schema-id-hugegraph-capacity | 无 | 表示图中schema ID的容量上限 | -| schema-id-hugegraph-expire | 无 | 表示schema ID数据的过期时间 | -| schema-id-hugegraph-hits | 无 | 表示schema ID数据缓存的命中次数| -| schema-id-hugegraph-miss | 无 | 表示schema ID数据缓存的未命中次数| -| schema-id-hugegraph-size | 无 | 表示当前图中schema ID的数量 | -| schema-name-hugegraph-capacity | 无 | 表示图中schema名称的容量上限 | -| schema-name-hugegraph-expire | 无 | 表示schema名称数据的过期时间 | -| schema-name-hugegraph-hits | 无 | 表示schema名称数据缓存的命中次数| -| schema-name-hugegraph-miss | 无 | 表示schema名称数据缓存的未命中次数| -| schema-name-hugegraph-size | 无 | 表示当前图中schema名称的数量 | -| token-hugegraph-capacity | 无 | 表示图中token的容量上限 | -| token-hugegraph-expire | 无 | 表示token数据的过期时间 | -| token-hugegraph-hits | 无 | 表示token数据缓存的命中次数 | -| token-hugegraph-miss | 无 | 表示token数据缓存的未命中次数 | -| token-hugegraph-size | 无 | 表示当前图中token的数量 | -| users-hugegraph-capacity | 无 | 表示图中用户的容量上限 | -| users-hugegraph-expire | 无 | 
表示用户数据的过期时间 | -| users-hugegraph-hits | 无 | 表示用户数据缓存的命中次数 | -| users-hugegraph-miss | 无 | 表示用户数据缓存的未命中次数 | -| users-hugegraph-size | 无 | 表示当前图中用户的数量 | -| users_pwd-hugegraph-capacity | 无 | 表示users_pwd的容量上限 | -| users_pwd-hugegraph-expire | 无 | 表示users_pwd数据的过期时间 | -| users_pwd-hugegraph-hits | 无 | 表示users_pwd数据缓存的命中次数 | -| users_pwd-hugegraph-miss | 无 | 表示users_pwd数据缓存的未命中次数| -| users_pwd-hugegraph-size | 无 | 表示当前图中users_pwd的数量 | -| vertex-hugegraph-capacity | 无 | 表示图中顶点的容量上限 | -| vertex-hugegraph-expire | 无 | 表示顶点数据的过期时间 | -| vertex-hugegraph-hits | 无 | 表示顶点数据缓存的命中次数 | -| vertex-hugegraph-miss | 无 | 表示顶点数据缓存的未命中次数 | -| vertex-hugegraph-size | 无 | 表示当前图中顶点的数量 | -| batch-write-threads | 无 | 表示批量写入操作时的线程数 | -| max-write-threads | 无 | 表示最大写入操作的线程数 | -| pending-tasks | 无 | 表示待处理的任务数 | -| workers | 无 | 表示当前工作线程的数量 | -| average-load-penalty | 无 | 表示平均加载延迟 | -| estimated-size | 无 | 表示估计的数据大小 | -| eviction-count | 无 | 表示被驱逐的数据条数 | -| eviction-weight | 无 | 表示被驱逐数据的权重 | -| hit-count | 无 | 表示缓存命中总数 | -| hit-rate | 无 | 表示缓存命中率 | -| load-count | 无 | 表示数据加载次数 | -| load-failure-count | 无 | 表示数据加载失败次数 | -| load-failure-rate | 无 | 表示数据加载失败率 | -| load-success-count | 无 | 表示数据加载成功次数 | -| long-run-compilation-count | 无 | 表示长时间运行的编译次数 | -| miss-count | 无 | 表示缓存未命中总数 | -| miss-rate | 无 | 表示缓存未命中率 | -| request-count | 无 | 表示总的请求次数 | -| total-load-time | 无 | 表示总的数据加载时间 | -| sessions | 无 | 表示当前的活动会话数量 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-----------------------| +| edge-hugegraph-capacity | 无 | 表示当前图中边的容量上限 | +| edge-hugegraph-expire | 无 | 表示边数据的过期时间 | +| edge-hugegraph-hits | 无 | 表示边数据缓存的命中次数 | +| edge-hugegraph-miss | 无 | 表示边数据缓存的未命中次数 | +| edge-hugegraph-size | 无 | 表示当前图中边的数量 | +| instances | 无 | 表示当前运行的HugeGraph实例数量 | +| schema-id-hugegraph-capacity | 无 | 表示图中schema ID的容量上限 | +| schema-id-hugegraph-expire | 无 | 表示schema ID数据的过期时间 | +| schema-id-hugegraph-hits | 无 | 表示schema ID数据缓存的命中次数 | +| schema-id-hugegraph-miss | 无 | 表示schema ID数据缓存的未命中次数 | +| 
schema-id-hugegraph-size | 无 | 表示当前图中schema ID的数量 | +| schema-name-hugegraph-capacity | 无 | 表示图中schema名称的容量上限 | +| schema-name-hugegraph-expire | 无 | 表示schema名称数据的过期时间 | +| schema-name-hugegraph-hits | 无 | 表示schema名称数据缓存的命中次数 | +| schema-name-hugegraph-miss | 无 | 表示schema名称数据缓存的未命中次数 | +| schema-name-hugegraph-size | 无 | 表示当前图中schema名称的数量 | +| token-hugegraph-capacity | 无 | 表示图中token的容量上限 | +| token-hugegraph-expire | 无 | 表示token数据的过期时间 | +| token-hugegraph-hits | 无 | 表示token数据缓存的命中次数 | +| token-hugegraph-miss | 无 | 表示token数据缓存的未命中次数 | +| token-hugegraph-size | 无 | 表示当前图中token的数量 | +| users-hugegraph-capacity | 无 | 表示图中用户的容量上限 | +| users-hugegraph-expire | 无 | 表示用户数据的过期时间 | +| users-hugegraph-hits | 无 | 表示用户数据缓存的命中次数 | +| users-hugegraph-miss | 无 | 表示用户数据缓存的未命中次数 | +| users-hugegraph-size | 无 | 表示当前图中用户的数量 | +| users_pwd-hugegraph-capacity | 无 | 表示users_pwd的容量上限 | +| users_pwd-hugegraph-expire | 无 | 表示users_pwd数据的过期时间 | +| users_pwd-hugegraph-hits | 无 | 表示users_pwd数据缓存的命中次数 | +| users_pwd-hugegraph-miss | 无 | 表示users_pwd数据缓存的未命中次数 | +| users_pwd-hugegraph-size | 无 | 表示当前图中users_pwd的数量 | +| vertex-hugegraph-capacity | 无 | 表示图中顶点的容量上限 | +| vertex-hugegraph-expire | 无 | 表示顶点数据的过期时间 | +| vertex-hugegraph-hits | 无 | 表示顶点数据缓存的命中次数 | +| vertex-hugegraph-miss | 无 | 表示顶点数据缓存的未命中次数 | +| vertex-hugegraph-size | 无 | 表示当前图中顶点的数量 | +| batch-write-threads | 无 | 表示批量写入操作时的线程数 | +| max-write-threads | 无 | 表示最大写入操作的线程数 | +| pending-tasks | 无 | 表示待处理的任务数 | +| workers | 无 | 表示当前工作线程的数量 | +| average-load-penalty | 无 | 表示平均加载延迟 | +| estimated-size | 无 | 表示估计的数据大小 | +| eviction-count | 无 | 表示被驱逐的数据条数 | +| eviction-weight | 无 | 表示被驱逐数据的权重 | +| hit-count | 无 | 表示缓存命中总数 | +| hit-rate | 无 | 表示缓存命中率 | +| load-count | 无 | 表示数据加载次数 | +| load-failure-count | 无 | 表示数据加载失败次数 | +| load-failure-rate | 无 | 表示数据加载失败率 | +| load-success-count | 无 | 表示数据加载成功次数 | +| long-run-compilation-count | 无 | 表示长时间运行的编译次数 | +| miss-count | 无 | 表示缓存未命中总数 | +| miss-rate | 无 | 表示缓存未命中率 | +| request-count | 无 | 表示总的请求次数 
| +| total-load-time | 无 | 表示总的数据加载时间 | +| sessions | 无 | 表示当前的活动会话数量 | #### 指标集合:counters - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------------------------------- | -------- | ---------------------------------------- | -| GET-SUCCESS_COUNTER | 无 | 记录GET请求成功的次数 | -| GET-TOTAL_COUNTER | 无 | 记录GET请求的总次数 | -| favicon-ico-GET-FAILED_COUNTER | 无 | 记录获取favicon.ico失败的GET请求次数 | -| favicon-ico-GET-TOTAL_COUNTER | 无 | 记录获取favicon.ico的GET请求总次数 | -| graphs-HEAD-FAILED_COUNTER | 无 | 记录graphs资源的HEAD请求失败的次数 | -| graphs-HEAD-SUCCESS_COUNTER | 无 | 记录graphs资源的HEAD请求成功的次数 | -| graphs-HEAD-TOTAL_COUNTER | 无 | 记录graphs资源的HEAD请求的总次数 | -| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求成功的次数 | -| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求的总次数 | -| metircs-GET-FAILED_COUNTER | 无 | 记录获取metrics失败的GET请求次数 | -| metircs-GET-TOTAL_COUNTER | 无 | 记录获取metrics的GET请求总次数 | -| metrics-GET-SUCCESS_COUNTER | 无 | 记录获取metrics成功的GET请求次数 | -| metrics-GET-TOTAL_COUNTER | 无 | 记录获取metrics的GET请求总次数 | -| metrics-gauges-GET-SUCCESS_COUNTER | 无 | 记录获取metrics gauges成功的GET请求次数 | -| metrics-gauges-GET-TOTAL_COUNTER | 无 | 记录获取metrics gauges的GET请求总次数 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------------------------------------|------|------------------------------| +| GET-SUCCESS_COUNTER | 无 | 记录GET请求成功的次数 | +| GET-TOTAL_COUNTER | 无 | 记录GET请求的总次数 | +| favicon-ico-GET-FAILED_COUNTER | 无 | 记录获取favicon.ico失败的GET请求次数 | +| favicon-ico-GET-TOTAL_COUNTER | 无 | 记录获取favicon.ico的GET请求总次数 | +| graphs-HEAD-FAILED_COUNTER | 无 | 记录graphs资源的HEAD请求失败的次数 | +| graphs-HEAD-SUCCESS_COUNTER | 无 | 记录graphs资源的HEAD请求成功的次数 | +| graphs-HEAD-TOTAL_COUNTER | 无 | 记录graphs资源的HEAD请求的总次数 | +| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求成功的次数 | +| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | 无 | 记录获取HugeGraph图中顶点的GET请求的总次数 | +| metircs-GET-FAILED_COUNTER | 无 | 记录获取metrics失败的GET请求次数 | +| metircs-GET-TOTAL_COUNTER | 无 | 
记录获取metrics的GET请求总次数 | +| metrics-GET-SUCCESS_COUNTER | 无 | 记录获取metrics成功的GET请求次数 | +| metrics-GET-TOTAL_COUNTER | 无 | 记录获取metrics的GET请求总次数 | +| metrics-gauges-GET-SUCCESS_COUNTER | 无 | 记录获取metrics gauges成功的GET请求次数 | +| metrics-gauges-GET-TOTAL_COUNTER | 无 | 记录获取metrics gauges的GET请求总次数 | #### 指标集合:system +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------------------|------|--------------------------------| +| mem | 无 | 表示系统的总内存量 | +| mem_total | 无 | 表示系统的总内存量(与mem相同) | +| mem_used | 无 | 表示系统当前使用的内存量 | +| mem_free | 无 | 表示系统空闲的内存量 | +| mem_unit | 无 | 表示内存量的单位(如字节、千字节、兆字节等) | +| processors | 无 | 表示系统的处理器数量 | +| uptime | 无 | 表示系统运行时间,即从启动到现在的时间 | +| systemload_average | 无 | 表示系统的平均负载,反映了系统的繁忙程度 | +| heap_committed | 无 | 表示JVM堆内存的承诺大小,即保证可供JVM使用的堆内存大小 | +| heap_init | 无 | 表示JVM堆内存的初始大小 | +| heap_used | 无 | 表示JVM当前使用的堆内存大小 | +| heap_max | 无 | 表示JVM堆内存的最大可使用大小 | +| nonheap_committed | 无 | 表示JVM非堆内存的承诺大小 | +| nonheap_init | 无 | 表示JVM非堆内存的初始大小 | +| nonheap_used | 无 | 表示JVM当前使用的非堆内存大小 | +| nonheap_max | 无 | 表示JVM非堆内存的最大可使用大小 | +| thread_peak | 无 | 表示自JVM启动以来峰值线程数 | +| thread_daemon | 无 | 表示当前活跃的守护线程数 | +| thread_total_started | 无 | 表示自JVM启动以来总共启动过的线程数 | +| thread_count | 无 | 表示当前活跃的线程数 | +| garbage_collector_g1_young_generation_count | 无 | 表示G1垃圾收集器年轻代垃圾收集的次数 | +| garbage_collector_g1_young_generation_time | 无 | 表示G1垃圾收集器年轻代垃圾收集的总时间 | +| garbage_collector_g1_old_generation_count | 无 | 表示G1垃圾收集器老年代垃圾收集的次数 | +| garbage_collector_g1_old_generation_time | 无 | 表示G1垃圾收集器老年代垃圾收集的总时间 | +| garbage_collector_time_unit | 无 | 表示垃圾收集时间的单位(如毫秒、秒等) | -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------------------------------- | -------- | -------------------------------------------------- | -| mem | 无 | 表示系统的总内存量 | -| mem_total | 无 | 表示系统的总内存量(与mem相同) | -| mem_used | 无 | 表示系统当前使用的内存量 | -| mem_free | 无 | 表示系统空闲的内存量 | -| mem_unit | 无 | 表示内存量的单位(如字节、千字节、兆字节等) | -| processors | 无 | 表示系统的处理器数量 | -| uptime | 无 | 表示系统运行时间,即从启动到现在的时间 | -| systemload_average | 无 | 表示系统的平均负载,反映了系统的繁忙程度 | 
-| heap_committed | 无 | 表示JVM堆内存的承诺大小,即保证可供JVM使用的堆内存大小 | -| heap_init | 无 | 表示JVM堆内存的初始大小 | -| heap_used | 无 | 表示JVM当前使用的堆内存大小 | -| heap_max | 无 | 表示JVM堆内存的最大可使用大小 | -| nonheap_committed | 无 | 表示JVM非堆内存的承诺大小 | -| nonheap_init | 无 | 表示JVM非堆内存的初始大小 | -| nonheap_used | 无 | 表示JVM当前使用的非堆内存大小 | -| nonheap_max | 无 | 表示JVM非堆内存的最大可使用大小 | -| thread_peak | 无 | 表示自JVM启动以来峰值线程数 | -| thread_daemon | 无 | 表示当前活跃的守护线程数 | -| thread_total_started | 无 | 表示自JVM启动以来总共启动过的线程数 | -| thread_count | 无 | 表示当前活跃的线程数 | -| garbage_collector_g1_young_generation_count | 无 | 表示G1垃圾收集器年轻代垃圾收集的次数 | -| garbage_collector_g1_young_generation_time | 无 | 表示G1垃圾收集器年轻代垃圾收集的总时间 | -| garbage_collector_g1_old_generation_count | 无 | 表示G1垃圾收集器老年代垃圾收集的次数 | -| garbage_collector_g1_old_generation_time | 无 | 表示G1垃圾收集器老年代垃圾收集的总时间 | -| garbage_collector_time_unit | 无 | 表示垃圾收集时间的单位(如毫秒、秒等) | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md index 1881d79f4ee..00ff0b7f679 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md @@ -5,64 +5,62 @@ sidebar_label: InfluxDB 数据库 keywords: [开源监控系统, 开源数据库监控, InfluxDB 数据库监控] --- - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为8086。 | -| URL | 数据库连接URL,一般是由host拼接,不需要添加 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为8086。 | +| URL | 数据库连接URL,一般是由host拼接,不需要添加 | +| 采集间隔 | 
监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:influxdb 基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------| ----------- |--------| -| build_date | 无 | 创建日期 | -| os | 无 | 操作系统 | -| cpus | 无 | cpus | -| version | 无 | 版本号 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|--------| +| build_date | 无 | 创建日期 | +| os | 无 | 操作系统 | +| cpus | 无 | cpus | +| version | 无 | 版本号 | #### 指标集合:http 响应时间 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|---------| | handler | 无 | handler | | path | 无 | 路径 | | response_code | 无 | 返回code | | method | 无 | 请求方法 | | user_agent | 无 | 用户代理 | -| status | 无 | 状态 | +| status | 无 | 状态 | #### 指标集合:正在排队的 TSM 数 -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------------|------|------------| -| bucket | 无 | 存储桶 | -| engine | 无 | 引擎类型 | -| id | 无 | 标识符 | -| level | 无 | 级别 | -| path | 无 | 数据文件路径 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| bucket | 无 | 存储桶 | +| engine | 无 | 引擎类型 | +| id | 无 | 标识符 | +| level | 无 | 级别 | +| path | 无 | 数据文件路径 | #### 指标集合:HTTP写入请求的字节数量 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------| -| endpoint | 无 | 终点 | -| org_id | 无 | 组织标识符 | -| status | 无 | 状态 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| endpoint | 无 | 终点 | +| org_id | 无 | 组织标识符 | +| status | 无 | 状态 | #### 指标集合:质量控制请求总数 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------| -| result | 无 | 结果 | -| org | 无 | 组织标识符 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| result | 无 | 结果 | +| org | 无 | 组织标识符 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md index ac56245fee4..97469a71932 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md 
@@ -9,7 +9,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合:basic_influxdb_memstats_alloc -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|---------| | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合: influxdb_database_numMeasurements -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|---------| | job | 无 | 指标名称 | | instance | 无 | 指标所属实例 | @@ -46,7 +46,7 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合: influxdb_query_rate_seconds -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|---------| | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | @@ -54,10 +54,9 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] #### 指标集合: influxdb_queryExecutor_queriesFinished_10s -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|---------| | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | | value | 无 | 指标值 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md index 0f0dc0e0ecb..fceb485f05b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md @@ -5,15 +5,15 @@ sidebar_label: IoTDB数据库 keywords: [开源监控系统, 开源数据库监控, IoTDB数据库监控] --- -> 对 Apache IoTDB 物联网时序数据库的运行状态(JVM相关),内存任务集群等相关指标进行监测。 +> 对 Apache IoTDB 物联网时序数据库的运行状态(JVM相关),内存任务集群等相关指标进行监测。 -## 监控前操作 +## 监控前操作 -您需要在 IoTDB 开启`metrics`功能,他将提供 prometheus metrics 形式的接口数据。 +您需要在 IoTDB 开启`metrics`功能,他将提供 prometheus metrics 
形式的接口数据。 -开启`metrics`功能, 参考 [官方文档](https://iotdb.apache.org/zh/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) +开启`metrics`功能, 参考 [官方文档](https://iotdb.apache.org/zh/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) -主要如下步骤: +主要如下步骤: 1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server @@ -41,13 +41,13 @@ predefinedMetrics: - FILE ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 3. 在 HertzBeat 添加对应 IoTDB 监控即可。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -57,37 +57,37 @@ predefinedMetrics: | 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | | 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:cluster_node_status +#### 指标集合:cluster_node_status -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- |------|-------------------------| +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|-------------------------| | name | 无 | 节点名称IP | | status | 无 | 节点状态,1=online 2=offline | #### 指标集合:jvm_memory_committed_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|------------------| | area | 无 | heap内存或nonheap内存 | | id | 无 | 内存区块 | -| value | MB | 当前向JVM申请的内存大小 | +| value | MB | 当前向JVM申请的内存大小 | #### 指标集合:jvm_memory_used_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| | area | 无 | heap内存或nonheap内存 | | id | 无 | 内存区块 | -| value | MB | JVM已使用内存大小 | +| value | MB | JVM已使用内存大小 | #### 指标集合:jvm_threads_states_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------| -| state | 无 | 线程状态 | -| count | 无 | 线程状态对应线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------| +| state | 无 | 线程状态 | +| count | 无 | 线程状态对应线程数量 | #### 指标集合:quantity 业务数据 @@ -99,22 +99,23 @@ predefinedMetrics: #### 
指标集合:cache_hit 缓存 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------------------------------------------------| -| name | 无 | 缓存名称 chunk/timeSeriesMeta/bloomFilter | -| value | % | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------| +| name | 无 | 缓存名称 chunk/timeSeriesMeta/bloomFilter | +| value | % | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | #### 指标集合:queue 任务队列 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|---------------------------------------------------| -| name | 无 | 队列名称 compaction_inner/compaction_cross/flush | -| status | 无 | 状态 running/waiting | -| value | 无 | 当前时间任务数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|----------------------------------------------| +| name | 无 | 队列名称 compaction_inner/compaction_cross/flush | +| status | 无 | 状态 running/waiting | +| value | 无 | 当前时间任务数 | #### 指标集合:thrift_connections -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-------------| -| name | 无 | 名称 | -| connection | 无 | thrift当前连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|-------------| +| name | 无 | 名称 | +| connection | 无 | thrift当前连接数 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md index b7414f878b1..745a4f70a88 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md @@ -1,59 +1,66 @@ --- id: issue title: 常见问题 -sidebar_label: 常见问题 +sidebar_label: 常见问题 --- -### 监控常见问题 +### 监控常见问题 -1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** -> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http +1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** -2. 
** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** -> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) +> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http + +2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** + +> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 安装包部署的hertzbeat下ping连通性监控异常 -安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 > docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address 4. 配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 > 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 -> 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine +> 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine 5. 配置http api监控,用于进行业务接口探测,确保业务可以用,另外接口有进行token鉴权校验,"Authorization:Bearer eyJhbGciOiJIUzI1....",配置后测试,提示“StatusCode 401”。服务端应用收到的token为"Authorization:Bearer%20eyJhbGciOiJIUzI1....",hertzbeat对空格进行转义为“%20”,服务器没有转义导致鉴权失败,建议转义功能作为可选项。 - -### Docker部署常见问题 +### Docker部署常见问题 1. 
**MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** -此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` +> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. **按照流程部署,访问 http://ip:1157/ 无界面** -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 -3. **日志报错TDengine连接或插入SQL失败** +3. **日志报错TDengine连接或插入SQL失败** + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 1. **按照流程部署,访问 http://ip:1157/ 无界面** 请参考下面几点排查问题: + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md index 04a15823529..b60a5882b9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md @@ -15,17 +15,18 @@ keywords: [开源监控系统, 开源中间件监控, Jetty应用服务器监控 #### Jetty应用服务器开启JMX协议步骤 -[参考官方文档](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) +[参考官方文档](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) -1. 在 Jetty 启动 JMX JMX-REMOTE 模块 +1. 在 Jetty 启动 JMX JMX-REMOTE 模块 ```shell java -jar $JETTY_HOME/start.jar --add-module=jmx java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ``` -命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 -2. 编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 +命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 + +2. 
编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 **`localhost` 需修改为对外暴露 IP** @@ -50,49 +51,45 @@ java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | 
-| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md index c97cc73b003..f046b3ef6a0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md @@ -28,57 +28,54 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache (限JDK8及以下) -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------| ----------- | ----------- | 
-| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md index 2b4ed0514b7..3cb4d74132c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md @@ -7,7 +7,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] > 对Kafka的通用性能指标进行采集监控 -**使用协议:JMX** +**使用协议:JMX** ### 监控前操作 @@ -18,7 +18,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 
修改 Kafka 启动脚本 修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` -在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 +在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 ```shell export JMX_PORT=9999; @@ -32,71 +32,65 @@ export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management. ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:server_info -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Version | 无 | Kafka版本 | -| StartTimeMs | ms | 运行时间 | -| CommitId | 无 | 版本提交ID | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------| +| Version | 无 | Kafka版本 | +| StartTimeMs | ms | 运行时间 | +| CommitId | 无 | 版本提交ID | #### 指标集合:code_cache -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:active_controller_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Value | 个 | 活跃监控器数量 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------| +| Value | 个 | 活跃监控器数量 | #### 指标集合:broker_partition_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | 
----------- | ----------- | -| Value | 个 | 分区数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------| +| Value | 个 | 分区数量 | #### 指标集合:broker_leader_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| Value | 个 | 领导者数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------| +| Value | 个 | 领导者数量 | #### 指标集合:broker_handler_avg_percent 请求处理器空闲率 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| EventType | 无 | 类型 | -| RateUnit | 具体情况具体分析 | 单位 | -| Count | 个 | 数量 | -| OneMinuteRate | % | 一分钟处理率 | -| FiveMinuteRate | % | 五分钟处理率 | -| MeanRate | 无 | 平均处理率 | -| FifteenMinuteRate | 无 | 十五分钟处理率 | - - -> 其他指标见文知意,欢迎贡献一起优化文档。 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|----------|---------| +| EventType | 无 | 类型 | +| RateUnit | 具体情况具体分析 | 单位 | +| Count | 个 | 数量 | +| OneMinuteRate | % | 一分钟处理率 | +| FiveMinuteRate | % | 五分钟处理率 | +| MeanRate | 无 | 平均处理率 | +| FifteenMinuteRate | 无 | 十五分钟处理率 | + +> 其他指标见文知意,欢迎贡献一起优化文档。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md index a0b0b625485..e0e5ecf7e50 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md @@ -15,7 +15,7 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -34,7 +34,7 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL #### 指标集合:kafka_brokers -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | \_\_name__ | 无 | 指标名称 | | instance | 无 | 指标所属实例 | @@ -43,7 +43,7 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL #### 指标集合: kafka_topic_partitions -| 指标名称 | 指标单位 | 
指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | \_\_name__ | 无 | 指标名称 | | instance | 无 | 指标所属实例 | @@ -54,3 +54,4 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL 1. kafka启用了JMX监控,可以使用 [Kafka](kafka) 监控; 2. kafka集群部署kafka_exporter暴露的监控指标,可以参考 [Prometheus任务](prometheus) 配置Prometheus采集任务监控kafka。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md index f45da8d9b27..aa242d93a6b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md @@ -7,14 +7,13 @@ keywords: [开源监控系统, 开源Kubernetes监控] > 对kubernetes的通用性能指标进行采集监控。 - ## 监控前操作 如果想要监控 `Kubernetes` 中的信息,则需要获取到可访问Api Server的授权TOKEN,让采集请求获取到对应的信息。 -参考获取token步骤 +参考获取token步骤 -#### 方式一: +#### 方式一: 1. 创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +26,9 @@ kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin -- kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` + #### 方式二: + ``` kubectl create serviceaccount cluster-admin @@ -36,13 +37,14 @@ kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-adm kubectl create token --duration=1000h cluster-admin ``` + ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-------------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| APiServer端口 | K8s APiServer端口,默认6443 | +| APiServer端口 | K8s APiServer端口,默认6443 | | token | 授权Access Token | | URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | | 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | @@ -53,44 +55,45 @@ kubectl create token --duration=1000h cluster-admin #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- |--------| -| 
node_name | 无 | 节点名称 | -| is_ready | 无 | 节点状态 | -| capacity_cpu | 无 | CPU容量 | -| allocatable_cpu | 无 | 已分配CPU | -| capacity_memory | 无 | 内存容量 | -| allocatable_memory | 无 | 已分配内存 | -| creation_time | 无 | 节点创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------| +| node_name | 无 | 节点名称 | +| is_ready | 无 | 节点状态 | +| capacity_cpu | 无 | CPU容量 | +| allocatable_cpu | 无 | 已分配CPU | +| capacity_memory | 无 | 内存容量 | +| allocatable_memory | 无 | 已分配内存 | +| creation_time | 无 | 节点创建时间 | #### 指标集合:namespaces -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- |-------------| -| namespace | 无 | namespace名称 | -| status | 无 | 状态 | -| creation_time | 无 | 创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-------------| +| namespace | 无 | namespace名称 | +| status | 无 | 状态 | +| creation_time | 无 | 创建时间 | #### 指标集合:pods -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- | -------- |----------------| -| pod | 无 | pod名称 | -| namespace | 无 | pod所属namespace | -| status | 无 | pod状态 | -| restart | 无 | 重启次数 | -| host_ip | 无 | 所在主机IP | -| pod_ip | 无 | pod ip | -| creation_time | 无 | pod创建时间 | -| start_time | 无 | pod启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|----------------| +| pod | 无 | pod名称 | +| namespace | 无 | pod所属namespace | +| status | 无 | pod状态 | +| restart | 无 | 重启次数 | +| host_ip | 无 | 所在主机IP | +| pod_ip | 无 | pod ip | +| creation_time | 无 | pod创建时间 | +| start_time | 无 | pod启动时间 | #### 指标集合:services -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- |------|--------------------------------------------------------| -| service | 无 | service名称 | -| namespace | 无 | service所属namespace | -| type | 无 | service类型 ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | 无 | cluster ip | -| selector | 无 | tag selector匹配 | -| creation_time | 无 | 创建时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|--------------------------------------------------------| +| service | 无 | service名称 | +| namespace | 无 | service所属namespace | +| type | 无 | service类型 ClusterIP 
NodePort LoadBalancer ExternalName | +| cluster_ip | 无 | cluster ip | +| selector | 无 | tag selector匹配 | +| creation_time | 无 | 创建时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md index 853be6610b4..4a69c04495e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, Linux操作系统监控 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | 
CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md index 5fd59466717..2490e3630dd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md @@ -9,49 +9,46 @@ keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为3306。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|------------| +| version | 无 | 数据库版本 | +| port | 无 | 数据库暴露服务端口 | +| datadir | 无 | 数据库存储数据盘地址 | +| max_connections | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| threads_created | 无 | MariaDB已经创建的总连接数 | -| threads_connected | 无 | MariaDB已经连接的连接数 | -| threads_cached | 无 | MariaDB当前缓存的连接数 | -| threads_running | 无 
| MariaDB当前活跃的连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|------------------| +| threads_created | 无 | MariaDB已经创建的总连接数 | +| threads_connected | 无 | MariaDB已经连接的连接数 | +| threads_cached | 无 | MariaDB当前缓存的连接数 | +| threads_running | 无 | MariaDB当前活跃的连接数 | #### 指标集合:innodb -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|-------------------------| +| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | +| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | +| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | +| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md index 0debad01ce9..db88c1ac5fc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md @@ -34,7 +34,7 @@ STAT version 1.4.15 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |------|---------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不带协议头(例如:https://,http://) | | 监控名称 | 标识此监控的名称。名称需要唯一 | @@ -47,7 +47,7 @@ STAT version 1.4.15 #### 指标集:server_info -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------|------|-------------------| | pid | 无 | Memcache 服务器进程 ID | | uptime | s | 服务器已运行的秒数 | @@ -65,3 +65,4 @@ STAT version 1.4.15 | cmd_flush | 无 | Flush 命令请求数 | | get_misses | 无 | Get 命令未命中次数 | | delete_misses | 无 | Delete 命令未命中次数 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md index 23c4a866809..8c54174b54a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -27,7 +27,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:构建信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------|------|-------------------------| | version | 无 | MongoDB版本信息 | | gitVersion | 无 | 源代码git版本 | @@ -39,7 +39,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:服务器文档 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------|------|--------| | deleted | 无 | 已删除数 | | inserted | 无 | 已插入数 | @@ -48,21 +48,21 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:服务器操作 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|-------------------| | scanAndOrder | 无 | 执行查询时需要扫描并进行排序的次数 | | writeConflicts | 无 | 写冲突的次数 | #### 指标集合: 服务器_ttl -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------|------|-------------------------------| | deletedDocuments | 无 | 删除的过期文档数量 | | passes | 无 | TTL清理过程的总传递次数,每次传递会检查并删除过期的文档 | #### 指标集合:系统信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-----------------------| | currentTime | 无 | 当前时间 | | hostname | 无 | 主机名 | @@ -75,7 +75,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:操作系统信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|--------| | type | 无 | 操作系统类型 | | name | 无 | 操作系统名称 | @@ -83,7 +83,7 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] #### 指标集合:额外信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
|-----------------|------|----------------------| | versionString | 无 | 版本 | | libcVersion | 无 | 标准库版本 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md index dc23f3d6fa5..c5deaab27a2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md @@ -9,49 +9,46 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为3306。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|------------| +| version | 无 | 数据库版本 | +| port | 无 | 数据库暴露服务端口 | +| datadir | 无 | 数据库存储数据盘地址 | +| max_connections | 无 | 数据库最大连接数 | #### 指标集合:status -| 指标名称 | 指标单位 | 指标帮助描述 | -| 
----------- | ----------- | ----------- | -| threads_created | 无 | MySql已经创建的总连接数 | -| threads_connected | 无 | MySql已经连接的连接数 | -| threads_cached | 无 | MySql当前缓存的连接数 | -| threads_running | 无 | MySql当前活跃的连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|----------------| +| threads_created | 无 | MySql已经创建的总连接数 | +| threads_connected | 无 | MySql已经连接的连接数 | +| threads_cached | 无 | MySql当前缓存的连接数 | +| threads_running | 无 | MySql当前活跃的连接数 | #### 指标集合:innodb -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------|------|-------------------------| +| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | +| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | +| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | +| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md index 8b5313c2dc1..84b432f4651 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md @@ -7,87 +7,89 @@ keywords: [开源监控系统, 中间件监控, Nacos分布式监控] > 通过调用 Nacos Metrics 接口对 Nacos 注册配置中心服务的通用指标进行采集监控。 -### 监控前操作 +### 监控前操作 #### 搭建Nacos集群暴露metrics数据 1. 按照[部署文档](https://nacos.io/zh-cn/docs/deployment.html)搭建好Nacos集群。 2. 配置application.properties文件,暴露metrics数据。 + ``` management.endpoints.web.exposure.include=* ``` + 3. 
访问```{ip}:8848/nacos/actuator/prometheus```,查看是否能访问到metrics数据。 更多信息请参考[Nacos 监控手册](https://nacos.io/zh-cn/docs/monitor-guide.html)。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 服务器Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Nacos服务端口 | Nacos服务对外提供的端口,默认为8848。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 服务器Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Nacos服务端口 | Nacos服务对外提供的端口,默认为8848。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:jvm -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| system_cpu_usage | 无 | CPU使用率 | -| system_load_average_1m | 无 | load | -| jvm_memory_used_bytes | 字节 | 内存使用字节,包含各种内存区 | -| jvm_memory_max_bytes | 字节 | 内存最大字节,包含各种内存区 | -| jvm_gc_pause_seconds_count | 无 | gc次数,包含各种gc | -| jvm_gc_pause_seconds_sum | 秒 | gc耗时,包含各种gc | -| jvm_threads_daemon | 无 | 线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------------|------|----------------| +| system_cpu_usage | 无 | CPU使用率 | +| system_load_average_1m | 无 | load | +| jvm_memory_used_bytes | 字节 | 内存使用字节,包含各种内存区 | +| jvm_memory_max_bytes | 字节 | 内存最大字节,包含各种内存区 | +| jvm_gc_pause_seconds_count | 无 | gc次数,包含各种gc | +| jvm_gc_pause_seconds_sum | 秒 | gc耗时,包含各种gc | +| jvm_threads_daemon | 无 | 线程数 | #### 指标集合:Nacos -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| http_server_requests_seconds_count | 秒 | http请求次数,包括多种(url,方法,code) | -| http_server_requests_seconds_sum | 秒 | http请求总耗时,包括多种(url,方法,code) | -| nacos_timer_seconds_sum | 秒 | Nacos config水平通知耗时 | -| nacos_timer_seconds_count | 无 | Nacos config水平通知次数 | -| nacos_monitor{name='longPolling'} | 无 | Nacos config长连接数 | -| nacos_monitor{name='configCount'} | 无 | Nacos config配置个数 | -| 
nacos_monitor{name='dumpTask'} | 无 | Nacos config配置落盘任务堆积数 | -| nacos_monitor{name='notifyTask'} | 无 | Nacos config配置水平通知任务堆积数 | -| nacos_monitor{name='getConfig'} | 无 | Nacos config读配置统计数 | -| nacos_monitor{name='publish'} | 无 | Nacos config写配置统计数 | -| nacos_monitor{name='ipCount'} | 无 | Nacos naming ip个数 | -| nacos_monitor{name='domCount'} | 无 | Nacos naming域名个数(1.x 版本) | -| nacos_monitor{name='serviceCount'} | 无 | Nacos naming域名个数(2.x 版本) | -| nacos_monitor{name='failedPush'} | 无 | Nacos naming推送失败数 | -| nacos_monitor{name='avgPushCost'} | 秒 | Nacos naming平均推送耗时 | -| nacos_monitor{name='leaderStatus'} | 无 | Nacos naming角色状态 | -| nacos_monitor{name='maxPushCost'} | 秒 | Nacos naming最大推送耗时 | -| nacos_monitor{name='mysqlhealthCheck'} | 无 | Nacos naming mysql健康检查次数 | -| nacos_monitor{name='httpHealthCheck'} | 无 | Nacos naming http健康检查次数 | -| nacos_monitor{name='tcpHealthCheck'} | 无 | Nacos naming tcp健康检查次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------------------------|------|-----------------------------| +| http_server_requests_seconds_count | 秒 | http请求次数,包括多种(url,方法,code) | +| http_server_requests_seconds_sum | 秒 | http请求总耗时,包括多种(url,方法,code) | +| nacos_timer_seconds_sum | 秒 | Nacos config水平通知耗时 | +| nacos_timer_seconds_count | 无 | Nacos config水平通知次数 | +| nacos_monitor{name='longPolling'} | 无 | Nacos config长连接数 | +| nacos_monitor{name='configCount'} | 无 | Nacos config配置个数 | +| nacos_monitor{name='dumpTask'} | 无 | Nacos config配置落盘任务堆积数 | +| nacos_monitor{name='notifyTask'} | 无 | Nacos config配置水平通知任务堆积数 | +| nacos_monitor{name='getConfig'} | 无 | Nacos config读配置统计数 | +| nacos_monitor{name='publish'} | 无 | Nacos config写配置统计数 | +| nacos_monitor{name='ipCount'} | 无 | Nacos naming ip个数 | +| nacos_monitor{name='domCount'} | 无 | Nacos naming域名个数(1.x 版本) | +| nacos_monitor{name='serviceCount'} | 无 | Nacos naming域名个数(2.x 版本) | +| nacos_monitor{name='failedPush'} | 无 | Nacos naming推送失败数 | +| nacos_monitor{name='avgPushCost'} | 秒 | Nacos naming平均推送耗时 | +| 
nacos_monitor{name='leaderStatus'} | 无 | Nacos naming角色状态 | +| nacos_monitor{name='maxPushCost'} | 秒 | Nacos naming最大推送耗时 | +| nacos_monitor{name='mysqlhealthCheck'} | 无 | Nacos naming mysql健康检查次数 | +| nacos_monitor{name='httpHealthCheck'} | 无 | Nacos naming http健康检查次数 | +| nacos_monitor{name='tcpHealthCheck'} | 无 | Nacos naming tcp健康检查次数 | #### 指标集合:Nacos 异常 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| nacos_exception_total{name='db'} | 无 | 数据库异常 | -| nacos_exception_total{name='configNotify'} | 无 | Nacos config水平通知失败 | -| nacos_exception_total{name='unhealth'} | 无 | Nacos config server之间健康检查异常 | -| nacos_exception_total{name='disk'} | 无 | Nacos naming写磁盘异常 | -| nacos_exception_total{name='leaderSendBeatFailed'} | 无 | Nacos naming leader发送心跳异常 | -| nacos_exception_total{name='illegalArgument'} | 无 | 请求参数不合法 | -| nacos_exception_total{name='nacos'} | 无 | Nacos请求响应内部错误异常(读写失败,没权限,参数错误) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------------------------------------|------|--------------------------------| +| nacos_exception_total{name='db'} | 无 | 数据库异常 | +| nacos_exception_total{name='configNotify'} | 无 | Nacos config水平通知失败 | +| nacos_exception_total{name='unhealth'} | 无 | Nacos config server之间健康检查异常 | +| nacos_exception_total{name='disk'} | 无 | Nacos naming写磁盘异常 | +| nacos_exception_total{name='leaderSendBeatFailed'} | 无 | Nacos naming leader发送心跳异常 | +| nacos_exception_total{name='illegalArgument'} | 无 | 请求参数不合法 | +| nacos_exception_total{name='nacos'} | 无 | Nacos请求响应内部错误异常(读写失败,没权限,参数错误) | #### 指标集合:client -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| nacos_monitor{name='subServiceCount'} | 无 | 订阅的服务数 | -| nacos_monitor{name='pubServiceCount'} | 无 | 发布的服务数 | -| nacos_monitor{name='configListenSize'} | 无 | 监听的配置数 | -| nacos_client_request_seconds_count | 无 | 请求的次数,包括多种(url,方法,code) | -| nacos_client_request_seconds_sum | 秒 | 请求的总耗时,包括多种(url,方法,code) | - \ No newline at end of file +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|----------------------------------------|------|--------------------------| +| nacos_monitor{name='subServiceCount'} | 无 | 订阅的服务数 | +| nacos_monitor{name='pubServiceCount'} | 无 | 发布的服务数 | +| nacos_monitor{name='configListenSize'} | 无 | 监听的配置数 | +| nacos_client_request_seconds_count | 无 | 请求的次数,包括多种(url,方法,code) | +| nacos_client_request_seconds_sum | 秒 | 请求的总耗时,包括多种(url,方法,code) | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md index d070101da8f..ded4a06ad2f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md @@ -14,7 +14,7 @@ keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGr nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 ``` -### +### **1、通过 stats 和 rocksdb stats 接口获取可用参数。** @@ -34,7 +34,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-------------|--------------------------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️没有协议头(例如:https://、http://) | | 监控名称 | 识别此监控的名称。名称需要唯一 | @@ -53,7 +53,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 指标太多,相关链接如下 **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------------------------------------------|------|--------| | 达到内存水位线的语句的数量(rate) | | | | 达到内存水位线的语句的数量(sum) | | | @@ -116,8 +116,9 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 指标太多,相关链接如下 **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------|------|------------------------| | rocksdb.backup.read.bytes | | 备份 RocksDB 
数据库期间读取的字节数 | | rocksdb.backup.write.bytes | | 指标名称 | | ... | | ... | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md index 851f6dd7946..252f5f47d8a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md @@ -11,7 +11,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |---------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -27,7 +27,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:基础信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|---------| | responseTime | 无 | 响应时间 | | charset | 无 | 字符集 | @@ -35,21 +35,21 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Session -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------------|------|------------| | session | 无 | session的数量 | | running_query_count | 无 | 正在执行的查询的数量 | #### 指标集合:后台任务 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------------| | queue_jobs | 无 | 等待中的后台任务 | | running_jobs | 无 | 正在执行的后台任务的数量 | #### 指标集合:节点信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------------|------|-----------------| | total_storage_node | 无 | storage节点的数量 | | offline_storage_node | 无 | 离线的storage节点的数量 | @@ -60,7 +60,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Storage节点 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------------------|------|---------------------| | host | 无 | 节点地址 | | port | 无 | 端口 | @@ -72,7 +72,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Meta节点 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|---------------------| | host | 无 | 
节点地址 | | port | 无 | 端口 | @@ -81,7 +81,7 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, #### 指标集合:Graph节点 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|---------------------| | host | 无 | 节点地址 | | port | 无 | 端口 | @@ -89,3 +89,4 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, | version | 无 | 版本 | > 如果需要自定义监控模板采集NebulaGraph集群的数据,请参考: [NGQL自定义监控](../advanced/extend-ngql.md) + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md index a509ff7da2a..82908df358b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md @@ -13,18 +13,19 @@ keywords: [开源监控工具, 开源Java监控工具, 监控Nginx指标] 如果你想使用这种监控方式监控 'Nginx' 的信息,你需要修改你的 Nginx 配置文件以启用监控模块。 -### 启用 ngx_http_stub_status_module +### 启用 ngx_http_stub_status_module 1. 检查是否已添加 `ngx_http_stub_status_module` ```shell nginx -V ``` + 查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 2. 编译安装 Nginx, 添加 `ngx_http_stub_status_module` 模块 -下载 Nginx 并解压,在目录下执行 +下载 Nginx 并解压,在目录下执行 ```shell ./configure --prefix=/usr/local/nginx --with-http_stub_status_module @@ -58,7 +59,7 @@ nginx -s reload 5. 在浏览器访问 `http://localhost/nginx-status` 即可查看 Nginx 监控状态信息。 -### 启用 `ngx_http_reqstat_module` +### 启用 `ngx_http_reqstat_module` 1. 安装 `ngx_http_reqstat_module` 模块 @@ -107,49 +108,47 @@ nginx -s reload 4. 
在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 - **参考文档: https://blog.csdn.net/weixin_55985097/article/details/116722309** **⚠️注意监控模块的端点路径为 `/nginx-status` `/req-status`** ### 配置参数 -| 参数名 | 参数描述 | -|-------------------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | Nginx 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | +| 参数名 | 参数描述 | +|--------|-----------------------------------------------------| +| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | +| 监控名称 | 标识此监控的名称。名称需要唯一 | +| 端口 | Nginx 提供的端口 | +| 超时时间 | 允许收集响应时间 | +| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | +| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | +| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | ### 收集指标 #### 指标收集:nginx_status -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------------|------------| -| 接收连接数 | | 已接受的连接 | -| 处理连接数 | | 成功处理的连接 | -| 活动连接数 | | 当前活动连接 | -| 丢弃连接数 | | 丢弃的连接 | -| 请求连接数 | | 客户端请求 | -| 读连接数 | | 正在执行读操作的连接 | -| 写连接数 | | 正在执行写操作的连接 | -| 等待连接数 | | 等待连接 | +| 指标名称 | 指标单位 | 指标描述 | +|-------|------|------------| +| 接收连接数 | | 已接受的连接 | +| 处理连接数 | | 成功处理的连接 | +| 活动连接数 | | 当前活动连接 | +| 丢弃连接数 | | 丢弃的连接 | +| 请求连接数 | | 客户端请求 | +| 读连接数 | | 正在执行读操作的连接 | +| 写连接数 | | 正在执行写操作的连接 | +| 等待连接数 | | 等待连接 | #### 指标集:req_status -| 指标名称 | 指标单位 | 指标描述 | -|---------|-------|---------| -| 分组类别 | | 分组类别 | -| 分组名称 | | 分组名称 | -| 最大并发连接数 | | 最大并发连接数 | -| 最大带宽 | kb | 最大带宽 | -| 总流量 | kb | 总流量 | -| 总请求数 | | 总请求数 | -| 当前并发连接数 | | 当前并发连接数 | -| 当前带宽 | kb | 当前带宽 | - +| 指标名称 | 指标单位 | 指标描述 | +|---------|------|---------| +| 分组类别 | | 分组类别 | +| 分组名称 | | 分组名称 | +| 最大并发连接数 | | 最大并发连接数 | +| 最大带宽 | kb | 最大带宽 | +| 总流量 | kb | 总流量 | +| 总请求数 | | 总请求数 | +| 当前并发连接数 | | 当前并发连接数 | +| 当前带宽 | kb | 当前带宽 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md index 5760321922f..735ab741b4d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md @@ -15,7 +15,7 @@ NTP监控的中文文档如下: ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |------|--------------------------------------------------| | 监控主机 | 被监控的IPv4、IPv6或域名。注意⚠️不包含协议头(例如:https://,http://) | | 监控名称 | 标识此监控的名称。名称需要是唯一的 | @@ -27,7 +27,7 @@ NTP监控的中文文档如下: #### 指标集:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------|------|--------------------------| | 响应时间 | 毫秒 | NTP服务器响应请求所需的时间。 | | 时间 | 毫秒 | NTP服务器报告的当前时间。 | @@ -39,3 +39,4 @@ NTP监控的中文文档如下: | 层级 | | NTP服务器的层级,表示其与参考时钟的距离。 | | 参考ID | | 指示NTP服务器使用的参考时钟或时间源的标识符。 | | 精度 | | NTP服务器时钟的精度,表示其准确性。 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md index d22b1238855..0af3ca3d17b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md @@ -8,12 +8,13 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] ### 准备工作 #### 获取会话密钥 -> 1. 打开 Chrome 浏览器的网络请求界面 + +> 1. 打开 Chrome 浏览器的网络请求界面 > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` > 2. 访问 https://platform.openai.com/usage > 3. 找到 https://api.openai.com/dashboard/billing/usage 请求 -> 4. 找到请求头中 Authorization 字段,并复制 `Bearer ` 之后的内容。例如: `sess-123456` +> 4. 
找到请求头中 Authorization 字段,并复制 `Bearer ` 之后的内容。例如: `sess-123456` ### 注意事项 @@ -22,11 +23,11 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -|:-------|---------------------------------| +| 参数名称 | 参数帮助描述 | +|:-------|---------------------------------|---| | 监控Host | 此处填写 api.openai.com 。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | | -| 会话密钥 | 即准备工作中获取的会话密钥。 | | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | | +| 会话密钥 | 即准备工作中获取的会话密钥。 | | | 采集器 | 配置此监控使用哪台采集器调度采集。 | | 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒。 | | 绑定标签 | 对监控资源的分类管理标签。 | @@ -36,7 +37,7 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] #### 指标集合:信用额度授予 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------|--------|---------| | 总授予额度 | 美元 ($) | 总授予额度 | | 总使用额度 | 美元 ($) | 总使用额度 | @@ -45,14 +46,14 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] #### 指标集合:模型花费 -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|--------|---------| -| 模型名称 | 无 | 模型名称 | -| 花费 | 美元 ($) | 花费 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------|--------|--------| +| 模型名称 | 无 | 模型名称 | +| 花费 | 美元 ($) | 花费 | #### 指标集合:订阅计费 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|--------|--------------| | 是否有支付方式 | 无 | 是否有支付方式 | | 订阅是否已取消 | 无 | 订阅是否已取消 | @@ -80,3 +81,4 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] | 税务ID | 无 | 税务ID | | 结算地址 | 无 | 结算地址 | | 业务地址 | 无 | 业务地址 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md index 632a7f41b2d..8bf21d7debb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md @@ -9,50 +9,48 @@ keywords: [开源监控系统, 开源数据库监控, OpenGauss数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 
| -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | #### 指标集合:state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------------------------------------------------------------| +| name | 无 | 数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| 
deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | #### 指标集合:activity -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| running | 连接数 | 当前客户端连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md index 6c3b2e9ac7e..f32e2b070ae 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md @@ -4,114 +4,105 @@ title: 监控:OpenSUSE操作系统监控 sidebar_label: OpenSUSE操作系统 keywords: [开源监控系统, 开源操作系统监控, OpenSUSE操作系统监控] --- + > 对OpenSUSE操作系统的通用性能指标进行采集监控。 ### 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- |------------------------------------------------------| +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | | 端口 | Linux SSH对外提供的端口,默认为22。 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次获取信息都会创建一个连接 | -| 用户名 | SSH连接用户名,可选 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次获取信息都会创建一个连接 | +| 用户名 | SSH连接用户名,可选 | | 密码 | SSH连接密码,可选 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | | 密钥 | 连接服务器所需密钥 | ### 采集指标 #### 指标集合:系统基本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | 
-| -------------- | -------- | ------------ | -| Host Name | 无 | 主机名称 | -| System Version | 无 | 操作系统版本 | -| Uptime | 无 | 启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| Host Name | 无 | 主机名称 | +| System Version | 无 | 操作系统版本 | +| Uptime | 无 | 启动时间 | #### 指标集合:CPU 信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- | -------- | --------------------------- | -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:内存信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | ---------------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:磁盘信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------- | -------- | ------------------ | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:网卡信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------- |------|-------------| -| interface_name | 无 | 网卡名称 | -| receive_bytes | Mb | 入站数据流量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | Mb 
| 入站数据流量 | | transmit_bytes | Mb | 出站数据流量 | #### 指标集合:文件系统 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | -------------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| +| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | #### 指标集合:Top10 CPU进程 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| cpu_usage | % | CPU占用率 | -| mem_usage | % | 内存占用率 | -| command | 无 | 执行命令 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| cpu_usage | % | CPU占用率 | +| mem_usage | % | 内存占用率 | +| command | 无 | 执行命令 | #### 指标集合:Top10 内存进程 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| pid | 无 | 进程ID | +| mem_usage | % | 内存占用率 | +| cpu_usage | % | CPU占用率 | +| command | 无 | 执行命令 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------ | -| pid | 无 | 进程ID | -| mem_usage | % | 内存占用率 | -| cpu_usage | % | CPU占用率 | -| command | 无 | 执行命令 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md index 49136e51c48..7ffdfa219ff 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md @@ -9,55 +9,56 @@ keywords: [开源监控系统, 开源数据库监控, Oracle数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1521。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 
数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为1521。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| database_version | 无 | 数据库版本 | -| database_type | 无 | 数据库类型 | -| hostname | 无 | 主机名称 | -| instance_name | 无 | 数据库实例名称 | -| startup_time | 无 | 数据库启动时间 | -| status | 无 | 数据库状态 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|---------| +| database_version | 无 | 数据库版本 | +| database_type | 无 | 数据库类型 | +| hostname | 无 | 主机名称 | +| instance_name | 无 | 数据库实例名称 | +| startup_time | 无 | 数据库启动时间 | +| status | 无 | 数据库状态 | #### 指标集合:tablespace -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| file_id | 无 | 文件ID | -| file_name | 无 | 文件名称 | -| tablespace_name | 无 | 所属表空间名称 | -| status | 无 | 状态 | -| bytes | MB | 大小 | -| blocks | 无 | 区块数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------| +| file_id | 无 | 文件ID | +| file_name | 无 | 文件名称 | +| tablespace_name | 无 | 所属表空间名称 | +| status | 无 | 状态 | +| bytes | MB | 大小 | +| blocks | 无 | 区块数量 | #### 指标集合:user_connect -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| username | 无 | 用户名 | -| counts | 个数 | 当前连接数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| username | 无 | 用户名 | +| counts | 个数 | 当前连接数量 | #### 指标集合:performance -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | 
----------- | ----------- | -| qps | QPS | I/O Requests per Second 每秒IO请求数量 | -| tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | -| mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------|------|---------------------------------------| +| qps | QPS | I/O Requests per Second 每秒IO请求数量 | +| tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | +| mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md index 7b6613f25bd..401e86f9382 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md @@ -5,32 +5,33 @@ sidebar_label: PING连通性 keywords: [开源监控系统, 开源网络监控, 网络PING监控] --- -> 对对端HOST地址进行PING操作,判断其连通性 +> 对对端HOST地址进行PING操作,判断其连通性 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Ping超时时间 | 设置PING未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|----------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Ping超时时间 | 设置PING未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | - -### 常见问题 +### 常见问题 1. 
安装包部署的hertzbeat下ping连通性监控异常 - 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 > docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md index e89f669886f..36a6bba9fe7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md @@ -1,10 +1,11 @@ --- id: plugin title: 自定义插件 -sidebar_label: 自定义插件 +sidebar_label: 自定义插件 --- ## 自定义插件 + ### 简介 当前`Hertzbeat`在使用时,主要依赖`alert`模块对用户进行通知,然后用户采取一些措施如发送请求、执行`sql`、执行`shell`脚本等。 @@ -13,13 +14,15 @@ sidebar_label: 自定义插件 目前,`HertzBeat`只在告警后设置了触发`alert`方法,如需在采集、启动程序等时机设置触发方法,请在`https://github.com/apache/hertzbeat/issues/new/choose` 提`Task`。 ### 具体使用 + 1. 拉取主分支代码 `git clone https://github.com/apache/hertzbeat.git` ,定位到`plugin`模块的 `Plugin`接口。 - ![plugin-1.png](/img/docs/help/plugin-1.png) + ![plugin-1.png](/img/docs/help/plugin-1.png) 2. 在`org.apache.hertzbeat.plugin.impl`目录下, 新建一个接口实现类,如`org.apache.hertzbeat.plugin.impl.DemoPluginImpl`,在实现类中接收`Alert`类作为参数,实现`alert`方法,逻辑由用户自定义,这里我们简单打印一下对象。 - ![plugin-2.png](/img/docs/help/plugin-2.png) + ![plugin-2.png](/img/docs/help/plugin-2.png) 3. 打包`hertzbeat-plugin`模块。 - ![plugin-3.png](/img/docs/help/plugin-3.png) + ![plugin-3.png](/img/docs/help/plugin-3.png) 4. 将打包后的`jar`包,拷贝到安装目录下的`ext-lib`目录下(若为`docker`安装则先将`ext-lib`目录挂载出来,再拷贝到该目录下) - ![plugin-4.png](/img/docs/help/plugin-4.png) + ![plugin-4.png](/img/docs/help/plugin-4.png) 5. 
然后重启`HertzBeat`,即可实现自定义告警后处理策略。 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md index 8d6c2eb5548..4c58cc4a308 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md @@ -24,26 +24,24 @@ keywords: [开源监控工具,开源Java监控工具,监控POP3指标] 5. 通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 ``` - ### 配置参数 -| 参数名 | 参数描述 | -|-------------------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | POP3 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | +| 参数名 | 参数描述 | +|--------|-----------------------------------------------------| +| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | +| 监控名称 | 标识此监控的名称。名称需要唯一 | +| 端口 | POP3 提供的端口 | +| 超时时间 | 允许收集响应时间 | +| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | +| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | +| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | ### 采集指标 #### 指标集:email_status -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------| -| 邮件数量 | | 邮件数量 | -| 邮箱总大小 | kb | 邮箱中邮件的总大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------| +| 邮件数量 | | 邮件数量 | +| 邮箱总大小 | kb | 邮箱中邮件的总大小 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md index f27314e8e40..dd0b19aac82 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md @@ -9,22 +9,21 @@ keywords: [开源监控系统, 开源网络监控, TCP 端口可用性监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 
被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 连接超时时间 | 端口连接的等待超时时间,单位毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 连接超时时间 | 端口连接的等待超时时间,单位毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md index 4716d0e2e64..59adae7da81 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md @@ -9,50 +9,48 @@ keywords: [开源监控系统, 开源数据库监控, PostgreSQL数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 
被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | #### 指标集合:state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------------------------------------------------------------| +| name | 无 | 数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | #### 指标集合:activity -| 指标名称 | 指标单位 
| 指标帮助描述 | -| ----------- | ----------- | ----------- | -| running | 连接数 | 当前客户端连接数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md index 91d7718e419..2eda0726d27 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md @@ -4,48 +4,46 @@ title: 监控:Linux 进程监控 sidebar_label: Process keywords: [开源监控系统, 操作系统进程监控, 进程监控] --- + > 对Linux系统进程基础信息进行采集监控,包括进程的 CPU使用率、内存使用率、物理内存、IO 等监控 ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| -------- | ------------------------------------------------------------------------- | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux系统的ssh端口,默认: 22 | -| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | -| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | -| 用户名 | 服务器用户名 | -| 密码 | 服务器密码 | -| 进程名称 | 需要监控的进程名称或进程部分名称 | -| 采集器 | 配置此监控使用哪台采集器调度采集 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -| 私钥 | 连接服务器所需私钥 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux系统的ssh端口,默认: 22 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | +| 用户名 | 服务器用户名 | +| 密码 | 服务器密码 | +| 进程名称 | 需要监控的进程名称或进程部分名称 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 私钥 | 连接服务器所需私钥 | ### 采集指标 #### 指标集合:进程基本信息 - | 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------ | -| PID | 无 | 进程ID | -| User | 无 | 用户 | -| CPU | 无 | CPU使用率 | -| MEM | 无 | 内存使用率 | -| rss | 无 | 物理内存 | -| cmd | 无 | 运行命令 | +|------|------|--------| +| PID | 无 | 进程ID | +| 
User | 无 | 用户 | +| CPU | 无 | CPU使用率 | +| MEM | 无 | 内存使用率 | +| rss | 无 | 物理内存 | +| cmd | 无 | 运行命令 | #### 指标集合:内存使用信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------ | -| PID | 无 | 进程ID | -| detail | 无 | 详细监控指标 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| PID | 无 | 进程ID | +| detail | 无 | 详细监控指标 | 包含的指标: @@ -63,22 +61,20 @@ keywords: [开源监控系统, 操作系统进程监控, 进程监控] #### 指标集合:其他监控信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------ | -| PID | 无 | 进程ID | -| path | 无 | 执行路径 | -| date | 无 | 启动时间 | -| fd_count | 无 | 打开文件描述符数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|-----------| +| PID | 无 | 进程ID | +| path | 无 | 执行路径 | +| date | 无 | 启动时间 | +| fd_count | 无 | 打开文件描述符数量 | #### 指标集合:IO - -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------ | -| PID | 无 | 进程ID | -| metric | 无 | 监控指标名称 | -| value | 无 | 监控指标值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------| +| PID | 无 | 进程ID | +| metric | 无 | 监控指标名称 | +| value | 无 | 监控指标值 | 包含的指标: @@ -89,3 +85,4 @@ keywords: [开源监控系统, 操作系统进程监控, 进程监控] - read_bytes(进程从磁盘实际读取的字节数) - write_bytes(进程写入到磁盘的实际字节数) - cancelled_write_bytes(进程写入到磁盘的实际字节数) + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md index 50d148f72a9..571da45aac0 100755 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, Prometheus协议监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------------|------------------------------------------------------| | 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(例如: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -40,5 +40,3 @@ keywords: [ 开源监控系统, Prometheus协议监控 ] 其余设置保持默认。 - - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md index a59178686b3..1c12244997b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md @@ -4,52 +4,48 @@ title: 监控:Pulsar监控 sidebar_label: Apache Pulsar keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] --- + > 对Pulsar的通用性能指标进行采集监控 **使用协议:HTTP** ## 配置参数 - -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------------------- | -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 端口 | Pulsar的webServiceProt值,默认为8080。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 绑定标签 | 用于对监控资源进行分类管理 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 端口 | Pulsar的webServiceProt值,默认为8080。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:版本信息 - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | ------------ | -| Version Info | 无 | 版本信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| Version Info | 无 | 版本信息 | #### 指标集合:process_start_time_seconds - -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- | ------------ | -| Process Start Time | 无 | 进程启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|--------| +| Process Start Time | 无 | 进程启动时间 | #### 指标集合:process_open_fds - -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------------------- | -------- | ---------------- | -| Open File Descriptors | 无 | 打开的文件描述符 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| Open File Descriptors | 无 | 打开的文件描述符 | #### 指标集合:process_max_fds - -| 指标名称 | 
指标单位 | 指标帮助描述 | -| -------------------- | -------- | -------------- | -| Max File Descriptors | 无 | 最大文件描述符 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|---------| +| Max File Descriptors | 无 | 最大文件描述符 | #### 指标集合: jvm_memory_pool_allocated_bytes diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md index 8cb91eeb3e6..89c728162c9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md @@ -5,42 +5,42 @@ sidebar_label: RabbitMQ消息中间件 keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间件监控] --- -> 对 RabbitMQ 消息中间件的运行状态,节点,队列等相关指标进行监测。 +> 对 RabbitMQ 消息中间件的运行状态,节点,队列等相关指标进行监测。 -### 监控前操作 +### 监控前操作 > HertzBeat 使用 RabbitMQ Management 的 Rest Api 对 RabbitMQ 进行指标数据采集。 -> 故需要您的 RabbitMQ 环境开启 Management 插件 +> 故需要您的 RabbitMQ 环境开启 Management 插件 -1. 开启 Management 插件,或使用自开启版本 +1. 开启 Management 插件,或使用自开启版本 ```shell rabbitmq-plugins enable rabbitmq_management ``` -2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 +2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 3. 
在 HertzBeat 添加对应 RabbitMQ 监控即可,参数使用 Management 的 IP 端口,默认账户密码。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -|----------|---------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | RabbitMQ Management 对外提供的HTTP端口,默认为15672。 | -| 用户名 | 接口Basic认证时使用的用户名 | -| 密码 | 接口Basic认证时使用的密码 | -| 超时时间 | HTTP请求查询超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | RabbitMQ Management 对外提供的HTTP端口,默认为15672。 | +| 用户名 | 接口Basic认证时使用的用户名 | +| 密码 | 接口Basic认证时使用的密码 | +| 超时时间 | HTTP请求查询超时时间 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:overview +#### 指标集合:overview -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------------|------|--------------------------| | product_version | 无 | 产品版本 | | product_name | 无 | 产品名称 | @@ -52,7 +52,7 @@ rabbitmq-plugins enable rabbitmq_management #### 指标集合:object_totals -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-----------------| | channels | 无 | channels的总数量 | | connections | 无 | connections的总数量 | @@ -62,65 +62,65 @@ rabbitmq-plugins enable rabbitmq_management #### 指标集合:nodes -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|--------------------------------| -| name | 无 | The node name | -| type | 无 | The node type | -| running | 无 | Running state | -| os_pid | 无 | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | -| fd_total | 无 | File descriptors available | -| fd_used | 无 | File descriptors used | -| sockets_total | 无 | Sockets available | -| sockets_used | 无 | Sockets used | 
-| proc_total | 无 | Erlang process limit | -| proc_used | 无 | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | -| disk_free | GB | Free disk space | -| gc_num | 无 | GC runs | -| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | -| context_switches | 无 | Context_switches num | -| io_read_count | 无 | 总共读操作的数量 | -| io_read_bytes | KB | 总共读入磁盘数据大小 | -| io_read_avg_time | ms | 读操作平均时间,毫秒为单位 | -| io_write_count | 无 | 磁盘写操作总量 | -| io_write_bytes | KB | 写入磁盘数据总量 | -| io_write_avg_time | ms | 每个磁盘写操作的平均时间,毫秒为单位 | -| io_seek_count | 无 | seek操作总量 | -| io_seek_avg_time | ms | seek操作的平均时间,毫秒单位 | -| io_sync_count | 无 | fsync操作的总量 | -| io_sync_avg_time | ms | fsync操作的平均时间,毫秒为单位 | -| connection_created | 无 | connection created num | -| connection_closed | 无 | connection closed num | -| channel_created | 无 | channel created num | -| channel_closed | 无 | channel closed num | -| queue_declared | 无 | queue declared num | -| queue_created | 无 | queue created num | -| queue_deleted | 无 | queue deleted num | -| connection_closed | 无 | connection closed num | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------|------|-------------------------------| +| name | 无 | The node name | +| type | 无 | The node type | +| running | 无 | Running state | +| os_pid | 无 | Pid in OS | +| mem_limit | MB | Memory usage high watermark | +| mem_used | MB | Total amount of memory used | +| fd_total | 无 | File descriptors available | +| fd_used | 无 | File descriptors used | +| sockets_total | 无 | Sockets available | +| sockets_used | 无 | Sockets used | +| proc_total | 无 | Erlang process limit | +| proc_used | 无 | Erlang processes used | +| disk_free_limit | GB | Free disk space low watermark | +| disk_free | GB | Free disk space | +| gc_num | 无 | GC runs | +| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | +| context_switches | 无 | Context_switches num | +| io_read_count | 无 | 总共读操作的数量 | +| io_read_bytes | KB | 总共读入磁盘数据大小 | +| io_read_avg_time | ms | 读操作平均时间,毫秒为单位 | +| 
io_write_count | 无 | 磁盘写操作总量 | +| io_write_bytes | KB | 写入磁盘数据总量 | +| io_write_avg_time | ms | 每个磁盘写操作的平均时间,毫秒为单位 | +| io_seek_count | 无 | seek操作总量 | +| io_seek_avg_time | ms | seek操作的平均时间,毫秒单位 | +| io_sync_count | 无 | fsync操作的总量 | +| io_sync_avg_time | ms | fsync操作的平均时间,毫秒为单位 | +| connection_created | 无 | connection created num | +| connection_closed | 无 | connection closed num | +| channel_created | 无 | channel created num | +| channel_closed | 无 | channel closed num | +| queue_declared | 无 | queue declared num | +| queue_created | 无 | queue created num | +| queue_deleted | 无 | queue deleted num | +| connection_closed | 无 | connection closed num | #### 指标集合:queues -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------------------------|------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | 无 | The name of the queue with non-ASCII characters escaped as in C. | +| name | 无 | The name of the queue with non-ASCII characters escaped as in C. | | node | 无 | The queue on the node name | -| state | 无 | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | +| state | 无 | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | | type | 无 | Queue type, one of: quorum, stream, classic. | -| vhost | 无 | vhost path | +| vhost | 无 | vhost path | | auto_delete | 无 | Whether the queue will be deleted automatically when no longer used | -| policy | 无 | Effective policy name for the queue. | +| policy | 无 | Effective policy name for the queue. | | consumers | 无 | Number of consumers. | | memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. 
| | messages_ready | 无 | Number of messages ready to be delivered to clients | -| messages_unacknowledged | 无 | Number of messages delivered to clients but not yet acknowledged | +| messages_unacknowledged | 无 | Number of messages delivered to clients but not yet acknowledged | | messages | 无 | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | 无 | Number of messages from messages_ready which are resident in ram | +| messages_ready_ram | 无 | Number of messages from messages_ready which are resident in ram | | messages_persistent | 无 | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | +| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | | message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | +| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md index 5ceb911c8d3..e0b8ae48cf4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 
开源操作系统监控, RedHat操作系统监 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 无 | CPU内核数量 | @@ -47,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|----------| | total | Mb | 总内存容量 | | used | Mb | 用户程序内存量 | @@ -58,7 +58,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:磁盘信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|-----------| | disk_num | 无 | 磁盘总数 | | partition_num | 无 | 分区总数 | @@ -68,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:网卡信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | interface_name | 无 | 网卡名称 | | receive_bytes | Mb | 入站数据流量 | @@ -76,7 +76,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -88,7 +88,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -99,9 +99,10 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 
内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md index dd9b304e1ce..58248fb0b45 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md @@ -2,244 +2,239 @@ id: redis title: 监控:REDIS数据库监控 sidebar_label: REDIS数据库 -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] +keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] --- > 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | +| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制| -| 
atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本| -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间| -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理| -| executable | 无 | 服务器可执行文件的路径 | -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志| -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|-----------------------------------------------| +| redis_version | 无 | Redis 服务器版本 | +| redis_git_sha1 | 无 | Git SHA1 | +| redis_git_dirty | 无 | Git dirty flag | +| redis_build_id | 无 | redis 构建的id | +| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | +| os | 无 | Redis 服务器的宿主操作系统 | +| arch_bits | 无 | 架构(32 或 64 位) | +| multiplexing_api | 无 | Redis使用的事件循环机制 | +| atomicvar_api | 无 | Redis使用的原子 API | +| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本 | +| process_id | 无 | 服务器进程的PID | +| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | +| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | +| tcp_port | 无 | TCP/IP侦听端口 | +| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | +| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | +| uptime_in_days | 无 | 自Redis服务器启动后的天数 | +| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | +| configured_hz | 无 | 服务器配置的频率设置 | +| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | +| executable | 无 | 服务器可执行文件的路径 | +| config_file | 无 | 配置文件的路径 | +| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | +| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | #### 指标集合:clients -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| connected_clients | 无 | 
客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值| -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。| -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING)| -| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|--------------------------------------------------------------------------------| +| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | +| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | +| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | +| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | +| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | +| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | +| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | +| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | #### 指标集合:memory -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字| -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值| -| used_memory_peak | byte | Redis消耗的峰值内存(字节)| -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | -| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和| -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节)| -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte| 
从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同| -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片| -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| number_of_cached_scripts | 无 |缓存的lua脚本数量 | -| maxmemory | byte | maxmemory配置指令的值| -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | -| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。| -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | -| mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL)| -| lazyfreed_objects | 无 | 已延迟释放的对象数。| - +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|---------------------------|----------|-----------------------------------------------------------------------------------------------| +| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | +| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | +| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | +| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak | byte | Redis消耗的峰值内存(字节) | +| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | +| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和 | +| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | +| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | +| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | +| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | +| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | +| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | +| total_system_memory | byte | Redis主机的内存总量 | +| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_lua | byte | Lua引擎使用的字节数 | +| used_memory_lua_human | KB | 上一个值的人类可读值 | +| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | +| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | +| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | +| maxmemory | byte | maxmemory配置指令的值 | +| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | +| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | +| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | +| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | +| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | +| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | +| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | +| rss_overhead_bytes | byte | 
used_memory_rss和allocator_resident之间的增量 | +| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | +| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | +| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | +| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | +| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | +| mem_clients_normal | 无 | 普通客户端使用的内存 | +| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | +| mem_allocator | 无 | 内存分配器,在编译时选择。 | +| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | +| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | +| lazyfreed_objects | 无 | 已延迟释放的对象数。 | #### 指标集合:persistence -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是| -| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比| -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功| -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | 
rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite| -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------|--------|-----------------------------------------------------------------------------------------------------| +| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | +| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | +| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | +| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | +| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | +| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | +| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | +| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | +| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | +| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | +| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | +| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | +| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | +| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | +| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite | +| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | +| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | +| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | +| 
aof_last_write_status | 无 | 上次aof写入状态 | +| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | +| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | #### 指标集合:stats -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数| -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数| -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数| -| tracking_total_keys | 无 | key 查询的总数| -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 
正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数| -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|----------------------------------------------------| +| total_connections_received | 无 | 服务器接受的连接总数 | +| total_commands_processed | 无 | 服务器处理的命令总数 | +| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | +| total_net_input_bytes | byte | 从网络读取的字节总数 | +| total_net_output_bytes | byte | 写入网络的总字节数 | +| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | +| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | +| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | +| sync_full | 无 | 具有副本的完整重新同步数 | +| sync_partial_ok | 无 | 接受的部分重新同步请求数 | +| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | +| expired_keys | 无 | 过期的key总数 | +| expired_stale_perc | 无 | 可能过期key的百分比 | +| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | +| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | +| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | +| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | +| keyspace_misses | 无 | 在主dict 中未查到key的次数 | +| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | +| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | +| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | +| total_forks | 无 | 自服务器启动以来的fork操作总数 | +| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | +| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | +| active_defrag_hits | 无 | 主动碎片整理命中次数 | +| active_defrag_misses | 无 | 主动碎片整理未命中次数 | +| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | +| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | +| tracking_total_keys | 无 | key 查询的总数 | +| tracking_total_items | 无 | item查询的总数 | +| tracking_total_prefixes | 无 | 前缀查询的总数 | +| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | +| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | +| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | +| total_reads_processed | 无 | 正在读取的请求数 | +| 
total_writes_processed | 无 | 正在写入的请求数 | +| io_threaded_reads_processed | 无 | 正在读取的线程数 | +| io_threaded_writes_processed | 无 | 正在写入的线程数 | #### 指标集合:replication -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串| -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID| -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量| -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-------------------------------------------------------------------------------------| +| role | 无 | 节点角色 master 主节点 slave 从节点 | +| connected_slaves | 无 | 连接的从节点数 | +| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | +| master_replid | 无 | 实例启动的随机字符串 | +| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | +| master_repl_offset | 无 | 主从同步偏移量 | +| second_repl_offset | 无 | 接受从服务ID的最大偏移量 | +| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | +| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | +| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | +| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和| -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和| -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和| -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU| -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|------------------------| +| 
used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | +| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | +| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | +| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | #### 指标集合:errorstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|-----------| +| errorstat_ERR | 无 | 错误累计出现的次数 | +| errorstat_MISCONF | 无 | | #### 指标集合:cluster -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|--------------------| +| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | #### 指标集合:commandstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数| -| cmdstat_get | 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|---------------------------------------------------------------------------------------------------------------------------| +| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | +| cmdstat_get | 无 | get命令的统计信息 | +| cmdstat_setnx | 无 | setnx命令的统计信息 | +| cmdstat_hset | 无 | hset命令的统计信息 | +| cmdstat_hget | 无 | hget命令的统计信息 | +| cmdstat_lpush | 无 | lpush命令的统计信息 | +| cmdstat_rpush | 无 | rpush命令的统计信息 | +| cmdstat_lpop | 无 | 
lpop命令的统计信息 | +| cmdstat_rpop | 无 | rpop命令的统计信息 | +| cmdstat_llen | 无 | llen命令的统计信息 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md index f12a43628ac..84cc24fc976 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |-----------|------------------------------------------------| | 注册中心Host | RocketMQ注册中心的IPV4,IPV6(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -24,7 +24,7 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 #### 指标集合:集群 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------------------|------|------------| | BrokerId | 无 | Broker唯一ID | | Address | 无 | Broker地址 | @@ -38,7 +38,7 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 #### 指标集合:消费者 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------------|------|--------| | Consumer_group | 无 | 消费者组 | | Client_quantity | 无 | 客户端数量 | @@ -46,3 +46,4 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 | Consume_type | 无 | 消费类型 | | Consume_tps | 无 | 消费TPS | | Delay | 无 | 延迟 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md index 0cf541702bc..55923468da8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -28,7 +28,7 
@@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:系统基本信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | Host Name | 无 | 主机名称 | | System Version | 无 | 操作系统版本 | @@ -36,7 +36,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:CPU 信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------------------| | info | 无 | CPU型号 | | cores | 无 | CPU内核数量 | @@ -47,7 +47,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:内存信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|----------| | total | Mb | 总内存容量 | | used | Mb | 用户程序内存量 | @@ -58,7 +58,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:磁盘信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|-----------| | disk_num | 无 | 磁盘总数 | | partition_num | 无 | 分区总数 | @@ -68,7 +68,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:网卡信息 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |----------------|------|--------| | interface_name | 无 | 网卡名称 | | receive_bytes | Mb | 入站数据流量 | @@ -76,7 +76,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 #### 指标集合:文件系统 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------------|------|---------| | filesystem | 无 | 文件系统的名称 | | used | Mb | 已使用磁盘大小 | @@ -88,7 +88,7 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | cpu_usage | % | CPU占用率 | @@ -99,9 +99,10 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | pid | 无 | 进程ID | | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md index 08788efeaae..1149ed4bdd9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md @@ -5,24 +5,24 @@ sidebar_label: ShenYu网关监控 keywords: [开源监控系统, 开源消息中间件监控, ShenYu网关监控监控] --- -> 对 ShenYu 网关的运行状态(JVM相关),请求响应等相关指标进行监测。 +> 对 ShenYu 网关的运行状态(JVM相关),请求响应等相关指标进行监测。 -## 监控前操作 +## 监控前操作 -您需要在 ShenYu 网关开启`metrics`插件,暴露对应的 prometheus metrics 接口。 +您需要在 ShenYu 网关开启`metrics`插件,暴露对应的 prometheus metrics 接口。 -开启插件, 参考 [官方文档](https://shenyu.apache.org/zh/docs/plugin-center/observability/metrics-plugin) +开启插件, 参考 [官方文档](https://shenyu.apache.org/zh/docs/plugin-center/observability/metrics-plugin) -主要如下两步骤: +主要如下两步骤: 1. 在网关的 pom.xml 文件中添加 metrics 的依赖。 ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` 2. 
在网关的配置yaml文件中编辑如下内容: @@ -39,11 +39,11 @@ shenyu: jvm_enabled: true #开启jvm的监控指标 ``` -最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 +最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -53,79 +53,78 @@ shenyu: | 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | | 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:shenyu_request_total +#### 指标集合:shenyu_request_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-------------------| -| value | 无 | 收集ShenYu网关的所有请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | 收集ShenYu网关的所有请求数量 | #### 指标集合:shenyu_request_throw_created -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------------------| -| value | 无 | 收集ShenYu网关的异常请求数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | 收集ShenYu网关的异常请求数量 | #### 指标集合:process_cpu_seconds_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| value | 无 | 用户和系统CPU总计所用的秒数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 用户和系统CPU总计所用的秒数 | #### 指标集合:process_open_fds -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|--------------| -| value | 无 | 打开的文件描述符的数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------| +| value | 无 | 打开的文件描述符的数量 | #### 指标集合:process_max_fds -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------------| -| value | 无 | 打开的文件描述符的最大数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------| +| value | 无 | 打开的文件描述符的最大数量 | #### 指标集合:jvm_info -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-----------| -| runtime | 无 | JVM 版本信息 | -| vendor | 无 | JVM 版本信息 | -| version | 无 | JVM 版本信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| runtime | 无 | JVM 版本信息 | +| vendor | 无 | JVM 版本信息 | +| version | 无 
| JVM 版本信息 | #### 指标集合:jvm_memory_bytes_used -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| area | 无 | JVM 内存区域 | -| value | MB | 给定 JVM 内存区域的已用大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| area | 无 | JVM 内存区域 | +| value | MB | 给定 JVM 内存区域的已用大小 | #### 指标集合:jvm_memory_pool_bytes_used -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-----------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已用大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的已用大小 | #### 指标集合:jvm_memory_pool_bytes_committed -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|------------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已提交大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的已提交大小 | #### 指标集合:jvm_memory_pool_bytes_max -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------| ----------- | -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的最大大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| pool | 无 | JVM 内存池 | +| value | MB | 给定 JVM 内存池的最大大小 | #### 指标集合:jvm_threads_state -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|-------------| -| state | 无 | 线程状态 | -| value | 无 | 对应线程状态的线程数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------| +| state | 无 | 线程状态 | +| value | 无 | 对应线程状态的线程数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md index 21dcd9a88f3..5755437e80e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md @@ -13,12 +13,11 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit > 详见 https://datatracker.ietf.org/doc/html/rfc821#page-13 - **协议使用:SMTP** ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | 
|---------|---------------------------------------------------| | 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️无需协议头(例如:https://、http://) | | 监控名称 | 标识此监控的名称。名称需要保持唯一 | @@ -33,9 +32,10 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit #### 指标集:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |-------------|------|-------------------| | 响应时间 | 毫秒 | SMTP 服务器响应请求所需的时间 | | 响应状态 | | 响应状态 | | SMTP 服务器标语 | | SMTP 服务器的标语 | | helo 命令返回信息 | | helo 命令返回的响应信息 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md index 086e0a63ac8..a0695849705 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md @@ -19,6 +19,7 @@ keywords: [开源监控工具, 开源 Spring Gateway 监控工具, 监控 Spring spring-boot-starter-actuator ``` + **2. 修改 YML 配置以暴露度量接口:** ```yaml @@ -35,56 +36,55 @@ management: ### 配置参数 -| 参数名称 | 参数描述 | -| ----------- |--------------------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 8080。 | +| 参数名称 | 参数描述 | +|----------|--------------------------------------------------------|-----------------------------------------------| +| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | +| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | +| 端口 | 数据库提供的默认端口为 8080。 | | 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | will continue only if the probe is successful -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | will continue only if the probe is successful | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | ### 采集指标 #### 指标收集: 健康状态 -| 指标名称 | 指标单位 | 指标描述 
| -|-------|-------|--------------------------| -| 服务状态 | 无 | 服务健康状态: UP(正常),Down(异常) | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|-------------------------| +| 服务状态 | 无 | 服务健康状态: UP(正常),Down(异常) | #### 指标收集: 环境信息 -| 指标名称 | 指标单位 | 指标描述 | -|---------|-------|----------------------------------------------| -| profile | 无 | 应用程序运行的配置环境: prod(生产环境),dev(开发环境),test(测试环境) | -| 端口号 | 无 | 应用程序暴露的端口 | -| 操作系统 | 无 | 运行操作系统 | -| 操作系统架构 | 无 | 运行操作系统的架构 | -| JDK供应商 | 无 | JDK 供应商 | -| JVM版本 | 无 | JVM 版本 | +| 指标名称 | 指标单位 | 指标描述 | +|---------|------|----------------------------------------------| +| profile | 无 | 应用程序运行的配置环境: prod(生产环境),dev(开发环境),test(测试环境) | +| 端口号 | 无 | 应用程序暴露的端口 | +| 操作系统 | 无 | 运行操作系统 | +| 操作系统架构 | 无 | 运行操作系统的架构 | +| JDK供应商 | 无 | JDK 供应商 | +| JVM版本 | 无 | JVM 版本 | #### 指标收集: 线程信息 -| 指标名称 | 指标单位 | 指标描述 | -|-------------|------------|-------------| -| 状态 | 无 | 线程状态 | -| 数量 | 无 | 线程状态对应的线程数量 | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|-------------| +| 状态 | 无 | 线程状态 | +| 数量 | 无 | 线程状态对应的线程数量 | #### 指标收集: 内存使用情况 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|-------------| -| 内存空间 | 无 | 内存空间名称 | -| 内存占用 | MB | 此空间占用的内存大小 | +| 指标名称 | 指标单位 | 指标描述 | +|------|------|------------| +| 内存空间 | 无 | 内存空间名称 | +| 内存占用 | MB | 此空间占用的内存大小 | #### 指标收集: 路由信息 -| 指标名称 | 指标单位 | 指标描述 | -|-------|-------|----------| -| 路由id | 无 | 路由 ID | -| 匹配规则 | 无 | 路由匹配规则 | -| 资源标识符 | 无 | 服务资源标识符 | -| 优先级 | 无 | 此路由的优先级 | - +| 指标名称 | 指标单位 | 指标描述 | +|-------|------|---------| +| 路由id | 无 | 路由 ID | +| 匹配规则 | 无 | 路由匹配规则 | +| 资源标识符 | 无 | 服务资源标识符 | +| 优先级 | 无 | 此路由的优先级 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md index 280c6cb6b06..e66d4237a13 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md @@ -7,7 +7,6 @@ keywords: [开源监控系统, 开源消息中间件监控, SpringBoot2.0 监控 
> 对SpringBoot2.0 actuator 暴露的通用性能指标进行采集监控。 - ## 监控前操作 如果想要通过此监控类型监控 `SpringBoot` 中的信息,则需要您的SpringBoot应用集成并开启SpringBoot Actuator。 @@ -20,6 +19,7 @@ keywords: [开源监控系统, 开源消息中间件监控, SpringBoot2.0 监控 spring-boot-starter-actuator ``` + **2、修改YML配置暴露指标接口:** ```yaml @@ -30,7 +30,9 @@ management: include: '*' enabled-by-default: on ``` + *注意:如果你的项目里还引入了认证相关的依赖,比如springboot-security,那么SpringBoot Actuator暴露出的接口可能会被拦截,此时需要你手动放开这些接口,以springboot-security为例,需要在SecurityConfig配置类中加入以下代码:* + ```java public class SecurityConfig extends WebSecurityConfigurerAdapter{ @Override @@ -46,48 +48,50 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ } } ``` + ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ |------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 应用服务对外提供的端口,默认为8080。 | | 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | | Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------ | -------- |--------------------------------| -| status | 无 | 服务健康状态: UP,Down | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|-----------------| +| status | 无 | 服务健康状态: UP,Down | #### 指标集合:environment -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------| -------- |----------------------------| -| profile | 无 | 应用运行profile: prod,dev,test | -| port | 无 | 应用暴露端口 | -| os | 无 | 运行所在操作系统 | -| os_arch | 无 | 运行所在操作系统架构 | -| jdk_vendor | 无 | jdk vendor | -| jvm_version | 无 | jvm version | 
+| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|----------------------------| +| profile | 无 | 应用运行profile: prod,dev,test | +| port | 无 | 应用暴露端口 | +| os | 无 | 运行所在操作系统 | +| os_arch | 无 | 运行所在操作系统架构 | +| jdk_vendor | 无 | jdk vendor | +| jvm_version | 无 | jvm version | #### 指标集合:threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------------- |------|--------------------| -| state | 无 | 线程状态 | -| number | 无 | 此线程状态对应的线程数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------|------|--------------| +| state | 无 | 线程状态 | +| number | 无 | 此线程状态对应的线程数量 | #### 指标集合:memory_used -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------| -| space | 无 | 内存空间名称 | -| mem_used | MB | 此空间占用内存大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|-----------| +| space | 无 | 内存空间名称 | +| mem_used | MB | 此空间占用内存大小 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md index 384f9249d16..56a63068b17 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md @@ -51,7 +51,7 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |--------|------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -65,23 +65,28 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### 采集指标 #### 指标集合:可用性 -| 指标名称 | 指标单位 | 指标帮助描述 | + +| 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms | 响应时间 | #### 指标集合:线程 -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------| -| state | 无 | 线程状态 | -| size | 无 | 此线程状态对应的线程数量 | + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------| +| state | 无 | 线程状态 | +| size | 无 | 此线程状态对应的线程数量 | #### 指标集合:内存使用 -| 指标名称 | 指标单位 | 指标帮助描述 | + +| 指标名称 | 指标单位 | 
指标帮助描述 | |----------|------|-----------| | space | 无 | 内存空间名称 | | mem_used | MB | 此空间占用内存大小 | #### 指标集合:健康状态 -| 指标名称 | 指标单位 | 指标帮助描述 | + +| 指标名称 | 指标单位 | 指标帮助描述 | |--------|------|-----------------| -| status | 无 | 服务健康状态: UP,Down | \ No newline at end of file +| status | 无 | 服务健康状态: UP,Down | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md index 5dc66e27cfc..22a5a50ddd8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md @@ -9,68 +9,68 @@ keywords: [开源监控系统, 开源数据库监控, SqlServer数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1433。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为1433。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| machine_name | 无 | 运行服务器实例的 Windows 计算机名称 | -| server_name | 无 | 与Windows实例关联的服务器和实例信息SQL Server | -| version | 无 | 实例的版本,SQL 
Server,格式为"major.minor.build.revision" | -| edition | 无 | 已安装的 实例的产品SQL Server版本 | -| start_time | 无 | 数据库启动时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------------------------------------------------| +| machine_name | 无 | 运行服务器实例的 Windows 计算机名称 | +| server_name | 无 | 与Windows实例关联的服务器和实例信息SQL Server | +| version | 无 | 实例的版本,SQL Server,格式为"major.minor.build.revision" | +| edition | 无 | 已安装的 实例的产品SQL Server版本 | +| start_time | 无 | 数据库启动时间 | #### 指标集合:performance_counters -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| database_pages | 无 | Database pages, 已获得的页面数(缓冲池) | -| target_pages | 无 | Target pages, 缓冲池必须的理想页面数 | -| page_life_expectancy | s,秒 | Page life expectancy, 数据页在缓冲池中驻留的时间,这个时间一般会大于 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, 数据库缓冲池高速缓冲命中率,被请求的数据在缓冲池中被找到的概率,一般会大于 80% 才算正常,否则可能是缓冲池容量太小 | -| checkpoint_pages_sec | 无 | Checkpoint pages/sec, 检查点每秒写入磁盘的脏页个数,如果数据过高,证明缺少内存容量 | -| page_reads_sec | 无 | Page reads/sec, 缓存池中每秒读的页数 | -| page_writes_sec | 无 | Page writes/sec, 缓存池中每秒写的页数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------|------|-------------------------------------------------------------------------------------| +| database_pages | 无 | Database pages, 已获得的页面数(缓冲池) | +| target_pages | 无 | Target pages, 缓冲池必须的理想页面数 | +| page_life_expectancy | s,秒 | Page life expectancy, 数据页在缓冲池中驻留的时间,这个时间一般会大于 300 | +| buffer_cache_hit_ratio | % | Buffer cache hit ratio, 数据库缓冲池高速缓冲命中率,被请求的数据在缓冲池中被找到的概率,一般会大于 80% 才算正常,否则可能是缓冲池容量太小 | +| checkpoint_pages_sec | 无 | Checkpoint pages/sec, 检查点每秒写入磁盘的脏页个数,如果数据过高,证明缺少内存容量 | +| page_reads_sec | 无 | Page reads/sec, 缓存池中每秒读的页数 | +| page_writes_sec | 无 | Page writes/sec, 缓存池中每秒写的页数 | #### 指标集合:connection -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| user_connection | 无 | 已连接的会话数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------| +| user_connection | 无 | 已连接的会话数 | -### 常见问题 +### 常见问题 -1. SSL连接问题修复 +1. 
SSL连接问题修复 jdk版本:jdk11 问题描述:SQL Server2019使用SA用户连接报错 -错误信息: +错误信息: + ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". ClientConnectionId:xxxxxxxxxxxxxxxxx ``` + 问题截图: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) 解决方案: -添加`SqlServer`监控时使用高级设置,自定义JDBC URL,拼接的jdbc url后面加上参数配置,```;encrypt=true;trustServerCertificate=true;```这个参数true表示无条件信任server端返回的任何根证书。 +添加`SqlServer`监控时使用高级设置,自定义JDBC URL,拼接的jdbc url后面加上参数配置,```;encrypt=true;trustServerCertificate=true;```这个参数true表示无条件信任server端返回的任何根证书。 -样例:```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` +样例:```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` -参考文档:[microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building-failed-unable-to-find-valid-certification/ba-p/2591304) +参考文档:[microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building-failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md index ce0084f7e95..73957e31fb8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md @@ -5,29 +5,30 @@ sidebar_label: SSL证书监控 keywords: [开源监控系统, 开源网站监控, SSL证书监控监控] --- -> 对网站的SSL证书过期时间,响应时间等指标进行监测 +> 对网站的SSL证书过期时间,响应时间等指标进行监测 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -| 
----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:certificate +#### 指标集合:certificate + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|----------| +| subject | 无 | 证书名称 | +| expired | 无 | 是否过期 | +| start_time | 无 | 有效期开始时间 | +| start_timestamp | ms毫秒 | 有效期开始时间戳 | +| end_time | 无 | 过期时间 | +| end_timestamp | ms毫秒 | 过期时间戳 | -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- |------|----------| -| subject | 无 | 证书名称 | -| expired | 无 | 是否过期 | -| start_time | 无 | 有效期开始时间 | -| start_timestamp | ms毫秒 | 有效期开始时间戳 | -| end_time | 无 | 过期时间 | -| end_timestamp | ms毫秒 | 过期时间戳 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md index bc5db826027..fe5eef718ef 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md @@ -9,21 +9,21 @@ keywords: [开源监控系统, 开源数据库监控, TiDB数据库监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Service端口 | TiDB数据库对外提供用于状态报告的端口,默认为10080。 | -| PD端口 | 
TiDB数据库的PD端口,默认为2379。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | -| JDBC端口 | TiDB数据库对外提供用于客户端请求的端口,默认为4000。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| JDBC URL | 数据库使用[JDBC驱动的](https://docs.pingcap.com/zh/tidb/stable/dev-guide-connect-to-tidb#jdbc)连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|-----------|------------------------------------------------------------------------------------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| Service端口 | TiDB数据库对外提供用于状态报告的端口,默认为10080。 | +| PD端口 | TiDB数据库的PD端口,默认为2379。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| JDBC端口 | TiDB数据库对外提供用于客户端请求的端口,默认为4000。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| JDBC URL | 数据库使用[JDBC驱动的](https://docs.pingcap.com/zh/tidb/stable/dev-guide-connect-to-tidb#jdbc)连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 @@ -35,15 +35,13 @@ keywords: [开源监控系统, 开源数据库监控, TiDB数据库监控] #### 指标集合:系统变量 -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| version | 无 | MySQL 的版本和 TiDB 的版本,例如 '8.0.11-TiDB-v7.5.1' | -| version_comment | 无 | TiDB 版本号的其他信息,例如 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible' | -| version_compile_machine | 无 | 运行 TiDB 的 CPU 架构的名称 | -| version_compile_os | 无 | TiDB 所在操作系统的名称 | -| max_connections | 无 | 该变量表示 TiDB 中同时允许的最大客户端连接数,用于资源控制。默认情况下,该变量值为 0 表示不限制客户端连接数。当本变量的值大于 0 且客户端连接数到达此值时,TiDB 服务端将会拒绝新的客户端连接。 | -| datadir | 无 | 数据存储的位置,位置可以是本地路径 /tmp/tidb。如果数据存储在 TiKV 上,则可以是指向 PD 服务器的路径。变量值的格式为 ${pd-ip}:${pd-port},表示 TiDB 在启动时连接到的 PD 服务器。 | -| port | 无 | 使用 MySQL 协议时 tidb-server 监听的端口。 | - 
- +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|------------------------------------------------------------------------------------------------------------------| +| version | 无 | MySQL 的版本和 TiDB 的版本,例如 '8.0.11-TiDB-v7.5.1' | +| version_comment | 无 | TiDB 版本号的其他信息,例如 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible' | +| version_compile_machine | 无 | 运行 TiDB 的 CPU 架构的名称 | +| version_compile_os | 无 | TiDB 所在操作系统的名称 | +| max_connections | 无 | 该变量表示 TiDB 中同时允许的最大客户端连接数,用于资源控制。默认情况下,该变量值为 0 表示不限制客户端连接数。当本变量的值大于 0 且客户端连接数到达此值时,TiDB 服务端将会拒绝新的客户端连接。 | +| datadir | 无 | 数据存储的位置,位置可以是本地路径 /tmp/tidb。如果数据存储在 TiKV 上,则可以是指向 PD 服务器的路径。变量值的格式为 ${pd-ip}:${pd-port},表示 TiDB 在启动时连接到的 PD 服务器。 | +| port | 无 | 使用 MySQL 协议时 tidb-server 监听的端口。 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md index f03ebfab25e..8b5e6c8aca9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md @@ -23,7 +23,7 @@ ${FORMATTER [{ + | - } ]} > 示例输出以当前时间为 `2022-04-24 02:40:00.123` -| 名称 | 描述 | 示例 | +| 名称 | 描述 | 示例 | |--------------|----------------------------|---------------------| | @now | 格式化为 `yyyy-MM-dd HH:mm:ss` | 2022-04-24 02:40:00 | | @date | 格式化为 `yyyy-MM-dd` | 2022-04-24 | @@ -42,9 +42,9 @@ ${FORMATTER [{ + | - } ]} | 名称 | 描述 | |----|----| -| y | 年 | +| y | 年 | | M | 月 | -| d | 日 | +| d | 日 | | H | 小时 | | m | 分钟 | | s | 秒 | @@ -57,10 +57,9 @@ ${FORMATTER [{ + | - } ]} #### 使用示例 1. 
简单表达式 - - `${now}` 获取当前时间,并格式化为 `yyyy-MM-dd HH:mm:ss` - - `${time+1h}` 计算当前时间一小时之后的时间,并格式化为 `HH:mm:ss` - - `${time+1h+15s+30s}` 计算当前时间一小时15分钟30秒之后的时间,并格式化为 `HH:mm:ss` + - `${now}` 获取当前时间,并格式化为 `yyyy-MM-dd HH:mm:ss` + - `${time+1h}` 计算当前时间一小时之后的时间,并格式化为 `HH:mm:ss` + - `${time+1h+15s+30s}` 计算当前时间一小时15分钟30秒之后的时间,并格式化为 `HH:mm:ss` 2. 复杂表达式模板(如果内置的格式化器无法满足需要,可以组合使用多个表达式) - - `${@year}年${@month}月${@day}日`,获取当前日期并按照 yyyy年MM月dd日格式返回 - + - `${@year}年${@month}月${@day}日`,获取当前日期并按照 yyyy年MM月dd日格式返回 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md index c306bebc550..b366ee3c2ac 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md @@ -11,67 +11,65 @@ keywords: [开源监控系统, 开源网站监控, Tomcat监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Tomcat连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置Tomcat连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | JMX连接用户名 | +| 密码 | JMX连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 - #### 指标集合:memory_pool -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| name | 无 | 指标名称 | +| committed | kb | 
总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:code_cache -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| committed | kb | 总量 | +| init | kb | 初始化大小 | +| max | kb | 最大 | +| used | kb | 已使用 | #### 指标集合:class_loading -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|----------| +| LoadedClassCount | 个 | 已加载类数量 | +| TotalLoadedClassCount | 个 | 历史已加载类总量 | +| UnloadedClassCount | 个 | 未加载类数量 | #### 指标集合:thread -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------|------|-----------| +| TotalStartedThreadCount | 个 | 已经开始的线程数量 | +| ThreadCount | 个 | 线程数 | +| PeakThreadCount | 个 | 未加载类数量 | +| DaemonThreadCount | 个 | 守护进程数 | +| CurrentThreadUserTime | ms | 使用时间 | +| CurrentThreadCpuTime | ms | 使用CPU时间 | ### Tomcat开启JMX协议步骤 -1. 搭建好tomcat后,进入tomcat下的bin目录,修改catalina.sh文件 注意⚠️替换IP地址 +1. 搭建好tomcat后,进入tomcat下的bin目录,修改catalina.sh文件 注意⚠️替换IP地址 -2. vim catalina.sh +2. 
vim catalina.sh ```aidl CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` -参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 +参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md index e5d1be3a140..3ec51e5464a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md @@ -9,74 +9,74 @@ keywords: [开源监控系统, 开源操作系统监控, Ubuntu监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Linux SSH对外提供的端口,默认为22。 | +| 用户名 | SSH连接用户名,可选 | +| 密码 | SSH连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:basic -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| hostname | 无 | 主机名称 | +| version | 无 | 操作系统版本 | +| uptime | 无 | 系统运行时间 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| info | 无 | CPU型号 | -| 
cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------| +| info | 无 | CPU型号 | +| cores | 核数 | CPU内核数量 | +| interrupt | 个数 | CPU中断数量 | +| load | 无 | CPU最近1/5/15分钟的平均负载 | +| context_switch | 个数 | 当前上下文切换数量 | +| usage | % | CPU使用率 | #### 指标集合:memory -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|----------| +| total | Mb | 总内存容量 | +| used | Mb | 用户程序内存量 | +| free | Mb | 空闲内存容量 | +| buff_cache | Mb | 缓存占用内存 | +| available | Mb | 剩余可用内存容量 | +| usage | % | 内存使用率 | #### 指标集合:disk -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|-----------| +| disk_num | 块数 | 磁盘总数 | +| partition_num | 分区数 | 分区总数 | +| block_write | 块数 | 写入磁盘的总块数 | +| block_read | 块数 | 从磁盘读出的块数 | +| write_rate | iops | 每秒写磁盘块的速率 | #### 指标集合:interface -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|---------------| +| interface_name | 无 | 网卡名称 | +| receive_bytes | byte | 入站数据流量(bytes) | +| transmit_bytes | byte | 出站数据流量(bytes) | #### 指标集合:disk_free -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|---------| 
+| filesystem | 无 | 文件系统的名称 | +| used | Mb | 已使用磁盘大小 | +| available | Mb | 可用磁盘大小 | +| usage | % | 使用率 | +| mounted | 无 | 挂载点目录 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md index 06c11717b25..ee2f388873b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md @@ -10,7 +10,7 @@ keywords: [开源监控系统, 开源网络监控, UDP 端口可用性监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |:-------|--------------------------------------------------------| | 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头 (例如: https://, http://) 。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -26,6 +26,7 @@ keywords: [开源监控系统, 开源网络监控, UDP 端口可用性监控] #### 指标集合:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |------|---------|--------| | 响应时间 | 毫秒 (ms) | 网站响应时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md index 5dbb2f2c7c6..8efe5262612 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md @@ -5,26 +5,26 @@ sidebar_label: 网站监测 keywords: [开源监控系统, 开源网站监控] --- -> 对网站是否可用,响应时间等指标进行监测 +> 对网站是否可用,响应时间等指标进行监测 -### 配置参数 +### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | 
+|---------|-------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | +| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | +| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | -### 采集指标 +### 采集指标 -#### 指标集合:summary +#### 指标集合:summary -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| responseTime | ms毫秒 | 网站响应时间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|--------| +| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md index ad8baa7d71b..3bd02f3ce18 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md @@ -9,7 +9,7 @@ keywords: [ 开源监控系统, Websocket监控 ] ### 配置参数 -| 参数名称 | 参数帮助描述 | +| 参数名称 | 参数帮助描述 | |------------------|--------------------------------------------------------------| | WebSocket服务的Host | 被监控的Websocket的IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | | 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | @@ -23,7 +23,7 @@ keywords: [ 开源监控系统, Websocket监控 ] #### 指标集合:概要 -| 指标名称 | 指标单位 | 指标帮助描述 | +| 指标名称 | 指标单位 | 指标帮助描述 | |---------------|------|---------| | responseTime | ms | 响应时间 | | httpVersion | 无 | HTTP 版本 | @@ -31,3 +31,4 @@ keywords: [ 开源监控系统, Websocket监控 ] | statusMessage | 无 | 状态消息 | | connection | 无 | 表示连接方式 | | upgrade | 无 | 升级后的协议 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md index 6a1c79b9ede..41447469e61 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md @@ -6,38 +6,39 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 --- > 通过SNMP协议对Windows操作系统的通用性能指标进行采集监控。 -> 注意⚠️ Windows服务器需开启SNMP服务 +> 注意⚠️ Windows服务器需开启SNMP服务 参考资料: [什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) [什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) [Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) +[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Windows SNMP服务对外提供的端口,默认为 161。 | -| SNMP 版本 | SNMP协议版本 V1 V2c V3 | +| 参数名称 | 参数帮助描述 | +|----------|----------------------------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Windows SNMP服务对外提供的端口,默认为 161。 | +| SNMP 版本 | SNMP协议版本 V1 V2c V3 | | SNMP 团体字 | SNMP 协议团体名(Community Name),用于实现SNMP网络管理员访问SNMP管理代理时的身份验证。类似于密码,默认值为 public | -| 超时时间 | 协议连接超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 超时时间 | 协议连接超时时间 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:system -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| name | 无 | 主机名称 | -| descr | 无 | 操作系统描述 | -| uptime | 无 | 系统运行时间 | -| numUsers | 个数 | 当前用户数 | -| services | 个数 | 当前服务数量 | -| processes | 个数 | 当前进程数量 | -| responseTime | ms | 采集响应时间 | +| 指标名称 | 
指标单位 | 指标帮助描述 | +|--------------|------|--------| +| name | 无 | 主机名称 | +| descr | 无 | 操作系统描述 | +| uptime | 无 | 系统运行时间 | +| numUsers | 个数 | 当前用户数 | +| services | 个数 | 当前服务数量 | +| processes | 个数 | 当前进程数量 | +| responseTime | ms | 采集响应时间 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md index 2c88fe1e5a9..c35a0226876 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md @@ -15,69 +15,70 @@ keywords: [大数据监控系统, Apache Yarn监控, 资源管理器监控] ## 配置参数 -| 参数名称 | 参数帮助描述 | -| ---------------- |---------------------------------------| -| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | -| 端口 | Apache Yarn 的监控端口号,默认为8088。 | -| 查询超时时间 | 查询 Apache Yarn 的超时时间,单位毫秒,默认6000毫秒。 | -| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 参数名称 | 参数帮助描述 | +|--------|-------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | Apache Yarn 的监控端口号,默认为8088。 | +| 查询超时时间 | 查询 Apache Yarn 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | ### 采集指标 #### 指标集合:ClusterMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | ---------------------------------- | -| NumActiveNMs | | 当前存活的 NodeManager 个数 | -| NumDecommissionedNMs | | 当前 Decommissioned 的 NodeManager 个数 | -| NumDecommissioningNMs| | 集群正在下线的节点数 | -| NumLostNMs | | 集群丢失的节点数 | -| NumUnhealthyNMs | | 集群不健康的节点数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------------|------|------------------------------------| +| NumActiveNMs | | 当前存活的 NodeManager 个数 | +| NumDecommissionedNMs | | 当前 Decommissioned 的 NodeManager 个数 | +| NumDecommissioningNMs | | 集群正在下线的节点数 | +| NumLostNMs | | 集群丢失的节点数 | +| NumUnhealthyNMs | | 集群不健康的节点数 | #### 指标集合:JvmMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | ------------------------------------ | -| MemNonHeapCommittedM | MB | JVM当前非堆内存大小已提交大小 | -| 
MemNonHeapMaxM | MB | JVM非堆最大可用内存 | -| MemNonHeapUsedM | MB | JVM当前已使用的非堆内存大小 | -| MemHeapCommittedM | MB | JVM当前已使用堆内存大小 | -| MemHeapMaxM | MB | JVM堆内存最大可用内存 | -| MemHeapUsedM | MB | JVM当前已使用堆内存大小 | -| GcTimeMillis | | JVM GC时间 | -| GcCount | | JVM GC次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------------|------|------------------| +| MemNonHeapCommittedM | MB | JVM当前非堆内存大小已提交大小 | +| MemNonHeapMaxM | MB | JVM非堆最大可用内存 | +| MemNonHeapUsedM | MB | JVM当前已使用的非堆内存大小 | +| MemHeapCommittedM | MB | JVM当前已使用堆内存大小 | +| MemHeapMaxM | MB | JVM堆内存最大可用内存 | +| MemHeapUsedM | MB | JVM当前已使用堆内存大小 | +| GcTimeMillis | | JVM GC时间 | +| GcCount | | JVM GC次数 | #### 指标集合:QueueMetrics -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------------------ | -------- | ------------------------------------ | -| queue | | 队列名称 | -| AllocatedVCores | | 分配的虚拟核数(已分配) | -| ReservedVCores | | 预留核数 | -| AvailableVCores | | 可用核数(尚未分配) | -| PendingVCores | | 阻塞调度核数 | -| AllocatedMB | MB | 已分配(已用)的内存大小 | -| AvailableMB | MB | 可用内存(尚未分配) | -| PendingMB | MB | 阻塞调度内存 | -| ReservedMB | MB | 预留内存 | -| AllocatedContainers | | 已分配(已用)的container数 | -| PendingContainers | | 阻塞调度container个数 | -| ReservedContainers | | 预留container数 | -| AggregateContainersAllocated | | 累积的container分配总数 | -| AggregateContainersReleased | | 累积的container释放总数 | -| AppsCompleted | | 完成的任务数 | -| AppsKilled | | 被杀掉的任务数 | -| AppsFailed | | 失败的任务数 | -| AppsPending | | 阻塞的任务数 | -| AppsRunning | | 提正在运行的任务数 | -| AppsSubmitted | | 提交过的任务数 | -| running_0 | | 运行时间小于60分钟的作业个数 | -| running_60 | | 运行时间介于60~300分钟的作业个数 | -| running_300 | | 运行时间介于300~1440分钟的作业个数 | -| running_1440 | | 运行时间大于1440分钟的作业个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------|------|-----------------------| +| queue | | 队列名称 | +| AllocatedVCores | | 分配的虚拟核数(已分配) | +| ReservedVCores | | 预留核数 | +| AvailableVCores | | 可用核数(尚未分配) | +| PendingVCores | | 阻塞调度核数 | +| AllocatedMB | MB | 已分配(已用)的内存大小 | +| AvailableMB | MB | 可用内存(尚未分配) | +| PendingMB | MB | 阻塞调度内存 | +| ReservedMB | MB | 
预留内存 | +| AllocatedContainers | | 已分配(已用)的container数 | +| PendingContainers | | 阻塞调度container个数 | +| ReservedContainers | | 预留container数 | +| AggregateContainersAllocated | | 累积的container分配总数 | +| AggregateContainersReleased | | 累积的container释放总数 | +| AppsCompleted | | 完成的任务数 | +| AppsKilled | | 被杀掉的任务数 | +| AppsFailed | | 失败的任务数 | +| AppsPending | | 阻塞的任务数 | +| AppsRunning | | 提正在运行的任务数 | +| AppsSubmitted | | 提交过的任务数 | +| running_0 | | 运行时间小于60分钟的作业个数 | +| running_60 | | 运行时间介于60~300分钟的作业个数 | +| running_300 | | 运行时间介于300~1440分钟的作业个数 | +| running_1440 | | 运行时间大于1440分钟的作业个数 | #### 指标集合:runtime -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------------------- | -------- | ---------------------------- | -| StartTime | | 启动时间戳 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|--------| +| StartTime | | 启动时间戳 | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md index 0783ceaf3fb..14d50c3c90d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md @@ -12,7 +12,7 @@ keywords: [开源监控系统, Zookeeper监控监控] > 监控 zookeeper 目前的实现方案使用的是 zookeeper 提供的四字命令 + netcat 来收集指标数据 > 需要用户自己将zookeeper的四字命令加入白名单 -1. 加白名单步骤 +1. 加白名单步骤 > 1.找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` > @@ -25,76 +25,76 @@ keywords: [开源监控系统, Zookeeper监控监控] # 将所有命令添加到白名单中 4lw.commands.whitelist=* ``` + > 3.重启服务 -```shell +```shell zkServer.sh restart ``` -2. netcat 协议 +2. netcat 协议 目前实现方案需要我们部署zookeeper的linux服务器,安装netcat的命令环境 > netcat安装步骤 -```shell -yum install -y nc -``` +> +> ```shell +> yum install -y nc +> ``` 如果终端显示以下信息则说明安装成功 + ```shell Complete! 
``` - ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Zookeeper的Linux服务器SSH端口。 | -| 查询超时时间 | 设置Zookeeper连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | Zookeeper所在Linux连接用户名 | -| 密码 | Zookeeper所在Linux连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Zookeeper的Linux服务器SSH端口。 | +| 查询超时时间 | 设置Zookeeper连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 用户名 | Zookeeper所在Linux连接用户名 | +| 密码 | Zookeeper所在Linux连接密码 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:conf -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| clientPort | 无 | 端口 | -| dataDir | 无 | 数据快照文件目录,默认10万次操作生成一次快照 | -| dataDirSize | kb | 数据快照文件大小 | -| dataLogDir | 无 | 事务日志文件目录,生产环境放在独立磁盘上 | -| dataLogSize | kb | 事务日志文件大小 | -| tickTime | ms | 服务器之间或客户端与服务器之间维持心跳的时间间隔 | -| minSessionTimeout | ms| 最小session超时时间 心跳时间x2 指定时间小于该时间默认使用此时间 | -| maxSessionTimeout | ms |最大session超时时间 心跳时间x20 指定时间大于该时间默认使用此时间 | -| serverId | 无 | 服务器编号 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|----------------------------------------| +| clientPort | 无 | 端口 | +| dataDir | 无 | 数据快照文件目录,默认10万次操作生成一次快照 | +| dataDirSize | kb | 数据快照文件大小 | +| dataLogDir | 无 | 事务日志文件目录,生产环境放在独立磁盘上 | +| dataLogSize | kb | 事务日志文件大小 | +| tickTime | ms | 服务器之间或客户端与服务器之间维持心跳的时间间隔 | +| minSessionTimeout | ms | 最小session超时时间 心跳时间x2 指定时间小于该时间默认使用此时间 | +| maxSessionTimeout | ms | 最大session超时时间 心跳时间x20 指定时间大于该时间默认使用此时间 | +| serverId | 无 | 服务器编号 | #### 指标集合:stats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| zk_version | 无 | 服务器版本 | -| 
zk_server_state | 无 | 服务器角色 | -| zk_num_alive_connections | 个 | 连接数 | -| zk_avg_latency | ms | 平均延时 | -| zk_outstanding_requests | 个 | 堆积请求数 | -| zk_znode_count | 个 | znode结点数量 | -| zk_packets_sent | 个 | 发包数 | -| zk_packets_received | 个 | 收包数 | -| zk_watch_count | 个 | watch数量 | -| zk_max_file_descriptor_count | 个 | 最大文件描述符数量 | -| zk_approximate_data_size | kb | 数据大小 | -| zk_open_file_descriptor_count | 个 | 打开的文件描述符数量 | -| zk_max_latency | ms | 最大延时 | -| zk_ephemerals_count | 个 | 临时节点数 | -| zk_min_latency | ms | 最小延时 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------------------|------|------------| +| zk_version | 无 | 服务器版本 | +| zk_server_state | 无 | 服务器角色 | +| zk_num_alive_connections | 个 | 连接数 | +| zk_avg_latency | ms | 平均延时 | +| zk_outstanding_requests | 个 | 堆积请求数 | +| zk_znode_count | 个 | znode结点数量 | +| zk_packets_sent | 个 | 发包数 | +| zk_packets_received | 个 | 收包数 | +| zk_watch_count | 个 | watch数量 | +| zk_max_file_descriptor_count | 个 | 最大文件描述符数量 | +| zk_approximate_data_size | kb | 数据大小 | +| zk_open_file_descriptor_count | 个 | 打开的文件描述符数量 | +| zk_max_latency | ms | 最大延时 | +| zk_ephemerals_count | 个 | 临时节点数 | +| zk_min_latency | ms | 最小延时 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md index cd0274a5f98..5c3f1f90feb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md @@ -16,7 +16,6 @@ slug: / [![QQ](https://img.shields.io/badge/QQ-630061200-orange)](https://qm.qq.com/q/FltGGGIX2m) [![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCri75zfWX0GHqJFPENEbLow?logo=youtube&label=YouTube%20Channel)](https://www.youtube.com/channel/UCri75zfWX0GHqJFPENEbLow) - ## 🎡 介绍 [Apache HertzBeat](https://github.com/apache/hertzbeat) (incubating) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 @@ 
-31,7 +30,7 @@ slug: / - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 - 提供强大的状态页构建能力,轻松向用户传达您产品服务的实时状态。 -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ---- @@ -48,7 +47,6 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![hertzbeat](/img/home/9.png) - ### 内置监控类型 **官方内置了大量的监控模版类型,方便用户直接在页面添加使用,一款监控类型对应一个YML监控模版** @@ -113,11 +111,11 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 高性能集群 -> 当监控数量指数级上升,采集性能下降或者环境不稳定容易造成采集器单点故障时,这时我们的采集器集群就出场了。 +> 当监控数量指数级上升,采集性能下降或者环境不稳定容易造成采集器单点故障时,这时我们的采集器集群就出场了。 -- `HertzBeat` 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 -- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 -- 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 +- `HertzBeat` 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 +- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 +- 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 ![hertzbeat](/img/docs/cluster-arch.png) @@ -134,10 +132,10 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 易用友好 -- 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 -- 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 -- 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 -- 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 +- 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 +- 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 +- 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 +- 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 - 阈值告警通知友好,基于表达式阈值配置,多种告警通知渠道,支持告警静默,时段标签告警级别过滤等。 ### 完全开源 @@ -151,8 +149,7 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![cncf](/img/home/cncf-landscape-left-logo.svg) ------ - +--- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ----- @@ -269,7 +266,6 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/13.png) - ### 告警静默 - 当通过阈值规则判断触发告警后,会进入到告警静默,告警静默会根据规则对特定一次性时间段或周期性时候段的告警消息屏蔽静默,此时间段不发送告警消息。 @@ -301,8 +297,7 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/9.png) - ------ +--- **还有更多强大的功能快去探索呀。Have Fun!** 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/design.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/design.md index 7799d12fe52..7f3854c3e12 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/design.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/design.md @@ -1,9 +1,9 @@ --- id: design title: 设计文档 -sidebar_label: 设计文档 +sidebar_label: 设计文档 --- -### HertzBeat 架构 +### HertzBeat 架构 -![architecture](/img/docs/hertzbeat-arch.svg) +![architecture](/img/docs/hertzbeat-arch.svg) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md index 46699d69a23..0e01e014901 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md @@ -1,20 +1,20 @@ --- id: resource title: 相关资源 -sidebar_label: 相关资源 +sidebar_label: 相关资源 --- -## 图标资源 +## 图标资源 -### HertzBeat LOGO +### HertzBeat LOGO ![logo](/img/hertzbeat-logo.svg) Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) -### HertzBeat Brand LOGO +### HertzBeat Brand LOGO ![logo](/img/hertzbeat-brand.svg) -Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) +Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md index 710ae8cbee5..7ad94c08b93 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md @@ -1,7 +1,7 @@ --- id: account-modify title: 配置修改账户密码和加密密钥 -sidebar_label: 更新账户和密钥 +sidebar_label: 更新账户和密钥 
--- ## 更新账户 @@ -9,7 +9,7 @@ sidebar_label: 更新账户和密钥 Apache HertzBeat (incubating) 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 -配置文件内容参考 项目仓库[/script/sureness.yml](https://github.com/apache/hertzbeat/blob/master/script/sureness.yml) +配置文件内容参考 项目仓库[/script/sureness.yml](https://github.com/apache/hertzbeat/blob/master/script/sureness.yml) ```yaml @@ -127,4 +127,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** +**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md index 7dde7ec14e9..01380784169 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md @@ -1,23 +1,25 @@ --- id: custom-config title: 常见参数配置 -sidebar_label: 常见参数配置 +sidebar_label: 常见参数配置 --- 这里描述了如果配置短信服务器,内置可用性告警触发次数等。 -**`hertzbeat`的配置文件`application.yml`** +**`hertzbeat`的配置文件`application.yml`** -### 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 - 安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 +### 配置HertzBeat的配置文件 + +修改位于 `hertzbeat/config/application.yml` 的配置文件 +注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 +安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 1. 
配置短信发送服务器 -> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 +> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 + +在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) -在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) ```yaml common: sms: @@ -28,15 +30,17 @@ common: sign-name: 赫兹跳动 template-id: 1343434 ``` + 1.1 腾讯云短信创建签名(sign-name) ![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) 1.2 腾讯云短信创建正文模板(template-id) + ``` 监控:{1},告警级别:{2}。内容:{3} ``` -![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) +![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) 1.3 腾讯云短信创建应用(app-id) ![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) @@ -44,8 +48,7 @@ common: 1.4 腾讯云访问管理(secret-id、secret-key) ![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) - -2. 配置告警自定义参数 +2. 配置告警自定义参数 ```yaml alerter: @@ -53,11 +56,12 @@ alerter: console-url: https://console.tancloud.io ``` -3. 使用外置redis代替内存存储实时指标数据 +3. 
使用外置redis代替内存存储实时指标数据 -> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 +> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 + +注意⚠️ `memory.enabled: false, redis.enabled: true` -注意⚠️ `memory.enabled: false, redis.enabled: true` ```yaml warehouse: store: @@ -70,3 +74,4 @@ warehouse: port: 6379 password: 123456 ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md index 89cb46ef951..aa01b6f5d30 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md @@ -1,14 +1,14 @@ --- id: docker-deploy title: 通过 Docker 方式安装 HertzBeat -sidebar_label: Docker方式部署 +sidebar_label: Docker方式部署 --- -> 推荐使用 Docker 部署 Apache HertzBeat (incubating) +> 推荐使用 Docker 部署 Apache HertzBeat (incubating) 1. 下载安装Docker环境 Docker 工具自身的下载请参考以下资料: - [Docker官网文档](https://docs.docker.com/get-docker/) + [Docker官网文档](https://docs.docker.com/get-docker/) [菜鸟教程-Docker教程](https://www.runoob.com/docker/docker-tutorial.html) 安装完毕后终端查看Docker版本是否正常输出。 @@ -16,7 +16,6 @@ sidebar_label: Docker方式部署 $ docker -v Docker version 20.10.12, build e91ed57 ``` - 2. 拉取HertzBeat Docker镜像 镜像版本TAG可查看 [dockerhub 官方镜像仓库](https://hub.docker.com/r/apache/hertzbeat/tags) 或者使用 [quay.io 镜像仓库](https://quay.io/repository/apache/hertzbeat) @@ -25,12 +24,13 @@ sidebar_label: Docker方式部署 $ docker pull apache/hertzbeat $ docker pull apache/hertzbeat-collector ``` + 若网络超时或者使用 + ```shell $ docker pull quay.io/tancloud/hertzbeat $ docker pull quay.io/tancloud/hertzbeat-collector ``` - 3. 部署HertzBeat您可能需要掌握的几条命令 ```shell @@ -46,25 +46,22 @@ sidebar_label: Docker方式部署 ctrl+d或者 $ exit ``` - 4. 
挂载并配置HertzBeat的配置文件(可选) 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml - 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数 - - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) - + - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) + - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) + - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) 5. 挂载并配置HertzBeat用户配置文件,自定义用户密码(可选) HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) - 具体修改步骤参考 [配置修改账户密码](account-modify) - -6. 启动HertzBeat Docker容器 + 具体修改步骤参考 [配置修改账户密码](account-modify) +6. 
启动HertzBeat Docker容器 -```shell +```shell $ docker run -d -p 1157:1157 -p 1158:1158 \ -e LANG=zh_CN.UTF-8 \ -e TZ=Asia/Shanghai \ @@ -76,34 +73,31 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ --name hertzbeat apache/hertzbeat ``` - 这条命令启动一个运行HertzBeat的Docker容器,并且将容器的1157端口映射到宿主机的1157端口上。若宿主机已有进程占用该端口,则需要修改主机映射端口。 - - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 - - `-e LANG=zh_CN.UTF-8` : 设置系统语言 - - `-e TZ=Asia/Shanghai` : 设置系统时区 - - `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口,请注意,前面是宿主机的端口号,后面是容器的端口号。1157是WEB端口,1158是集群端口。 - - `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 - - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 - - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选,不需要可删除)挂载上上一步修改的本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。我们需要修改此配置文件的MYSQL,TDengine配置信息来连接外部服务。 - - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选,不需要可删除)挂载上一步修改的账户配置文件到容器中,若无修改账户需求可删除此命令参数。 - - - 注意⚠️ 挂载文件时,前面参数为你自定义本地文件地址,后面参数为docker容器内文件地址(固定) - - - `--name hertzbeat` : 命名容器名称 hertzbeat - - - `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 - - ```shell - $ docker update --restart=always hertzbeat - ``` - - - `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** + 这条命令启动一个运行HertzBeat的Docker容器,并且将容器的1157端口映射到宿主机的1157端口上。若宿主机已有进程占用该端口,则需要修改主机映射端口。 + +- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 +- `-e LANG=zh_CN.UTF-8` : 设置系统语言 +- `-e TZ=Asia/Shanghai` : 设置系统时区 +- `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口,请注意,前面是宿主机的端口号,后面是容器的端口号。1157是WEB端口,1158是集群端口。 +- `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 +- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 +- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选,不需要可删除)挂载上上一步修改的本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。我们需要修改此配置文件的MYSQL,TDengine配置信息来连接外部服务。 +- `-v 
$(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选,不需要可删除)挂载上一步修改的账户配置文件到容器中,若无修改账户需求可删除此命令参数。 +- 注意⚠️ 挂载文件时,前面参数为你自定义本地文件地址,后面参数为docker容器内文件地址(固定) +- `--name hertzbeat` : 命名容器名称 hertzbeat +- `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 + + ```shell + $ docker update --restart=always hertzbeat + ``` +- `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** 7. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 8. 部署采集器集群(可选) -```shell +```shell $ docker run -d \ -e IDENTITY=custom-collector-name \ -e MODE=public \ @@ -112,52 +106,57 @@ $ docker run -d \ --name hertzbeat-collector apache/hertzbeat-collector ``` - 这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 - - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 - - `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 - - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 - - `-e MANAGER_HOST=127.0.0.1` : 重要⚠️ 设置连接的主HertzBeat服务地址IP。 - - `-e MANAGER_PORT=1158` : (可选) 设置连接的主HertzBeat服务地址端口,默认 1158. - - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 - - `--name hertzbeat-collector` : 命名容器名称 hertzbeat-collector - - `apache/hertzbeat-collector` : 使用拉取最新的的HertzBeat采集器官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat-collector`代替。** +这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 +- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 +- `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 +- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 +- `-e MANAGER_HOST=127.0.0.1` : 重要⚠️ 设置连接的主HertzBeat服务地址IP。 +- `-e MANAGER_PORT=1158` : (可选) 设置连接的主HertzBeat服务地址端口,默认 1158. 
+- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 +- `--name hertzbeat-collector` : 命名容器名称 hertzbeat-collector +- `apache/hertzbeat-collector` : 使用拉取最新的的HertzBeat采集器官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat-collector`代替。** -8. 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 +8. 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 -**HAVE FUN** +**HAVE FUN** -### Docker部署常见问题 +### Docker部署常见问题 **最多的问题就是网络问题,请先提前排查** 1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** -此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` +> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. **按照流程部署,访问 http://ip:1157/ 无界面** -请参考下面几点排查问题: + 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 -3. **日志报错TDengine连接或插入SQL失败** +3. **日志报错TDengine连接或插入SQL失败** + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + +4. **监控历史图表长时间都一直无数据** -4. **监控历史图表长时间都一直无数据** > 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 > 二:Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB或Tdengine IP账户密码等配置是否正确 +> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB或Tdengine IP账户密码等配置是否正确 5. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - -> 安装初始化此数据库参考 [TDengine安装初始化](tdengine-init) 或 [IoTDB安装初始化](iotdb-init) +> 安装初始化此数据库参考 [TDengine安装初始化](tdengine-init) 或 [IoTDB安装初始化](iotdb-init) 6. 
安装配置了时序数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 请检查配置参数是否正确 > iot-db 或td-engine enable 是否设置为true > 注意⚠️若hertzbeat和IotDB,TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md index c83f0638605..5bf5faaf76e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md @@ -1,7 +1,7 @@ --- id: greptime-init title: 依赖时序数据库服务GreptimeDB安装初始化 -sidebar_label: 使用GreptimeDB存储指标数据(可选) +sidebar_label: 使用GreptimeDB存储指标数据(可选) --- Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) @@ -11,18 +11,21 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** + +### 通过Docker方式安装GreptimeDB -### 通过Docker方式安装GreptimeDB > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装GreptimeDB +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装GreptimeDB ```shell $ docker run -p 4000-4004:4000-4004 \ @@ -33,16 +36,16 @@ $ docker run -p 4000-4004:4000-4004 \ --rpc-addr 0.0.0.0:4001 ``` - `-v /opt/greptimedb:/tmp/greptimedb` 为greptimedb数据目录本地持久化挂载,需将`/opt/greptimedb`替换为实际本地存在的目录 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/greptimedb:/tmp/greptimedb` 为greptimedb数据目录本地持久化挂载,需将`/opt/greptimedb`替换为实际本地存在的目录 +使用```$ docker ps```查看数据库是否启动成功 -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -57,8 +60,9 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 
时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md index 427f07af423..ccff933ac1f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md @@ -1,36 +1,39 @@ --- id: influxdb-init title: 依赖时序数据库服务InfluxDB安装初始化 -sidebar_label: 使用InfluxDB存储指标数据(可选) +sidebar_label: 使用InfluxDB存储指标数据(可选) --- Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 -InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 +InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** ### 1. 直接使用华为云服务 GaussDB For Influx -> 开通使用[华为云云数据库 GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) - +> 开通使用[华为云云数据库 GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) +> > 获取云数据库对外暴露连接地址,账户密码即可 ⚠️注意云数据库默认开启了SSL,云数据库地址应使用 `https:` -### 2. 通过Docker方式安装InfluxDB +### 2. 通过Docker方式安装InfluxDB + > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装InfluxDB 1.x +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装InfluxDB 1.x ```shell $ docker run -p 8086:8086 \ @@ -38,17 +41,16 @@ $ docker run -p 8086:8086 \ influxdb:1.8 ``` - `-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 +使用```$ docker ps```查看数据库是否启动成功 - -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -67,10 +69,9 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 -1. 时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 +1. 
时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md index b4888e2af03..b24eba892a2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md @@ -3,6 +3,7 @@ id: iotdb-init title: 依赖时序数据库服务IoTDB安装初始化 sidebar_label: 使用IoTDB存储指标数据(可选) --- + Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 @@ -89,16 +90,15 @@ warehouse: 参数说明: - -| 参数名称 | 参数说明 | -| ------------------- |-------------------------------------------| +| 参数名称 | 参数说明 | +|---------------------|-------------------------------------------| | enabled | 是否启用 | | host | IoTDB数据库地址 | | rpc-port | IoTDB数据库端口 | | node-urls | IoTDB集群地址 | | username | IoTDB数据库账户 | | password | IoTDB数据库密码 | -| version | IoTDB数据库版本,已废弃,仅支持V1.* | +| version | IoTDB数据库版本,已废弃,仅支持V1.* | | query-timeout-in-ms | 查询超时时间 | | expire-time | 数据存储时间,默认'7776000000'(90天,单位为毫秒,-1代表永不过期) | @@ -122,3 +122,4 @@ warehouse: > iot-db enable是否设置为true > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md index b25a8408387..1d122575d2c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md @@ -1,23 +1,27 @@ --- id: mysql-change title: 关系型数据库使用 Mysql 替换依赖的 H2 存储系统元数据 -sidebar_label: 元数据使用Mysql存储(可选) +sidebar_label: 元数据使用Mysql存储(可选) --- -MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) 
除了支持使用默认内置的H2数据库外,还可以切换为使用MYSQL存储监控信息,告警信息,配置信息等结构化关系数据。 + +MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) 除了支持使用默认内置的H2数据库外,还可以切换为使用MYSQL存储监控信息,告警信息,配置信息等结构化关系数据。 注意⚠️ 使用外置Mysql数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 -> 如果您已有MYSQL环境,可直接跳到数据库创建那一步。 +> 如果您已有MYSQL环境,可直接跳到数据库创建那一步。 + +### 通过Docker方式安装MYSQL -### 通过Docker方式安装MYSQL 1. 下载安装Docker环境 Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后请于终端检查Docker版本输出是否正常。 + 安装完毕后请于终端检查Docker版本输出是否正常。 + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. Docker安装MYSQl +2. Docker安装MYSQl + ``` $ docker run -d --name mysql \ -p 3306:3306 \ @@ -26,26 +30,29 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) --restart=always \ mysql:5.7 ``` + `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 -### 数据库创建 +### 数据库创建 + 1. 进入MYSQL或使用客户端连接MYSQL服务 - `mysql -uroot -p123456` + `mysql -uroot -p123456` 2. 创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 
查看hertzbeat数据库是否创建成功 `show databases;` -### 修改hertzbeat的配置文件application.yml切换数据源 +### 修改hertzbeat的配置文件application.yml切换数据源 - 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 - ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) + ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) + + 需修改部分原参数: - 需修改部分原参数: ```yaml spring: datasource: @@ -54,7 +61,9 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` - 具体替换参数如下,需根据mysql环境配置账户密码IP: + + 具体替换参数如下,需根据mysql环境配置账户密码IP: + ```yaml spring: datasource: @@ -66,12 +75,11 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) jpa: database: mysql ``` +- 通过docker启动时,建议修改host为宿主机的外网IP地址,包括mysql连接字符串和redis。 -- 通过docker启动时,建议修改host为宿主机的外网IP地址,包括mysql连接字符串和redis。 +**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** - -### 常见问题 +### 常见问题 1. 
缺少hibernate的mysql方言,导致启动异常 Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set @@ -87,3 +95,4 @@ spring: hibernate: dialect: org.hibernate.dialect.MySQL5InnoDBDialect ``` + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md index b5f9d7e0456..894c58f1e3b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md @@ -1,21 +1,23 @@ --- id: package-deploy title: 通过安装包安装 HertzBeat -sidebar_label: 安装包方式部署 +sidebar_label: 安装包方式部署 --- + > Apache HertzBeat (incubating) 支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 - + 1. 下载HertzBeat安装包 下载您系统环境对应的安装包 `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` - [下载页面](/docs/download) - 2. 配置HertzBeat的配置文件(可选) - 解压安装包到主机 eg: /opt/hertzbeat - ``` + 解压安装包到主机 eg: /opt/hertzbeat + + ``` $ tar zxvf hertzbeat-xx.tar.gz or $ unzip -o hertzbeat-xx.zip ``` + 修改位于 `hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 - 若需使用邮件发送告警,需替换`application.yml`里面的邮件服务器参数 - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) @@ -26,20 +28,20 @@ sidebar_label: 安装包方式部署 3. 配置用户配置文件(可选,自定义配置用户密码) HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过修改位于 `hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 - 具体参考 [配置修改账户密码](account-modify) + 具体参考 [配置修改账户密码](account-modify) 4. 部署启动 - 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - ``` + 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat + + ``` $ ./startup.sh ``` - 5. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 - + 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 6. 
部署采集器集群(可选) - - 下载解压您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [下载页面](/docs/download) + - 下载解压您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [下载页面](/docs/download) - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` + ```yaml collector: dispatch: @@ -56,7 +58,6 @@ sidebar_label: 安装包方式部署 **HAVE FUN** - ### 安装包部署常见问题 **最多的问题就是网络环境问题,请先提前排查** @@ -67,25 +68,30 @@ sidebar_label: 安装包方式部署 要求:JAVA17环境 下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) 安装后命令行检查是否成功安装 - ``` - $ java -version - java version "17.0.9" - Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) - Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) - ``` +``` +$ java -version +java version "17.0.9" +Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) +Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) + +``` 2. **按照流程部署,访问 http://ip:1157/ 无界面** 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter +> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. 
**监控历史图表长时间都一直无数据** + > 一:时序数据库是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 +> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md index 3d554d84f0a..6a48c741201 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md @@ -3,42 +3,43 @@ id: postgresql-change title: 关系型数据库使用 PostgreSQL 替换依赖的 H2 存储系统元数据 sidebar_label: 元数据使用PostgreSQL存储(可选) --- -PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBMS)。Apache HertzBeat (incubating) 除了支持使用默认内置的H2数据库外,还可以切换为使用PostgreSQL存储监控信息,告警信息,配置信息等结构化关系数据。 -注意⚠️ 使用外置PostgreSQL数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 +PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBMS)。Apache HertzBeat (incubating) 除了支持使用默认内置的H2数据库外,还可以切换为使用PostgreSQL存储监控信息,告警信息,配置信息等结构化关系数据。 -> 如果您已有PostgreSQL环境,可直接跳到数据库创建那一步。 +注意⚠️ 使用外置PostgreSQL数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 +> 如果您已有PostgreSQL环境,可直接跳到数据库创建那一步。 -### 通过Docker方式安装PostgreSQL +### 通过Docker方式安装PostgreSQL 1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` - 2. Docker安装 PostgreSQL + ``` $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` + 使用```$ docker ps```查看数据库是否启动成功 3. Create database in container manually or with [script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgresql-iotdb/conf/sql/schema.sql). -### 数据库创建 +### 数据库创建 + +1. 
进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 -1. 进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 ``` su - postgres psql ``` - 2. 创建名称为hertzbeat的数据库 `CREATE DATABASE hertzbeat;` - 3. 查看hertzbeat数据库是否创建成功 `\l` @@ -58,7 +59,9 @@ spring: password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` + 具体替换参数如下,需根据 PostgreSQL 环境配置账户密码IP: + ```yaml spring: config: @@ -81,4 +84,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md index 90d38032d0e..918bfdea17a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md @@ -1,18 +1,18 @@ --- id: quickstart title: 快速开始 -sidebar_label: 快速开始 +sidebar_label: 快速开始 --- ### 🐕 开始使用 - 如果您是想将 Apache HertzBeat (incubating) 部署到本地搭建监控系统,请参考下面的部署文档进行操作。 +### 🍞 HertzBeat安装 -### 🍞 HertzBeat安装 > HertzBeat支持通过源码安装启动,Docker容器运行和安装包方式安装部署,CPU架构支持X86/ARM64。 -#### 方式一:Docker方式快速安装 +#### 方式一:Docker方式快速安装 1. `docker` 环境仅需一条命令即可开始 @@ -29,14 +29,15 @@ sidebar_label: 快速开始 ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 -更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) +更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) -#### 方式二:通过安装包安装 +#### 方式二:通过安装包安装 1. 
下载您系统环境对应的安装包`hertzbeat-xx.tar.gz` [Download](https://hertzbeat.apache.org/docs/download) 2. 配置 HertzBeat 的配置文件 `hertzbeat/config/application.yml`(可选) @@ -45,6 +46,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 5. 部署采集器集群(可选) - 下载您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [Download](https://hertzbeat.apache.org/docs/download) - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` + ```yaml collector: dispatch: @@ -59,9 +61,9 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 -更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) +更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) -#### 方式三:本地代码启动 +#### 方式三:本地代码启动 1. 此为前后端分离项目,本地代码调试需要分别启动后端工程`manager`和前端工程`web-app` 2. 后端:需要`maven3+`, `java17`和`lombok`环境,修改`YML`配置信息并启动`manager`服务 @@ -74,7 +76,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 通过 [docker-compose部署脚本](https://github.com/apache/hertzbeat/tree/master/script/docker-compose) 一次性把 mysql 数据库, iotdb/tdengine 时序数据库和 hertzbeat 安装部署。 -详细步骤参考 [docker-compose部署方案](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/README.md) +详细步骤参考 [docker-compose部署方案](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/README.md) ##### 方式五:Kubernetes Helm Charts 部署 hertzbeat+collector+mysql+iotdb @@ -121,4 +123,4 @@ $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ apache/iotdb:0.13.3-node ``` -详细步骤参考 [使用时序数据库IoTDB存储指标数据(可选)](iotdb-init) +详细步骤参考 [使用时序数据库IoTDB存储指标数据(可选)](iotdb-init) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/rainbond-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/rainbond-deploy.md index 5da0679d327..8e01b8cf7a2 100644 
--- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/rainbond-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/rainbond-deploy.md @@ -1,7 +1,7 @@ --- id: rainbond-deploy title: 使用 Rainbond 部署 HertzBeat -sidebar_label: Rainbond方式部署 +sidebar_label: Rainbond方式部署 --- 如果你不熟悉 Kubernetes,想在 Kubernetes 中安装 Apache HertzBeat (incubating),可以使用 Rainbond 来部署。Rainbond 是一个基于 Kubernetes 构建的云原生应用管理平台,可以很简单的将你的应用部署到 Kubernetes中。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md index 62cd3e157a3..a6efb36482d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md @@ -4,7 +4,6 @@ title: SSL证书过期监控使用案例 sidebar_label: 使用案例 --- - 现在大部分网站都默认支持 HTTPS,我们申请的证书一般是3个月或者1年,很容易随着时间的流逝SSL证书过期了我们却没有第一时间发现,或者在过期之前没有及时更新证书。 这篇文章介绍如果使用 hertzbeat 监控系统来检测我们网站的SSL证书有效期,当证书过期时或证书快过期前几天,给我们发告警消息。 @@ -13,7 +12,6 @@ sidebar_label: 使用案例 Apache HertzBeat (incubating) 一个拥有强大自定义监控能力,无需Agent的实时监控工具。网站监测,PING连通性,端口可用性,数据库,操作系统,中间件,API监控,阈值告警,告警通知(邮件微信钉钉飞书)。 - github: https://github.com/apache/hertzbeat #### 安装 HertzBeat @@ -30,10 +28,8 @@ github: https://github.com/apache/hertzbeat > 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 - ![](/img/docs/start/ssl_1.png) - 2. 
配置监控网站 > 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md index 7fcd8fd460e..85c6db66eb2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md @@ -1,31 +1,33 @@ --- id: tdengine-init title: 依赖时序数据库服务TDengine安装初始化 -sidebar_label: 使用TDengine存储指标数据(可选) +sidebar_label: 使用TDengine存储指标数据(可选) --- Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 -TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 +TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有TDengine环境,可直接跳到创建数据库实例那一步。 +### 通过Docker方式安装TDengine -### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装TDengine +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. 
Docker安装TDengine ```shell $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ @@ -34,23 +36,23 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ tdengine/tdengine:3.0.4.0 ``` - `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 - `-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 - 使用```$ docker ps```查看数据库是否启动成功 +`-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 +`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 +使用```$ docker ps```查看数据库是否启动成功 + +### 创建数据库实例 -### 创建数据库实例 +> [TDengine CLI 小技巧](https://docs.taosdata.com/develop/model/) -> [TDengine CLI 小技巧](https://docs.taosdata.com/develop/model/) +1. 进入数据库Docker容器 -1. 进入数据库Docker容器 ``` $ docker exec -it tdengine /bin/bash ``` - 2. 修改账户密码 > 建议您修改密码。TDengine默认的账户密码是 root/taosdata - > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: + > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: ``` root@tdengine-server:~/TDengine-server# taos @@ -58,6 +60,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> ``` + > 在 TDengine CLI 中使用 alter user 命令可以修改用户密码,缺省密码为 taosdata 3. 创建名称为hertzbeat的数据库 @@ -77,24 +80,23 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ taos> show databases; taos> use hertzbeat; ``` - 5. 退出TDengine CLI ``` 输入 q 或 quit 或 exit 回车 ``` -**注意⚠️若是安装包安装的TDengine** +**注意⚠️若是安装包安装的TDengine** > 除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 +### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** ```yaml warehouse: @@ -112,19 +114,24 @@ warehouse: 2. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 -2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] +2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 -3. 监控详情历史图片不展示或无数据,已经配置了TDengine +3. 监控详情历史图片不展示或无数据,已经配置了TDengine + > 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 4. 安装配置了TDengine数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] + > 请检查配置参数是否正确 > td-engine enable是否设置为true > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 +> 可根据logs目录下启动日志排查 + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/upgrade.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/upgrade.md index b84726b38e0..e528f8b60c6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/upgrade.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/upgrade.md @@ -1,7 +1,7 @@ --- id: upgrade title: HertzBeat 新版本更新指引 -sidebar_label: 版本更新指引 +sidebar_label: 版本更新指引 --- **Apache HertzBeat (incubating) 的发布版本列表** @@ -14,8 +14,6 @@ HertzBeat 的元数据信息保存在 H2 或 Mysql, PostgreSQL 关系型数据 **升级前您需要保存备份好数据库的数据文件和监控模版文件** - - ### Docker部署方式的升级 1. 
若使用了自定义监控模版 @@ -23,32 +21,26 @@ HertzBeat 的元数据信息保存在 H2 或 Mysql, PostgreSQL 关系型数据 - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - 然后在后续升级启动 docker 容器的时候需要挂载上这个 define 目录,`-v $(pwd)/define:/opt/hertzbeat/define` - `-v $(pwd)/define:/opt/hertzbeat/define` - -2. 若使用内置默认 H2 数据库 +2. 若使用内置默认 H2 数据库 - 需挂载或备份 `-v $(pwd)/data:/opt/hertzbeat/data` 容器内的数据库文件目录 `/opt/hertzbeat/data` - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,注意需要将数据库文件目录挂载 `-v $(pwd)/data:/opt/hertzbeat/data` - 3. 若使用外置关系型数据库 Mysql, PostgreSQL - 无需挂载备份容器内的数据库文件目录 - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,`application.yml`配置数据库连接即可 - ### 安装包部署方式的升级 1. 若使用内置默认 H2 数据库 - - 备份安装包下的数据库文件目录 `/opt/hertzbeat/data` + - 备份安装包下的数据库文件目录 `/opt/hertzbeat/data` - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动 - 2. 若使用外置关系型数据库 Mysql, PostgreSQL - 无需备份安装包下的数据库文件目录 - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动,`application.yml`配置数据库连接即可 - - -**HAVE FUN** +**HAVE FUN** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md index 7668cefcaf3..05355fe6559 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md @@ -1,7 +1,7 @@ --- id: victoria-metrics-init title: 依赖时序数据库服务VictoriaMetrics安装初始化 -sidebar_label: 使用VictoriaMetrics存储指标数据(推荐) +sidebar_label: 使用VictoriaMetrics存储指标数据(推荐) --- Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) @@ -11,21 +11,23 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 
VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决方案和时序数据库,兼容 Prometheus 生态。推荐版本(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) **注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** +**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 +### 通过Docker方式安装VictoriaMetrics -### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Docker安装VictoriaMetrics +> 1. 下载安装Docker环境 +> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +> 安装完毕后终端查看Docker版本是否正常输出。 +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Docker安装VictoriaMetrics ```shell $ docker run -d -p 8428:8428 \ @@ -34,16 +36,16 @@ $ docker run -d -p 8428:8428 \ victoriametrics/victoria-metrics:v1.95.1 ``` - `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 - 使用```$ docker ps```查看数据库是否启动成功 +`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 +使用```$ docker ps```查看数据库是否启动成功 -3. 在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 +3. 在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 配置HertzBeat的配置文件 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 + 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** +**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** ```yaml warehouse: @@ -61,8 +63,9 @@ warehouse: 4. 重启 HertzBeat -### 常见问题 +### 常见问题 1. 
时序数据库是否都需要配置,能不能都用 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md index 55d89c73cba..219620a230c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md @@ -4,27 +4,27 @@ title: 监控模版中心 sidebar_label: 监控模版 --- -> Apache HertzBeat (incubating) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 - +> Apache HertzBeat (incubating) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? -这是它的架构原理: +这是它的架构原理: ![hertzBeat](/img/docs/hertzbeat-arch.png) -**我们将所有监控采集类型(mysql,website,jvm,k8s)都定义为yml模版,用户可以导入这些模版到hertzbeat系统中,使其支持对应类型的监控,非常方便!** +**我们将所有监控采集类型(mysql,website,jvm,k8s)都定义为yml模版,用户可以导入这些模版到hertzbeat系统中,使其支持对应类型的监控,非常方便!** ![](/img/docs/advanced/extend-point-1.png) **欢迎大家一起贡献你使用过程中自定义的通用监控类型YML模版,可用的模板如下:** -### 应用服务监控模版 +### 应用服务监控模版  👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
+ 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
+ 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
+ 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
@@ -32,7 +32,7 @@ sidebar_label: 监控模版  👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
-### 数据库监控模版 +### 数据库监控模版  👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
@@ -49,7 +49,7 @@ sidebar_label: 监控模版  👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
-### 操作系统监控模版 +### 操作系统监控模版  👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
@@ -64,7 +64,6 @@ sidebar_label: 监控模版  👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- ### 中间件监控模版  👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
@@ -78,13 +77,12 @@ sidebar_label: 监控模版  👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- ### 云原生监控模版  👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
-### 网络监控模版 +### 网络监控模版  👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md index cb8cec5b7aa..9ccb0e9454b 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md @@ -4,14 +4,16 @@ title: HTTP Protocol System Default Parsing Method sidebar_label: Default Parsing Method --- -> After calling the HTTP api to obtain the response data, use the default parsing method of hertzbeat to parse the response data. +> After calling the HTTP api to obtain the response data, use the default parsing method of hertzbeat to parse the response data. -**The interface response data structure must be consistent with the data structure rules specified by hertzbeat** +**The interface response data structure must be consistent with the data structure rules specified by hertzbeat** -### HertzBeat data format specification -Note⚠️ The response data is JSON format. +### HertzBeat data format specification + +Note⚠️ The response data is JSON format. Single layer format :key-value + ```json { "metricName1": "metricValue", @@ -20,7 +22,9 @@ Single layer format :key-value "metricName4": "metricValue" } ``` + Multilayer format:Set key value in the array + ```json [ { @@ -37,9 +41,11 @@ Multilayer format:Set key value in the array } ] ``` + eg: -Query the CPU information of the custom system. The exposed interface is `/metrics/cpu`. We need `hostname,core,useage` Metric. -If there is only one virtual machine, its single-layer format is : +Query the CPU information of the custom system. The exposed interface is `/metrics/cpu`. We need `hostname,core,useage` Metric. 
+If there is only one virtual machine, its single-layer format is : + ```json { "hostname": "linux-1", @@ -49,7 +55,9 @@ If there is only one virtual machine, its single-layer format is : "runningTime": 100 } ``` -If there are multiple virtual machines, the multilayer format is: : + +If there are multiple virtual machines, the multilayer format is: : + ```json [ { @@ -76,7 +84,7 @@ If there are multiple virtual machines, the multilayer format is: : ] ``` -**The corresponding monitoring template yml can be configured as follows** +**The corresponding monitoring template yml can be configured as follows** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -146,3 +154,4 @@ metrics: # Hertzbeat default parsing is used here parseType: default ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md index d2107ffba96..43d0b6cd9c9 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md @@ -8,12 +8,10 @@ Through this tutorial, we describe step by step how to add a monitoring type bas Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [http Protocol Customization](extend-http). - ### HTTP protocol parses the general response structure to obtain metric data > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. 
- ``` { "code": 200, @@ -22,6 +20,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz } ``` + As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: @@ -63,16 +62,13 @@ As above, usually our background API interface will design such a general return **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. **Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: custom @@ -200,39 +196,31 @@ metrics: **The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type. ** - ![](/img/docs/advanced/extend-http-example-1.png) - ### The system page adds the monitoring of `hertzbeat` monitoring type > We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. 
- ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### Set threshold alarm notification > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add recipients, set alarm notifications, etc. Have Fun!!! - ---- #### over! This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. +If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. **github: https://github.com/apache/hertzbeat** diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md index f5396e3dde6..edd713acad9 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md @@ -22,6 +22,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz "identifier": "admin" } ``` + **The response structure data is as follows**: ```json @@ -40,11 +41,9 @@ Before reading this tutorial, we hope that you are familiar with how to customiz **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding 
types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - 1. The custom monitoring type needs to add a new configuration monitoring template yml. We directly reuse the `hertzbeat` monitoring type in Tutorial 1 and modify it based on it A monitoring configuration definition file named after the monitoring type - hertzbeat_token @@ -214,15 +213,12 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) - ** After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page. ** ![](/img/docs/advanced/extend-http-example-6.png) ![](/img/docs/advanced/extend-http-example-7.png) - - ### Use `token` as a variable parameter to collect and use the following metricss **Add an index group definition `summary` in `app-hertzbeat_token.yml`, which is the same as `summary` in Tutorial 1, and set the collection priority to 1** @@ -334,8 +330,7 @@ metrics: # Response data analysis method: default-system rules, jsonPath-jsonPath script, website-website usability metric monitoring parseType: jsonPath parseScript: '$.data' - - +--- - name: summary # The smaller the index group scheduling priority (0-127), the higher the priority, and the index group with low priority will not be scheduled until the collection of index groups with high priority is completed, and the index groups with the same priority will be scheduled and collected in parallel # The metrics with priority 0 is the availability metrics, that is, it will be scheduled first, and other metricss will continue to be scheduled if the collection is successful, and the scheduling will be interrupted if the collection fails @@ -385,13 +380,13 @@ metrics: > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. 
Have Fun!!! ----- +--- #### over! This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. +If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. **github: https://github.com/apache/hertzbeat** diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md index 772c96d20d3..86a49c06756 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md @@ -3,16 +3,18 @@ id: extend-http-jsonpath title: HTTP Protocol JsonPath Parsing Method sidebar_label: JsonPath Parsing Method --- + > After calling the HTTP api to obtain the response data, use JsonPath script parsing method to parse the response data. -Note⚠️ The response data is JSON format. +Note⚠️ The response data is JSON format. + +**Use the JsonPath script to parse the response data into data that conforms to the data structure rules specified by HertzBeat** -**Use the JsonPath script to parse the response data into data that conforms to the data structure rules specified by HertzBeat** +#### JsonPath Operator -#### JsonPath Operator -[JSONPath online verification](https://www.jsonpath.cn) +[JSONPath online verification](https://www.jsonpath.cn) -| JSONPATH | Help description | +| JSONPATH | Help description | |------------------|----------------------------------------------------------------------------------------| | $ | Root object or element | | @ | Current object or element | @@ -25,8 +27,10 @@ Note⚠️ The response data is JSON format. 
| ?() | Filter (script) expression | | () | Script Expression | -#### HertzBeat data format specification +#### HertzBeat data format specification + Single layer format :key-value + ```json { "metricName1": "metricValue", @@ -35,7 +39,9 @@ Single layer format :key-value "metricName4": "metricValue" } ``` + Multilayer format:Set key value in the array + ```json [ { @@ -56,7 +62,8 @@ Multilayer format:Set key value in the array #### Example Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. -The raw data returned by the interface is as follows: +The raw data returned by the interface is as follows: + ```json { "firstName": "John", @@ -80,7 +87,8 @@ The raw data returned by the interface is as follows: } ``` -We use the jsonpath script to parse, and the corresponding script is: `$.number[*]`,The parsed data structure is as follows: +We use the jsonpath script to parse, and the corresponding script is: `$.number[*]`,The parsed data structure is as follows: + ```json [ { @@ -93,9 +101,10 @@ We use the jsonpath script to parse, and the corresponding script is: `$.number[ } ] ``` + This data structure conforms to the data format specification of HertzBeat, and the Metric `type,num` is successfully extracted. 
-**The corresponding monitoring template yml can be configured as follows** +**The corresponding monitoring template yml can be configured as follows** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -163,3 +172,4 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http.md index a69b54e84d0..5c4735bd2cd 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http.md @@ -1,35 +1,33 @@ --- id: extend-http title: HTTP Protocol Custom Monitoring -sidebar_label: HTTP Protocol Custom Monitoring +sidebar_label: HTTP Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use HTTP protocol to customize Metric monitoring +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use HTTP protocol to customize Metric monitoring -### HTTP protocol collection process +### HTTP protocol collection process 【**Call HTTP API**】->【**Response Verification**】->【**Parse Response Data**】->【**Default method parsing|JsonPath script parsing | XmlPath parsing(todo) | Prometheus parsing**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of HTTP protocol. We need to configure HTTP request parameters, configure which Metrics to obtain, and configure the parsing method and parsing script for response data. -HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. 
+HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. **System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. For details, refer to [**System Default Parsing**](extend-http-default) -**JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) - +**JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) -### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- -Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. +Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_http` which use the HTTP protocol to collect data. 
@@ -204,3 +202,4 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md b/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md index 1748e372961..09e2c031e0c 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md @@ -1,68 +1,73 @@ --- id: extend-jdbc title: JDBC Protocol Custom Monitoring -sidebar_label: JDBC Protocol Custom Monitoring +sidebar_label: JDBC Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JDBC(support mysql,mariadb,postgresql,sqlserver at present) to customize Metric monitoring. -> JDBC protocol custom monitoring allows us to easily monitor Metrics we want by writing SQL query statement. -### JDBC protocol collection process -【**System directly connected to MYSQL**】->【**Run SQL query statement**】->【**parse reponse data: oneRow, multiRow, columns**】->【**Metric data extraction**】 +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JDBC(support mysql,mariadb,postgresql,sqlserver at present) to customize Metric monitoring. +> JDBC protocol custom monitoring allows us to easily monitor Metrics we want by writing SQL query statement. + +### JDBC protocol collection process + +【**System directly connected to MYSQL**】->【**Run SQL query statement**】->【**parse reponse data: oneRow, multiRow, columns**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of JDBC protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query SQL statements. 
-### Data parsing method +### Data parsing method + We can obtain the corresponding Metric data through the data fields queried by SQL and the Metric mapping we need. At present, there are three mapping parsing methods:oneRow, multiRow, columns. -#### **oneRow** -> Query a row of data, return the column name of the result set through query and map them to the queried field. +#### **oneRow** + +> Query a row of data, return the column name of the result set through query and map them to the queried field. eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book limit 1; -Here the Metric field and the response data can be mapped into a row of collected data one by one. +Here the Metric field and the response data can be mapped into a row of collected data one by one. #### **multiRow** -> Query multiple rows of data, return the column names of the result set and map them to the queried fields. + +> Query multiple rows of data, return the column names of the result set and map them to the queried fields. eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book; -Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. +Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. #### **columns** -> Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. + +> Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. 
eg: queried fields:one two three four query SQL:select key, value from book; -SQL response data: +SQL response data: -| key | value | -|---------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | +| key | value | +|-------|-------| +| one | 243 | +| two | 435 | +| three | 332 | +| four | 643 | Here by mapping the Metric field with the key of the response data, we can obtain the corresponding value as collection and monitoring data. -### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. -eg:Define a custom monitoring type `app` named `example_sql` which use the JDBC protocol to collect data. - +eg:Define a custom monitoring type `app` named `example_sql` which use the JDBC protocol to collect data. 
```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -236,3 +241,4 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md b/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md index 2c051dc9c2a..d6d9efdb651 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md @@ -1,12 +1,14 @@ --- id: extend-jmx title: JMX Protocol Custom Monitoring -sidebar_label: JMX Protocol Custom Monitoring +sidebar_label: JMX Protocol Custom Monitoring --- + > From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JMX to customize Metric monitoring. > JMX protocol custom monitoring allows us to easily monitor Metrics we want by config JMX Mbeans Object. ### JMX protocol collection process + 【**Peer Server Enable Jmx Service**】->【**HertzBeat Connect Peer Server Jmx**】->【**Query Jmx Mbean Object Data**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of JMX protocol. We need to configure JMX request parameters, configure which Metrics to obtain, and configure Mbeans Object. @@ -15,25 +17,24 @@ It can be seen from the process that we define a monitoring type of JMX protocol By configuring the monitoring template YML metrics `field`, `aliasFields`, `objectName` of the `jmx` protocol to map and parse the `Mbean` object information exposed by the peer system. 
-### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ![](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_jvm` which use the JVM protocol to collect data. - ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: service @@ -191,3 +192,4 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-point.md b/home/versioned_docs/version-v1.4.x/advanced/extend-point.md index eba1811e4fc..314e3f1affa 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-point.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-point.md @@ -1,31 +1,29 @@ --- id: extend-point title: Custom Monitoring -sidebar_label: Custom Monitoring +sidebar_label: Custom Monitoring --- -> HertzBeat has custom monitoring ability. You only need to configure monitoring template yml to fit a custom monitoring type. -> Custom monitoring currently supports [HTTP protocol](extend-http),[JDBC protocol](extend-jdbc), [SSH protocol](extend-ssh), [JMX protocol](extend-jmx), [SNMP protocol](extend-snmp). And it will support more general protocols in the future. 
-### Custom Monitoring Steps +> HertzBeat has custom monitoring ability. You only need to configure monitoring template yml to fit a custom monitoring type. +> Custom monitoring currently supports [HTTP protocol](extend-http),[JDBC protocol](extend-jdbc), [SSH protocol](extend-ssh), [JMX protocol](extend-jmx), [SNMP protocol](extend-snmp). And it will support more general protocols in the future. -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** +### Custom Monitoring Steps +**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- Configuration usages of the monitoring templates yml are detailed below. -### Monitoring Templates YML +### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. +> +> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. +eg:Define a custom monitoring type `app` named `example2` which use the HTTP protocol to collect data. -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example2` which use the HTTP protocol to collect data. 
- -**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - +**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -132,3 +130,4 @@ metrics: parseType: website ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md b/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md index c97aea1f766..b3bb9173c87 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md @@ -1,23 +1,22 @@ --- id: extend-snmp title: SNMP Protocol Custom Monitoring -sidebar_label: SNMP Protocol Custom Monitoring +sidebar_label: SNMP Protocol Custom Monitoring --- > From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SNMP to customize Metric monitoring. > JMX protocol custom monitoring allows us to easily monitor Metrics we want by config SNMP MIB OIDs. ### SNMP protocol collection process + 【**Peer Server Enable SNMP Service**】->【**HertzBeat Connect Peer Server SNMP**】->【**Query Oids Data**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of Snmp protocol. We need to configure Snmp request parameters, configure which Metrics to obtain, and configure oids. - ### Data parsing method By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` protocol of the monitoring template YML to capture the data specified by the peer and parse the mapping. 
- ### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** @@ -25,18 +24,17 @@ By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` p ![](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_windows` which use the SNMP protocol to collect data. - ```yaml # The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring category: os @@ -171,3 +169,4 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md b/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md index 1567c9a762e..6db1d4a5675 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md @@ -1,21 +1,25 @@ --- id: extend-ssh title: SSH Protocol Custom Monitoring -sidebar_label: SSH Protocol Custom Monitoring +sidebar_label: SSH Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SSH protocol to customize Metric monitoring. 
-> SSH protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. -### SSH protocol collection process -【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SSH protocol to customize Metric monitoring. +> SSH protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. + +### SSH protocol collection process + +【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of SSH protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query script statements. -### Data parsing method +### Data parsing method + We can obtain the corresponding Metric data through the data fields queried by the SHELL script and the Metric mapping we need. At present, there are two mapping parsing methods:oneRow and multiRow which can meet the needs of most Metrics. -#### **oneRow** -> Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. +#### **oneRow** + +> Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. 
eg: Metrics of Linux to be queried hostname-host name,uptime-start time @@ -23,31 +27,37 @@ Host name original query command:`hostname` Start time original query command:`uptime | awk -F "," '{print $1}'` Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): `hostname; uptime | awk -F "," '{print $1}'` -The data responded by the terminal is: +The data responded by the terminal is: + ``` tombook 14:00:15 up 72 days -``` +``` + At last collected Metric data is mapped one by one as: hostname is `tombook` -uptime is `14:00:15 up 72 days` +uptime is `14:00:15 up 72 days` -Here the Metric field and the response data can be mapped into a row of collected data one by one +Here the Metric field and the response data can be mapped into a row of collected data one by one #### **multiRow** -> Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. + +> Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. 
eg: Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory -Memory metrics original query command:`free -m`, Console response: +Memory metrics original query command:`free -m`, Console response: + ```shell total used free shared buff/cache available Mem: 7962 4065 333 1 3562 3593 Swap: 8191 33 8158 ``` + In hertzbeat multiRow format parsing requires a one-to-one mapping between the column name of the response data and the indicaotr value, so the corresponding query SHELL script is: `free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` -Console response is: +Console response is: + ```shell total used free buff_cache available 7962 4066 331 3564 3592 @@ -60,18 +70,17 @@ Here the Metric field and the response data can be mapped into collected data on **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_linux` which use the SSH protocol to collect data. 
- ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: os @@ -203,3 +212,4 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` + diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md b/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md index f3b93341874..363cb51e089 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md @@ -8,12 +8,10 @@ Through this tutorial, we describe step by step how to customize and adapt a mon Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [Http Protocol Customization](extend-http). - ### HTTP protocol parses the general response structure to obtain metrics data > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. - ``` { "code": 200, @@ -22,6 +20,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz } ``` + As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. 
Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: @@ -58,17 +57,14 @@ As above, usually our background API interface will design such a general return **This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app. ** - ### Add Monitoring Template Yml **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. **Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** @@ -199,32 +195,24 @@ metrics: **The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type. ** - ![](/img/docs/advanced/extend-http-example-1.png) - ### The system page adds the monitoring of `hertzbeat` monitoring type > We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! 
- ![](/img/docs/advanced/extend-http-example-4.png) - - ### Set threshold alarm notification > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. Have Fun!!! - ---- #### over! diff --git a/home/versioned_docs/version-v1.4.x/help/activemq.md b/home/versioned_docs/version-v1.4.x/help/activemq.md index 52e3090fde2..f24bc37fbbb 100644 --- a/home/versioned_docs/version-v1.4.x/help/activemq.md +++ b/home/versioned_docs/version-v1.4.x/help/activemq.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] **Use Protocol: JMX** -### Pre-monitoring Operations +### Pre-monitoring Operations > You need to enable the `JMX` service on ActiveMQ, HertzBeat uses the JMX protocol to collect metrics from ActiveMQ. @@ -26,6 +26,7 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] 2. Modify the `bin/env` file in the installation directory, configure the JMX port IP, etc. 
The original configuration information will be as follows + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" @@ -36,6 +37,7 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ``` Update to the following configuration, ⚠️ pay attention to modify `local external IP` + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" @@ -52,7 +54,7 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | | Monitoring name | The name that identifies this monitoring, and the name needs to be unique. 
| @@ -64,82 +66,81 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | | Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | -### Collect Metrics +### Collect Metrics #### metrics: broker -| Metric Name | Unit | Description | +| Metric Name | Unit | Description | |-------------------------|------|-----------------------------------------------------------------------| -| BrokerName | None | The name of the broker. | -| BrokerVersion | None | The version of the broker. | -| Uptime | None | Uptime of the broker. | +| BrokerName | None | The name of the broker. | +| BrokerVersion | None | The version of the broker. | +| Uptime | None | Uptime of the broker. | | UptimeMillis | ms | Uptime of the broker in milliseconds. | -| Persistent | None | Messages are synchronized to disk. | +| Persistent | None | Messages are synchronized to disk. | | MemoryPercentUsage | % | Percent of memory limit used. | | StorePercentUsage | % | Percent of store limit used. | | TempPercentUsage | % | Percent of temp limit used. | -| CurrentConnectionsCount | None | Attribute exposed for management | -| TotalConnectionsCount | None | Attribute exposed for management | -| TotalEnqueueCount | None | Number of messages that have been sent to the broker. | -| TotalDequeueCount | None | Number of messages that have been acknowledged on the broker. | -| TotalConsumerCount | None | Number of message consumers subscribed to destinations on the broker. | -| TotalProducerCount | None | Number of message producers active on destinations on the broker. | -| TotalMessageCount | None | Number of unacknowledged messages on the broker. 
| -| AverageMessageSize | None | Average message size on this broker | -| MaxMessageSize | None | Max message size on this broker | -| MinMessageSize | None | Min message size on this broker | - -#### metrics: topic - -| Metric Name | Unit | Description | -|-------------------------|------|-------------------------------------------------------------------------------------------| -| Name | None | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | None | The percentage of the memory limit used | -| ProducerCount | None | Number of producers attached to this destination | -| ConsumerCount | None | Number of consumers subscribed to this destination. | -| EnqueueCount | None | Number of messages that have been sent to the destination. | -| DequeueCount | None | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | None | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | None | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | None | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | None | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - +| CurrentConnectionsCount | None | Attribute exposed for management | +| TotalConnectionsCount | None | Attribute exposed for management | +| TotalEnqueueCount | None | Number of messages that have been sent to the broker. | +| TotalDequeueCount | None | Number of messages that have been acknowledged on the broker. | +| TotalConsumerCount | None | Number of message consumers subscribed to destinations on the broker. | +| TotalProducerCount | None | Number of message producers active on destinations on the broker. | +| TotalMessageCount | None | Number of unacknowledged messages on the broker. | +| AverageMessageSize | None | Average message size on this broker | +| MaxMessageSize | None | Max message size on this broker | +| MinMessageSize | None | Min message size on this broker | + +#### metrics: topic + +| Metric Name | Unit | Description | +|--------------------|------|-------------------------------------------------------------------------------------------| +| Name | None | Name of this destination. | +| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | +| MemoryPercentUsage | None | The percentage of the memory limit used | +| ProducerCount | None | Number of producers attached to this destination | +| ConsumerCount | None | Number of consumers subscribed to this destination. | +| EnqueueCount | None | Number of messages that have been sent to the destination. 
| +| DequeueCount | None | Number of messages that has been acknowledged (and removed) from the destination. | +| ForwardCount | None | Number of messages that have been forwarded (to a networked broker) from the destination. | +| InFlightCount | None | Number of messages that have been dispatched to, but not acknowledged by, consumers. | +| DispatchCount | None | Number of messages that has been delivered to consumers, including those not acknowledged | +| ExpiredCount | None | Number of messages that have been expired. | +| StoreMessageSize | B | The memory size of all messages in this destination's store. | +| AverageEnqueueTime | ms | Average time a message was held on this destination. | +| MaxEnqueueTime | ms | The longest time a message was held on this destination | +| MinEnqueueTime | ms | The shortest time a message was held on this destination | +| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | +| AverageMessageSize | B | Average message size on this destination | +| MaxMessageSize | B | Max message size on this destination | +| MinMessageSize | B | Min message size on this destination | #### metrics: memory_pool -| Metric Name | Unit | Description | -|-------------| ----------- |----------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric Name | Unit | Description | +|-------------|------|--------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### metrics: class_loading -| Metric Name | Unit | Description | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - +| Metric Name | Unit | Description | +|-----------------------|------|--------------------------| +| 
LoadedClassCount | | Loaded Class Count | +| TotalLoadedClassCount | | Total Loaded Class Count | +| UnloadedClassCount | | Unloaded Class Count | #### metrics: thread -| Metric Name | Unit | Description | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | +| Metric Name | Unit | Description | +|-------------------------|------|----------------------------| +| TotalStartedThreadCount | | Total Started Thread Count | +| ThreadCount | | Thread Count | +| PeakThreadCount | | Peak Thread Count | +| DaemonThreadCount | | Daemon Thread Count | +| CurrentThreadUserTime | ms | Current Thread User Time | +| CurrentThreadCpuTime | ms | Current Thread Cpu Time | + diff --git a/home/versioned_docs/version-v1.4.x/help/airflow.md b/home/versioned_docs/version-v1.4.x/help/airflow.md index 5323ede8110..52367155d89 100644 --- a/home/versioned_docs/version-v1.4.x/help/airflow.md +++ b/home/versioned_docs/version-v1.4.x/help/airflow.md @@ -9,33 +9,31 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8080 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| HTTPS | 
是否启用HTTPS | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:airflow_health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | -------------------- | -| metadatabase | 无 | metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|------------------| +| metadatabase | 无 | metadatabase健康情况 | +| scheduler | 无 | scheduler健康情况 | +| triggerer | 无 | triggerer健康情况 | #### 指标集合:airflow_version -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | --------------- | -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------| +| value | 无 | Airflow版本 | +| git_version | 无 | Airflow git版本 | diff --git a/home/versioned_docs/version-v1.4.x/help/alert_console.md b/home/versioned_docs/version-v1.4.x/help/alert_console.md index e727fec4771..45ab7d791d3 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_console.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_console.md @@ -6,13 +6,13 @@ sidebar_label: Console address in alarm template > After the threshold is triggered, send the alarm information. When you notify through DingDing / enterprise Wechat / FeiShu robot or email, the alarm content has a detailed link to log in to the console. - ### Custom settings In our startup configuration file application.yml, find the following configuration + ```yml alerter: console-url: #Here is our custom console address ``` -The default value is the official console address of HertzBeat. \ No newline at end of file +The default value is the official console address of HertzBeat. 
diff --git a/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md b/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md index fb63d52aa48..b86ed662940 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md @@ -5,15 +5,15 @@ sidebar_label: Alert DingDing robot notification keywords: [open source monitoring tool, open source alerter, open source DingDing robot notification] --- -> After the threshold is triggered send alarm information and notify the recipient by DingDing robot. +> After the threshold is triggered send alarm information and notify the recipient by DingDing robot. -### Operation steps +### Operation steps 1. **【DingDing desktop client】-> 【Group settings】-> 【Intelligent group assistant】-> 【Add new robot-select custom】-> 【Set robot name and avatar】-> 【Note⚠️Set custom keywords: HertzBeat】 ->【Copy its webhook address after adding successfully】** -> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. +> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. -![email](/img/docs/help/alert-notice-8.png) +![email](/img/docs/help/alert-notice-8.png) 2. **【Save access_token value of the WebHook address of the robot】** @@ -24,18 +24,18 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin ![email](/img/docs/help/alert-notice-9.png) -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### DingDing robot common issues -### DingDing robot common issues +1. DingDing group did not receive the robot alarm notification. -1. DingDing group did not receive the robot alarm notification. > Please check whether there is any triggered alarm information in the alarm center. > Please check whether DingDing robot is configured with security custom keywords :HertzBeat. > Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_discord.md b/home/versioned_docs/version-v1.4.x/help/alert_discord.md index 8dfdca384fa..7aa565c0acf 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_discord.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_discord.md @@ -45,7 +45,6 @@ keywords: [open source monitoring tool, open source alerter, open source Discord ![bot](/img/docs/help/discord-bot-7.png) - ### Add an alarm notification person in HertzBeat, the notification method is Discord Bot 1. 
**[Alarm notification] -> [Add recipient] -> [Select Discord robot notification method] -> [Set robot Token and ChannelId] -> [OK]** @@ -58,13 +57,12 @@ keywords: [open source monitoring tool, open source alerter, open source Discord ![email](/img/docs/help/alert-notice-policy.png) +### Discord Bot Notification FAQ -### Discord Bot Notification FAQ - -1. Discord doesn't receive bot alert notifications +1. Discord doesn't receive bot alert notifications > Please check whether the alarm information has been triggered in the alarm center > Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured -> Please check whether the bot is properly authorized by the Discord chat server +> Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_email.md b/home/versioned_docs/version-v1.4.x/help/alert_email.md index 353ae4673fe..fb6dc7fa571 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_email.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_email.md @@ -5,34 +5,35 @@ sidebar_label: Alert email notification keywords: [open source monitoring tool, open source alerter, open source email notification] --- -> After the threshold is triggered send alarm information and notify the recipient by email. +> After the threshold is triggered send alarm information and notify the recipient by email. -### Operation steps +### Operation steps -1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** +1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** -![email](/img/docs/help/alert-notice-1.png) +![email](/img/docs/help/alert-notice-1.png) 2. 
**【Get verification code】-> 【Enter email verification code】-> 【Confirm】** -![email](/img/docs/help/alert-notice-2.png) + ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) +![email](/img/docs/help/alert-notice-3.png) -3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### Email notification common issues -### Email notification common issues +1. Hertzbeat deployed on its own intranet cannot receive email notifications -1. Hertzbeat deployed on its own intranet cannot receive email notifications -> Hertzbeat needs to configure its own mail server, not tancloud. Please confirm whether you have configured its own mail server in application.yml +> Hertzbeat needs to configure its own mail server, not tancloud. Please confirm whether you have configured its own mail server in application.yml + +2. Cloud environment tancloud cannot receive email notification -2. Cloud environment tancloud cannot receive email notification > Please check whether there is any triggered alarm information in the alarm center. > Please check whether the mailbox is configured correctly and whether the alarm strategy association is configured. 
-> Please check whether the warning email is blocked in the trash can of the mailbox. +> Please check whether the warning email is blocked in the trash can of the mailbox. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md b/home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md index f9e072e436a..1d5d41a15bc 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md @@ -5,30 +5,30 @@ sidebar_label: Alert Enterprise Wechat App notification keywords: [open source monitoring tool, open source alerter, open source Enterprise Wechat App notification] --- -> After the threshold is triggered send alarm information and notify the recipient by enterprise WeChat App. +> After the threshold is triggered send alarm information and notify the recipient by enterprise WeChat App. -### Operation steps +### Operation steps 1. **【Enterprise Wechat backstage】-> 【App Management】-> 【Create an app】-> 【Set App message】->【Copy AgentId and Secret adding successfully】** -![email](/img/docs/help/alert-wechat-1.jpg) +![email](/img/docs/help/alert-wechat-1.jpg) 2. **【Alarm notification】->【Add new recipient】 ->【Select Enterprise WeChat App notification method】->【Set Enterprise WeChat ID,Enterprise App ID and Enterprise App Secret 】-> 【Confirm】** ![email](/img/docs/help/alert-wechat-2.jpg) -3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-wechat-3.jpg) +![email](/img/docs/help/alert-wechat-3.jpg) +### Enterprise WeChat App common issues -### Enterprise WeChat App common issues +1. Enterprise WeChat App did not receive the alarm notification. -1. Enterprise WeChat App did not receive the alarm notification. > Please check if the user has application permissions. > Please check if the enterprise application callback address settings are normal. > Please check if the server IP is on the enterprise application whitelist. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_feishu.md b/home/versioned_docs/version-v1.4.x/help/alert_feishu.md index 56606012021..8f7e9391001 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_feishu.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_feishu.md @@ -5,30 +5,30 @@ sidebar_label: Alert FeiShu robot notification keywords: [open source monitoring tool, open source alerter, open source feishu bot notification] --- -> After the threshold is triggered send alarm information and notify the recipient by FeiShu robot. +> After the threshold is triggered send alarm information and notify the recipient by FeiShu robot. -### Operation steps +### Operation steps 1. **【FeiShu client】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -2. **【Save the key value of the WebHook address of the robot】** +2. 
**【Save the key value of the WebHook address of the robot】** > eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### FeiShu robot notification common issues -### FeiShu robot notification common issues +1. FeiShu group did not receive the robot alarm notification. -1. FeiShu group did not receive the robot alarm notification. > Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. +> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.4.x/help/alert_slack.md b/home/versioned_docs/version-v1.4.x/help/alert_slack.md index 2540a27451d..5148432fe8b 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_slack.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_slack.md @@ -25,12 +25,11 @@ Refer to the official website document [Sending messages using Incoming Webhooks ![email](/img/docs/help/alert-notice-policy.png) - ### Slack Notification FAQ 1. Slack did not receive the robot warning notification > Please check whether the alarm information has been triggered in the alarm center -> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured +> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_telegram.md b/home/versioned_docs/version-v1.4.x/help/alert_telegram.md index 1fbe4f0ae7e..cb60f266778 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_telegram.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_telegram.md @@ -54,13 +54,12 @@ Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token ![email](/img/docs/help/alert-notice-policy.png) - ### Telegram Bot Notification FAQ 1. Telegram did not receive the robot warning notification > Please check whether the alarm information has been triggered in the alarm center > Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured -> UserId should be the UserId of the recipient of the message +> UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.4.x/help/alert_threshold.md b/home/versioned_docs/version-v1.4.x/help/alert_threshold.md index 893f674164d..c62dee02704 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_threshold.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_threshold.md @@ -1,18 +1,18 @@ --- id: alert_threshold title: Threshold alarm configuration -sidebar_label: Threshold alarm configuration +sidebar_label: Threshold alarm configuration --- -> Configure the alarm threshold (warning alarm, critical alarm, emergency alarm) for the monitoring Metrics, and the system calculates and triggers the alarm according to the threshold configuration and the collected Metric data. +> Configure the alarm threshold (warning alarm, critical alarm, emergency alarm) for the monitoring Metrics, and the system calculates and triggers the alarm according to the threshold configuration and the collected Metric data. -### Operation steps +### Operation steps -1. **【Alarm configuration】->【Add new threshold】-> 【Confirm after configuration】** +1. **【Alarm configuration】->【Add new threshold】-> 【Confirm after configuration】** -![threshold](/img/docs/help/alert-threshold-1.png) +![threshold](/img/docs/help/alert-threshold-1.png) -As shown above: +As shown above: **Metric object**:Select the monitoring Metric object for which we need to configure the threshold. Eg:website monitoring type -> summary Metric set -> responseTime-response time Metric **Threshold trigger expression**:Calculate and judge whether to trigger the threshold according to this expression. See the page prompts for expression environment variables and operators. Eg:set the response time greater than 50 to trigger an alarm, and the expression is `responseTime > 50`. For detailed help on threshold expression, see [Threshold expression help](alert_threshold_expr) @@ -20,17 +20,17 @@ As shown above: **Trigger times**:How many times will the threshold be triggered before the alarm is really triggered. 
**Notification template**:Notification information Template sent after alarm triggering, See page prompts for template environment variables, eg:`${app}.${metrics}.${metric} Metric's value is ${responseTime}, greater than 50 triggers an alarm` **Global default**: Set whether this threshold is valid for such global Metrics, and the default is No. After adding a new threshold, you need to associate the threshold with the monitoring object, so that the threshold will take effect for this monitoring. -**Enable alarm**:This alarm threshold configuration is enabled or disabled. +**Enable alarm**:This alarm threshold configuration is enabled or disabled. -2. **Threshold association monitoring⚠️ 【Alarm configuration】-> 【Threshold just set】-> 【Configure associated monitoring】-> 【Confirm after configuration】** +2. **Threshold association monitoring⚠️ 【Alarm configuration】-> 【Threshold just set】-> 【Configure associated monitoring】-> 【Confirm after configuration】** -> **Note⚠️ After adding a new threshold, you need to associate the threshold with the monitoring object(That is, to set this threshold for which monitoring is effective), so that the threshold will take effect for this monitoring.**。 +> **Note⚠️ After adding a new threshold, you need to associate the threshold with the monitoring object(That is, to set this threshold for which monitoring is effective), so that the threshold will take effect for this monitoring.**。 -![threshold](/img/docs/help/alert-threshold-2.png) +![threshold](/img/docs/help/alert-threshold-2.png) -![threshold](/img/docs/help/alert-threshold-3.png) +![threshold](/img/docs/help/alert-threshold-3.png) **After the threshold alarm is configured, the alarm information that has been successfully triggered can be seen in 【alarm center】.** -**If you need to notify the relevant personnel of the alarm information by email, Wechat, DingDing and Feishu, it can be configured in 【alarm notification】.** +**If you need to notify the relevant personnel of the 
alarm information by email, Wechat, DingDing and Feishu, it can be configured in 【alarm notification】.** -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md b/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md index e211514cad2..493d7fbce15 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md @@ -1,12 +1,12 @@ --- id: alert_threshold_expr title: Threshold trigger expression -sidebar_label: Threshold trigger expression +sidebar_label: Threshold trigger expression --- -> When we configure the threshold alarm, we need to configure the threshold trigger expression. The system calculates whether to trigger the alarm according to the expression and the monitoring index value. Here is a detailed introduction to the use of the expression. +> When we configure the threshold alarm, we need to configure the threshold trigger expression. The system calculates whether to trigger the alarm according to the expression and the monitoring index value. Here is a detailed introduction to the use of the expression. -#### Operators supported by expressions +#### Operators supported by expressions ``` equals(str1,str2) @@ -24,26 +24,27 @@ equals(str1,str2) ``` Rich operators allow us to define expressions freely. -Note⚠️ For the equality of string, please use `equals(str1,str2)`, while for the equality judgment of number, please use == or != +Note⚠️ For the equality of string, please use `equals(str1,str2)`, while for the equality judgment of number, please use == or != -#### Supported environment variables -> Environment variables, i.e. supported variables such as Metric values, are used in the expression. 
When the threshold value is calculated and judged, the variables will be replaced with actual values for calculation. +#### Supported environment variables + +> Environment variables, i.e. supported variables such as Metric values, are used in the expression. When the threshold value is calculated and judged, the variables will be replaced with actual values for calculation. Non fixed environment variables:These variables will change dynamically according to the monitoring Metric object we choose. For example, if we choose **response time Metric of website monitoring**, the environment variables will have `responseTime - This is the response time variable` If we want to set **when the response time of website monitoring is greater than 400** to trigger an alarm,the expression is `responseTime>400` Fixed environment variables(Rarely used):`instance : Row instance value` -This variable is mainly used to calculate multiple instances. For example, we collected `usage`(`usage is non fixed environment variables`) of disk C and disk D, but we only want to set the alarm when **the usage of C disk is greater than 80**. Then the expression is `equals(instance,"c")&&usage>80` +This variable is mainly used to calculate multiple instances. For example, we collected `usage`(`usage is non fixed environment variables`) of disk C and disk D, but we only want to set the alarm when **the usage of C disk is greater than 80**. Then the expression is `equals(instance,"c")&&usage>80` -#### Expression setting case +#### Expression setting case 1. Website monitoring -> Trigger alarm when the response time is greater than or equal to 400ms -`responseTime>=400` + `responseTime>=400` 2. API monitoring -> Trigger alarm when the response time is greater than 3000ms -`responseTime>3000` + `responseTime>3000` 3. 
Entire site monitoring -> Trigger alarm when URL(instance) path is `https://baidu.com/book/3` and the response time is greater than 200ms -`equals(instance,"https://baidu.com/book/3")&&responseTime>200` + `equals(instance,"https://baidu.com/book/3")&&responseTime>200` 4. MYSQL monitoring -> status Metric group -> Trigger alarm when hreads_running(number of running threads) Metric is greater than 7 -`threads_running>7` + `threads_running>7` -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_webhook.md b/home/versioned_docs/version-v1.4.x/help/alert_webhook.md index adc1b6f12f8..d1741d71481 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_webhook.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_webhook.md @@ -5,23 +5,24 @@ sidebar_label: Alert webHook notification keywords: [open source monitoring tool, open source alerter, open source webhook notification] --- -> After the threshold is triggered send alarm information and call the Webhook interface through post request to notify the recipient. +> After the threshold is triggered send alarm information and call the Webhook interface through post request to notify the recipient. -### Operation steps +### Operation steps -1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** +1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** ![email](/img/docs/help/alert-notice-5.png) -2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +2. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) -### WebHook callback POST body BODY content +### WebHook callback POST body BODY content + +Content format:JSON -Content format:JSON ```json { "alarmId": 76456, @@ -43,24 +44,23 @@ Content format:JSON } ``` -| | | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 
3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | -| tags | example: {key1:value1} | - +| | | +|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | +| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | +| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | +| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | +| content | string title: The actual content of the alarm notification 告警通知实际内容 | +| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | +| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | +| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | +| tags | example: {key1:value1} | +### Webhook notification common issues -### Webhook notification common issues +1. WebHook callback did not take effect -1. WebHook callback did not take effect > Please check whether there is any triggered alarm information in the alarm center. > Please check whether the configured webhook callback address is correct. -Other issues can be fed back through the communication group ISSUE! 
+Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_wework.md b/home/versioned_docs/version-v1.4.x/help/alert_wework.md index e862fae7ddf..ca14d5615fa 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_wework.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_wework.md @@ -5,34 +5,34 @@ sidebar_label: Alert enterprise Wechat notification keywords: [open source monitoring tool, open source alerter, open source WeWork notification] --- -> After the threshold is triggered send alarm information and notify the recipient by enterprise Wechat robot. +> After the threshold is triggered send alarm information and notify the recipient by enterprise Wechat robot. -### Operation steps +### Operation steps -1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** +1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -![email](/img/docs/help/alert-notice-6.jpg) +![email](/img/docs/help/alert-notice-6.jpg) -2. **【Save the key value of the WebHook address of the robot】** +2. **【Save the key value of the WebHook address of the robot】** > eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** +3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** ![email](/img/docs/help/alert-notice-7.png) -4. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +![email](/img/docs/help/alert-notice-4.png) ### Enterprise Wechat robot common issues -1. The enterprise wechat group did not receive the robot alarm notification. +1. The enterprise wechat group did not receive the robot alarm notification. + > Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. +> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/api.md b/home/versioned_docs/version-v1.4.x/help/api.md index 8411dbad59e..136dd081926 100644 --- a/home/versioned_docs/version-v1.4.x/help/api.md +++ b/home/versioned_docs/version-v1.4.x/help/api.md @@ -5,31 +5,31 @@ sidebar_label: HTTP API keywords: [open source monitoring tool, monitoring http api] --- -> Call HTTP API interface, check whether the interface is available, and monitor its response time and other Metrics. 
+> Call HTTP API interface, check whether the interface is available, and monitor its response time and other Metrics. ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | -| Request mode | Set the request mode of interface call:GET, POST, PUT, DELETE | -| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Username | User name used for interface Basic authentication or Digest authentication | -| Password | Password used for interface Basic authentication or Digest authentication | -| Content-Type | Set the resource type when carrying the BODY request body data request | -| Request BODY | Set the carry BODY request body data, which is valid when PUT or POST request method is used | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | +| Request mode | Set the request mode of interface call:GET, POST, PUT, DELETE | +| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Username | User name used for interface Basic authentication or Digest authentication | +| Password | Password used for interface Basic authentication or Digest authentication | +| Content-Type | Set the resource type when carrying the BODY request body data request | +| Request BODY | Set the carry BODY request body data, which is valid when PUT or POST request method is used | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.4.x/help/centos.md b/home/versioned_docs/version-v1.4.x/help/centos.md index 2a6ad2b0a6d..60b770ebf96 100644 --- a/home/versioned_docs/version-v1.4.x/help/centos.md +++ b/home/versioned_docs/version-v1.4.x/help/centos.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source os monitoring tool, monitori ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | 
+|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % 
| usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/versioned_docs/version-v1.4.x/help/dm.md b/home/versioned_docs/version-v1.4.x/help/dm.md index 91b032fdf54..82159bf2408 100644 --- a/home/versioned_docs/version-v1.4.x/help/dm.md +++ b/home/versioned_docs/version-v1.4.x/help/dm.md @@ -9,41 +9,41 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameters -| Parameter name | Parameter help description | -| ------- | ---------- | -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 5236. | -| Query Timeout | Set the timeout when the SQL query does not respond to data, in ms milliseconds, the default is 3000 milliseconds. | -| database name | database instance name, optional. 
| -| username | database connection username, optional | -| password | database connection password, optional | -| URL | Database connection URL, optional | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | +| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | +| Port | The port provided by the database externally, the default is 5236. | +| Query Timeout | Set the timeout when the SQL query does not respond to data, in ms milliseconds, the default is 3000 milliseconds. | +| database name | database instance name, optional. 
| +| username | database connection username, optional | +| password | database connection password, optional | +| URL | Database connection URL, optional | +| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | +| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: basic -| Metric Name | Metric Unit | Metric Help Description | -| ------------ | -------- | ------------------ | -| PORT_NUM | None | Database exposed service port | -| CTL_PATH | None | Control File Path | -| MAX_SESSIONS | None | Maximum database connections | +| Metric Name | Metric Unit | Metric Help Description | +|--------------|-------------|-------------------------------| +| PORT_NUM | None | Database exposed service port | +| CTL_PATH | None | Control File Path | +| MAX_SESSIONS | None | Maximum database connections | #### Metric collection: status -| Metric Name | Metric Unit | Metric Help Description | -| -------- | -------- | ------------------ | -| status$ | None | Open/Close status of DM database | - +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------| +| status$ | None | Open/Close status of DM database | #### Metric collection: thread -| Metric Name | Metric Unit | Metric Help Description | -| ------------- | -------- | ------------------------- | -| dm_sql_thd | None | Thread for writing dmsql dmserver | -| dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | -| dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | +| Metric Name | Metric Unit | Metric Help Description | 
+|-------------|-------------|-------------------------------------------------------------------------| +| dm_sql_thd | None | Thread for writing dmsql dmserver | +| dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | +| dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | + diff --git a/home/versioned_docs/version-v1.4.x/help/docker.md b/home/versioned_docs/version-v1.4.x/help/docker.md index fdd3098ce55..0e3a1f0b428 100644 --- a/home/versioned_docs/version-v1.4.x/help/docker.md +++ b/home/versioned_docs/version-v1.4.x/help/docker.md @@ -7,7 +7,6 @@ keywords: [open source monitoring tool, open source docker monitoring tool, moni > Collect and monitor general performance Metrics of Docker containers. - ## Pre-monitoring operations If you want to monitor the container information in `Docker`, you need to open the port according to the following steps, so that the collection request can obtain the corresponding information. @@ -31,7 +30,7 @@ This is equivalent to the **2375** port that is open to the outside world. Of co ```shell systemctl daemon-reload systemctl restart docker -```` +``` **Note: Remember to open the `2375` port number in the server console. ** @@ -42,65 +41,62 @@ Open the `2375` port number inside the server. ```shell firewall-cmd --zone=public --add-port=2375/tcp --permanent firewall-cmd --reload -```` - - - - +``` ### Configuration parameters -| Parameter name | Parameter help description | -| ------------ | ------------------------------- | -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 2375. | -| Query Timeout | Set the timeout when getting the Docker server API interface, in ms, the default is 3000 ms. 
| -| Container Name | Generally monitors all running container information. | -| username | connection username, optional | -| password | connection password, optional | -| URL | Database connection URL, optional, if configured, the parameters such as database name, username and password in the URL will override the parameters configured above | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | +| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | +| Port | The port provided by the database externally, the default is 2375. | +| Query Timeout | Set the timeout when getting the Docker server API interface, in ms, the default is 3000 ms. | +| Container Name | Generally monitors all running container information. 
| +| username | connection username, optional | +| password | connection password, optional | +| URL | Database connection URL, optional, if configured, the parameters such as database name, username and password in the URL will override the parameters configured above | +| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | +| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: system -| Metric Name | Metric Unit | Metric Help Description | -| ------------------ | -------- | ----------------------- | -| Name | None | Server Name | -| version | none | docker version number | -| os | none | server version eg: linux x86_64 | -| root_dir | none | docker folder directory eg: /var/lib/docker | -| containers | None | Total number of containers (running + not running) | -| containers_running | None | Number of running containers | -| containers_paused | none | number of containers in pause | -| images | None | The total number of container images. 
| -| ncpu | none | ncpu | -| mem_total | MB | Total size of memory used | -| system_time | none | system time | +| Metric Name | Metric Unit | Metric Help Description | +|--------------------|-------------|----------------------------------------------------| +| Name | None | Server Name | +| version | none | docker version number | +| os | none | server version eg: linux x86_64 | +| root_dir | none | docker folder directory eg: /var/lib/docker | +| containers | None | Total number of containers (running + not running) | +| containers_running | None | Number of running containers | +| containers_paused | none | number of containers in pause | +| images | None | The total number of container images. | +| ncpu | none | ncpu | +| mem_total | MB | Total size of memory used | +| system_time | none | system time | #### Metric collection: containers -| Metric Name | Metric Unit | Metric Help Description | -| -------- | -------- | ------------ | -| id | None | The ID of the container in Docker | -| name | None | The container name in the Docker container | -| image | None | Image used by the Docker container | -| command | None | Default startup command in Docker | -| state | None | The running state of the container in Docker | -| status | None | Update time in Docker container | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------------------| +| id | None | The ID of the container in Docker | +| name | None | The container name in the Docker container | +| image | None | Image used by the Docker container | +| command | None | Default startup command in Docker | +| state | None | The running state of the container in Docker | +| status | None | Update time in Docker container | #### Metrics collection: stats -| Metric Name | Metric Unit | Metric Help Description | -| ---------------- | -------- | ------------------ | -| name | None | The name in the Docker container | -| available_memory | MB | The 
amount of memory that the Docker container can utilize | -| used_memory | MB | The amount of memory already used by the Docker container | -| memory_usage | None | Memory usage of the Docker container | -| cpu_delta | None | The number of CPUs already used by the Docker container | -| number_cpus | None | The number of CPUs that the Docker container can use | -| cpu_usage | None | Docker container CPU usage | +| Metric Name | Metric Unit | Metric Help Description | +|------------------|-------------|------------------------------------------------------------| +| name | None | The name in the Docker container | +| available_memory | MB | The amount of memory that the Docker container can utilize | +| used_memory | MB | The amount of memory already used by the Docker container | +| memory_usage | None | Memory usage of the Docker container | +| cpu_delta | None | The number of CPUs already used by the Docker container | +| number_cpus | None | The number of CPUs that the Docker container can use | +| cpu_usage | None | Docker container CPU usage | + diff --git a/home/versioned_docs/version-v1.4.x/help/doris_be.md b/home/versioned_docs/version-v1.4.x/help/doris_be.md index 2bc212ef3fb..8dcde7b549b 100644 --- a/home/versioned_docs/version-v1.4.x/help/doris_be.md +++ b/home/versioned_docs/version-v1.4.x/help/doris_be.md @@ -9,162 +9,163 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库BE监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8040 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| 数据库名称 | 数据库实例名称,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 
标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8040 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| 数据库名称 | 数据库实例名称,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:doris_be_load_channel_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| value | 无 | 当前打开的 load channel 个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------------| +| value | 无 | 当前打开的 load channel 个数 | #### 指标集合:doris_be_memtable_flush_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| value | 无 | memtable写入磁盘的个数累计值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | memtable写入磁盘的个数累计值 | #### 指标集合:doris_be_plan_fragment_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------- | -| value | 无 | 当前已接收的 fragment instance 的数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------| +| value | 无 | 当前已接收的 fragment instance 的数量 | #### 指标集合:doris_be_process_thread_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------- | -| value | 无 | BE 进程线程数。通过 `/proc/pid/task` 采集 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | BE 进程线程数。通过 `/proc/pid/task` 采集 | #### 指标集合:doris_be_query_scan_rows -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 读取行数的累计值。这里只统计读取 Olap 表的数据量。并且是 RawRowsRead(部分数据行可能被索引跳过,并没有真正读取,但仍会记录到这个值中) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------------------------------------------| +| value | 无 | 读取行数的累计值。这里只统计读取 Olap 表的数据量。并且是 RawRowsRead(部分数据行可能被索引跳过,并没有真正读取,但仍会记录到这个值中) | #### 指标集合:doris_be_result_buffer_block_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------- | -| value | 无 | 
当前查询结果缓存中的 query 个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 当前查询结果缓存中的 query 个数 | #### 指标集合:doris_be_send_batch_thread_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------------- | -| value | 无 | 导入时用于发送数据包的线程池的排队个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 导入时用于发送数据包的线程池的排队个数 | #### 指标集合:doris_be_tablet_base_max_compaction_score -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------- | -| value | 无 | 当前最大的 Base Compaction Score | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------------------| +| value | 无 | 当前最大的 Base Compaction Score | #### 指标集合:doris_be_timeout_canceled_fragment_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------------------- | -| value | 无 | 因超时而被取消的 fragment instance 数量累计值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------------| +| value | 无 | 因超时而被取消的 fragment instance 数量累计值 | #### 指标集合:doris_be_load_rows -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------- | -| value | 无 | 通过 tablet sink 发送的行数累计 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------| +| value | 无 | 通过 tablet sink 发送的行数累计 | #### 指标集合:doris_be_all_rowsets_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------- | -| value | 无 | 当前所有 rowset 的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 当前所有 rowset 的个数 | #### 指标集合:doris_be_all_segments_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------- | -| value | 无 | 当前所有 segment 的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| value | 无 | 当前所有 segment 的个数 | #### 指标集合:doris_be_heavy_work_max_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------ | -| value | 无 | brpc heavy线程池线程个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|-------|------|-------------------| +| value | 无 | brpc heavy线程池线程个数 | #### 指标集合:doris_be_light_work_max_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------ | -| value | 无 | brpc light线程池线程个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | brpc light线程池线程个数 | #### 指标集合:doris_be_heavy_work_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------------- | -| value | 无 | brpc heavy线程池队列最大长度,超过则阻塞提交work | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | brpc heavy线程池队列最大长度,超过则阻塞提交work | #### 指标集合:doris_be_light_work_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------------- | -| value | 无 | brpc light线程池队列最大长度,超过则阻塞提交work | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | brpc light线程池队列最大长度,超过则阻塞提交work | #### 指标集合:doris_be_heavy_work_active_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------- | -| value | 无 | brpc heavy线程池活跃线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | brpc heavy线程池活跃线程数 | #### 指标集合:doris_be_light_work_active_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------- | -| value | 无 | brpc light线程池活跃线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | brpc light线程池活跃线程数 | #### 指标集合:doris_be_compaction_bytes_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | ---------------------------------- | -| base | 字节 | Base Compaction 的数据量累计 | -| cumulative | 字节 | Cumulative Compaction 的数据量累计 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|------------------------------| +| base | 字节 | Base Compaction 的数据量累计 | +| cumulative | 字节 | Cumulative Compaction 的数据量累计 | #### 指标集合:doris_be_disks_avail_capacity -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | 
------------------------------------------------------- | -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的剩余空间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------| +| path | 无 | 指定数据目录 | +| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的剩余空间 | #### 指标集合:doris_be_disks_total_capacity -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------- | -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的全部空间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------| +| path | 无 | 指定数据目录 | +| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的全部空间 | #### 指标集合:doris_be_local_bytes_read_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------- | -| value | 字节 | 由 `LocalFileReader` 读取的字节数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------| +| value | 字节 | 由 `LocalFileReader` 读取的字节数 | #### 指标集合:doris_be_local_bytes_written_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------- | -| value | 字节 | 由 `LocalFileWriter` 写入的字节数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------| +| value | 字节 | 由 `LocalFileWriter` 写入的字节数 | #### 指标集合:doris_be_memory_allocated_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------------------------- | -| value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------| +| value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | + diff --git a/home/versioned_docs/version-v1.4.x/help/doris_fe.md b/home/versioned_docs/version-v1.4.x/help/doris_fe.md index fc55fd25b65..67c4de34042 100644 --- a/home/versioned_docs/version-v1.4.x/help/doris_fe.md +++ b/home/versioned_docs/version-v1.4.x/help/doris_fe.md @@ -9,119 +9,119 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库FE监控] 
### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8030 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| 数据库名称 | 数据库实例名称,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8030 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| 数据库名称 | 数据库实例名称,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:doris_fe_connection_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------- | -| value | 无 | 当前FE的MySQL端口连接数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 当前FE的MySQL端口连接数 | #### 指标集合:doris_fe_edit_log_clean 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| success | 无 | 清理历史元数据日志成功的次数 | -| failed | 无 | 清理历史元数据日志失败的次数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| success | 无 | 清理历史元数据日志成功的次数 | +| failed | 无 | 清理历史元数据日志失败的次数 | #### 指标集合:doris_fe_edit_log -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------------- | -------- | ------------------------ | -| write | 无 | 元数据日志写入次数的计数 | -| read | 无 | 元数据日志读取次数的计数 | -| current | 无 | 元数据日志当前数量 | -| accumulated_bytes | 字节 | 元数据日志写入量的累计值 | -| current_bytes | 字节 | 元数据日志当前值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|--------------| +| write | 无 | 元数据日志写入次数的计数 | +| read | 无 | 元数据日志读取次数的计数 | +| current | 无 | 元数据日志当前数量 | +| accumulated_bytes | 字节 | 元数据日志写入量的累计值 | +| current_bytes | 字节 | 元数据日志当前值 | #### 指标集合:doris_fe_image_clean 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 
指标帮助描述 | -| -------- | -------- | -------------------------------- | -| success | 无 | 清理历史元数据镜像文件成功的次数 | -| failed | 无 | 清理历史元数据镜像文件失败的次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|------------------| +| success | 无 | 清理历史元数据镜像文件成功的次数 | +| failed | 无 | 清理历史元数据镜像文件失败的次数 | #### 指标集合:doris_fe_image_write 不应失败,如失败,需人工介入 -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| success | 无 | 生成元数据镜像文件成功的次数 | -| failed | 无 | 生成元数据镜像文件失败的次数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------------| +| success | 无 | 生成元数据镜像文件成功的次数 | +| failed | 无 | 生成元数据镜像文件失败的次数 | #### 指标集合:doris_fe_query_err -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------- | -| value | 无 | 错误查询的累积值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------| +| value | 无 | 错误查询的累积值 | #### 指标集合:doris_fe_max_journal_id -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 当前FE节点最大元数据日志ID。如果是Master FE,则是当前写入的最大ID,如果是非Master FE,则代表当前回放的元数据日志最大ID。用于观察多个FE之间的 id 是否差距过大。过大则表示元数据同步出现问题 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------------------------------------------------------------------------------------| +| value | 无 | 当前FE节点最大元数据日志ID。如果是Master FE,则是当前写入的最大ID,如果是非Master FE,则代表当前回放的元数据日志最大ID。用于观察多个FE之间的 id 是否差距过大。过大则表示元数据同步出现问题 | #### 指标集合:doris_fe_max_tablet_compaction_score -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 所有BE节点中最大的 compaction score 值。该值可以观测当前集群最大的 compaction score,以判断是否过高。如过高则可能出现查询或写入延迟 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------------------------------------------------| +| value | 无 | 所有BE节点中最大的 compaction score 值。该值可以观测当前集群最大的 compaction score,以判断是否过高。如过高则可能出现查询或写入延迟 | #### 指标集合:doris_fe_qps -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------ | -| value | 无 | 
当前FE每秒查询数量(仅统计查询请求) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 当前FE每秒查询数量(仅统计查询请求) | #### 指标集合:doris_fe_query_err_rate -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------- | -| value | 无 | 每秒错误查询数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------| +| value | 无 | 每秒错误查询数 | #### 指标集合:doris_fe_report_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | BE的各种定期汇报任务在FE端的队列长度,该值反映了汇报任务在 Master FE 节点上的阻塞程度,数值越大,表示FE处理能力不足 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------------------------------| +| value | 无 | BE的各种定期汇报任务在FE端的队列长度,该值反映了汇报任务在 Master FE 节点上的阻塞程度,数值越大,表示FE处理能力不足 | #### 指标集合:doris_fe_rps -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------------------------- | -| value | 无 | 当前FE每秒请求数量(包含查询以及其他各类语句) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------| +| value | 无 | 当前FE每秒请求数量(包含查询以及其他各类语句) | #### 指标集合:doris_fe_scheduled_tablet_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | Master FE节点正在调度的 tablet 数量。包括正在修复的副本和正在均衡的副本,该数值可以反映当前集群,正在迁移的 tablet 数量。如果长时间有值,说明集群不稳定 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------------------------------------------------------| +| value | 无 | Master FE节点正在调度的 tablet 数量。包括正在修复的副本和正在均衡的副本,该数值可以反映当前集群,正在迁移的 tablet 数量。如果长时间有值,说明集群不稳定 | #### 指标集合:doris_fe_txn_status 可以观测各个状态下导入事务的数量,来判断是否有堆积 -| 指标名称 | 指标单位 | 指标帮助描述 | -| --------- | -------- | ------------- | -| unknown | 无 | 未知 | -| prepare | 无 | 准备中 | -| committed | 无 | 已提交 | -| visible | 无 | 可见 | -| aborted | 无 | 已中止/已撤销 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------|------|---------| +| unknown | 无 | 未知 | +| prepare | 无 | 准备中 | +| committed | 无 | 已提交 | +| visible | 无 | 可见 | +| aborted | 无 | 已中止/已撤销 | + diff --git 
a/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md b/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md index 7418e17cc1b..fd36206bc6e 100644 --- a/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md +++ b/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md @@ -24,6 +24,7 @@ management: exposure: include: '*' ``` + Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: ```json @@ -60,7 +61,6 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo 3. Add DynamicTp monitoring under HertzBeat middleware monitoring - ### Configuration parameters | Parameter name | Parameter help description | @@ -78,24 +78,25 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo #### Metric collection: thread_pool -| Metric Name | Metric Unit | Metric Help Description | -|---------|------|------------------------| -| pool_name | None | Thread pool name | -| core_pool_size | None | Number of core threads | -| maximum_pool_size | None | Maximum number of threads | -| queue_type | None | Task queue type | -| queue_capacity | MB | task queue capacity | -| queue_size | None | The current occupied size of the task queue | -| fair | None | Queue mode, SynchronousQueue will be used | -| queue_remaining_capacity | MB | task queue remaining size | -| active_count | None | Number of active threads | -| task_count | None | Total number of tasks | -| completed_task_count | None | Number of completed tasks | -| largest_pool_size | None | The largest number of threads in history | -| pool_size | none | current number of threads | -| wait_task_count | None | Number of tasks waiting to be executed | -| reject_count | None | Number of rejected tasks | -| reject_handler_name | None | Reject policy type | -| dynamic | None | Dynamic thread pool or not | -| run_timeout_count | None | Number of running timeout tasks | -| queue_timeout_count | None | Number of tasks waiting for timeout | 
+| Metric Name | Metric Unit | Metric Help Description | +|--------------------------|-------------|---------------------------------------------| +| pool_name | None | Thread pool name | +| core_pool_size | None | Number of core threads | +| maximum_pool_size | None | Maximum number of threads | +| queue_type | None | Task queue type | +| queue_capacity | MB | task queue capacity | +| queue_size | None | The current occupied size of the task queue | +| fair | None | Queue mode, SynchronousQueue will be used | +| queue_remaining_capacity | MB | task queue remaining size | +| active_count | None | Number of active threads | +| task_count | None | Total number of tasks | +| completed_task_count | None | Number of completed tasks | +| largest_pool_size | None | The largest number of threads in history | +| pool_size | none | current number of threads | +| wait_task_count | None | Number of tasks waiting to be executed | +| reject_count | None | Number of rejected tasks | +| reject_handler_name | None | Reject policy type | +| dynamic | None | Dynamic thread pool or not | +| run_timeout_count | None | Number of running timeout tasks | +| queue_timeout_count | None | Number of tasks waiting for timeout | + diff --git a/home/versioned_docs/version-v1.4.x/help/fullsite.md b/home/versioned_docs/version-v1.4.x/help/fullsite.md index 3246fa31d82..6145f238bdc 100644 --- a/home/versioned_docs/version-v1.4.x/help/fullsite.md +++ b/home/versioned_docs/version-v1.4.x/help/fullsite.md @@ -7,28 +7,29 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon > Available or not to monitor all pages of the website. > A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. -> Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. +> Note⚠️ This monitoring requires your website to support SiteMap. 
We support SiteMap in XML and TXT formats. -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| SiteMap | Relative path of website SiteMap address, eg:/sitemap.xml | -| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| SiteMap | Relative path of website SiteMap address, eg:/sitemap.xml | +| Enable HTTPS | Whether to access the website through HTTPS. 
Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary + +| Metric name | Metric unit | Metric help description | +|--------------|-------------|------------------------------------------------------| +| url | none | URL path of web page | +| statusCode | none | Response HTTP status code for requesting the website | +| responseTime | ms | Website response time | +| errorMsg | none | Error message feedback after requesting the website | -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| url | none | URL path of web page | -| statusCode | none | Response HTTP status code for requesting the website | -| responseTime | ms | Website response time | -| errorMsg | none | Error message feedback after requesting the website | diff --git a/home/versioned_docs/version-v1.4.x/help/guide.md b/home/versioned_docs/version-v1.4.x/help/guide.md index cabcbe69506..ee506b0d6cd 100644 --- a/home/versioned_docs/version-v1.4.x/help/guide.md +++ b/home/versioned_docs/version-v1.4.x/help/guide.md @@ -5,25 +5,25 @@ sidebar_label: Help Center --- > Hertzbeat - An open source, real-time monitoring tool with custom-monitor and agentLess. 
-> Help documents and auxiliary information during use +> Help documents and auxiliary information during use ## 🔬 Monitoring services > Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. > Planned monitoring type:application service, database, operating system, cloud native, open source middleware. -### Application service monitoring +### Application service monitoring - 👉 [Website monitoring](website)
- 👉 [HTTP API](api)
- 👉 [PING Connectivity](ping)
- 👉 [Port availability](port)
+ 👉 [Website monitoring](website)
+ 👉 [HTTP API](api)
+ 👉 [PING Connectivity](ping)
+ 👉 [Port availability](port)
 👉 [Full site monitoring](fullsite)
 👉 [SSL Cert monitoring](ssl_cert)
 👉 [JVM monitoring](jvm)
 👉 [SpringBoot2.0](springboot2)
-### Database monitoring +### Database monitoring  👉 [MYSQL database monitoring](mysql)
 👉 [MariaDB database monitoring](mariadb)
@@ -34,7 +34,7 @@ sidebar_label: Help Center  👉 [OpenGauss database monitoring](opengauss)
 👉 [IoTDB database monitoring](iotdb)
-### Operating system monitoring +### Operating system monitoring  👉 [Linux operating system monitoring](linux)
 👉 [Windows operating system monitoring](windows)
@@ -59,34 +59,33 @@ sidebar_label: Help Center *** -## 💡 Alarm service +## 💡 Alarm service > More liberal threshold alarm configuration (calculation expression), supports email, SMS, WebHook, DingDing, WeChat and FeiShu for alarm notification. > The positioning of alarm service is to trigger the threshold accurately and timely, and the alarm notification can be reached in time. -### Alarm center +### Alarm center > The triggered alarm information center provides query and filtering of alarm deletion, alarm processing, mark unprocessed, alarm level status, etc. -### Alarm configuration +### Alarm configuration > The Metric threshold configuration provides the Metric threshold configuration in the form of expression, which can set the alarm level, trigger times, alarm notification template and whether it is enabled, correlation monitoring and other functions. -More details see 👉 [threshold alarm](alert_threshold)
-   👉 [Threshold expression](alert_threshold_expr) +More details see 👉 [threshold alarm](alert_threshold)
+   👉 [Threshold expression](alert_threshold_expr) -### Alarm notification +### Alarm notification > After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) > Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. -> After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver. - +> After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver.  👉 [Configure Email Notification](alert_email)
 👉 [Configure Discord Notification](alert_webhook)
 👉 [Configure Slack Notification](alert_webhook)
 👉 [Configure Telegram Notification](alert_webhook)
- 👉 [Configure WebHook Notification](alert_webhook)
+ 👉 [Configure WebHook Notification](alert_webhook)
 👉 [Configure enterprise WeChat Robot Notification](alert_wework)
- 👉 [Configure DingDing Robot Notification](alert_dingtalk)
- 👉 [Configure FeiShu Robot Notification](alert_feishu)
+ 👉 [Configure DingDing Robot Notification](alert_dingtalk)
+ 👉 [Configure FeiShu Robot Notification](alert_feishu)
diff --git a/home/versioned_docs/version-v1.4.x/help/hadoop.md b/home/versioned_docs/version-v1.4.x/help/hadoop.md index f0a458ecc9f..56f19472277 100644 --- a/home/versioned_docs/version-v1.4.x/help/hadoop.md +++ b/home/versioned_docs/version-v1.4.x/help/hadoop.md @@ -11,9 +11,10 @@ keywords: [Open Source Monitoring System, Open Source Java Monitoring, Hadoop JV ### Pre-monitoring steps ->You need to enable JMX service in the Hadoop application before monitoring. HertzBeat uses the JMX protocol to collect performance metrics from Hadoop's JVM. +> You need to enable JMX service in the Hadoop application before monitoring. HertzBeat uses the JMX protocol to collect performance metrics from Hadoop's JVM. ### Steps to enable JMX protocol in the Hadoop application + Add JVM parameters when the application starts. ⚠️Note that you can customize the exposed port and external IP. - 1.Enter the hadoop-env.sh configuration file and enter the following command in the terminal: @@ -31,12 +32,12 @@ export HADOOP_OPTS= "$HADOOP_OPTS -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false " ``` -- 3.Save and exit, and then execute "start-all.sh" in the "$HADOOP_HOME/sbin" directory to restart the service. +- 3.Save and exit, and then execute "start-all.sh" in the "$HADOOP_HOME/sbin" directory to restart the service. ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -70,16 +71,15 @@ export HADOOP_OPTS= "$HADOOP_OPTS #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -88,4 +88,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/hive.md b/home/versioned_docs/version-v1.4.x/help/hive.md index ec0d7dee398..806969c2e7c 100644 --- a/home/versioned_docs/version-v1.4.x/help/hive.md +++ b/home/versioned_docs/version-v1.4.x/help/hive.md @@ -16,6 +16,7 @@ If you want to monitor information in `Apache Hive` with this monitoring type, y ```shell hive --service metastore & ``` + **2. Enable hive server2:** ```shell @@ -24,55 +25,53 @@ hive --service hiveserver2 & ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |-------------------------------------------------------------------------------------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 10002. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 10002. | +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | | The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: basic -| Metric Name | metric unit | Metrics help describe | -|-------------| -------- |--------------------------------| -| vm_name | None | The name of the virtual machine (VM) running HiveServer2. 
| -| vm_vendor | None | The vendor or provider of the virtual machine. | -| vm_version | None | The version of the virtual machine. | -| up_time | None | The duration for which HiveServer2 has been running. | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------------------| +| vm_name | None | The name of the virtual machine (VM) running HiveServer2. | +| vm_vendor | None | The vendor or provider of the virtual machine. | +| vm_version | None | The version of the virtual machine. | +| up_time | None | The duration for which HiveServer2 has been running. | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|-----------------| -------- |-------------------------------------| -| https_proxyPort | None | The port number used for HTTPS proxy communication. | -| os_name | None | The name of the operating system on which HiveServer2 is running. | -| os_version | None | The version of the operating system.| -| os_arch | None | The architecture of the operating system.| -| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | -| java_runtime_version | None | The version of the Java runtime environment. | +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|-------------------------------------------------------------------| +| https_proxyPort | None | The port number used for HTTPS proxy communication. | +| os_name | None | The name of the operating system on which HiveServer2 is running. | +| os_version | None | The version of the operating system. | +| os_arch | None | The architecture of the operating system. | +| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | +| java_runtime_version | None | The version of the Java runtime environment. 
| #### metric Collection: thread -| Metric Name | metric unit | Metrics help describe | -| ---------------- |------|--------------------| -| thread_count | None | The current number of threads being used by HiveServer2. | -| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | -| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | -| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. | +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|----------------------------------------------------------------------| +| thread_count | None | The current number of threads being used by HiveServer2. | +| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | +| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | +| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. | #### metric Collection: code_cache -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------------| -| committed | MB | The amount of memory currently allocated for the memory pool. | -| init | MB | The initial amount of memory requested for the memory pool. | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-------------------------------------------------------------------------| +| committed | MB | The amount of memory currently allocated for the memory pool. | +| init | MB | The initial amount of memory requested for the memory pool. | | max | MB | The maximum amount of memory that can be allocated for the memory pool. | -| used | MB | The amount of memory currently being used by the memory pool. | - - +| used | MB | The amount of memory currently being used by the memory pool. 
| diff --git a/home/versioned_docs/version-v1.4.x/help/iotdb.md b/home/versioned_docs/version-v1.4.x/help/iotdb.md index 0e4dcad9912..bec827feb73 100644 --- a/home/versioned_docs/version-v1.4.x/help/iotdb.md +++ b/home/versioned_docs/version-v1.4.x/help/iotdb.md @@ -61,33 +61,33 @@ predefinedMetrics: #### Metric collection: cluster_node_status -| Metric Name | Metric Unit | Metric Help Description | -| --------- |------|-------------------------| -| name | None | Node name IP | -| status | None | Node status, 1=online 2=offline | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------| +| name | None | Node name IP | +| status | None | Node status, 1=online 2=offline | #### Metric collection: jvm_memory_committed_bytes -| Metric Name | Metric Unit | Metric Help Description | -|-------|------|------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | The memory size currently requested by the JVM | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------------| +| area | none | heap memory or nonheap memory | +| id | none | memory block | +| value | MB | The memory size currently requested by the JVM | #### Metric collection: jvm_memory_used_bytes -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | JVM used memory size | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------| +| area | none | heap memory or nonheap memory | +| id | none | memory block | +| value | MB | JVM used memory size | #### Metric collection: jvm_threads_states_threads -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------| -| state | none | thread state | -| 
count | None | The number of threads corresponding to the thread state | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------------------------------| +| state | none | thread state | +| count | None | The number of threads corresponding to the thread state | #### Index collection: quantity business data @@ -114,7 +114,8 @@ predefinedMetrics: #### Metric collection: thrift_connections -| Metric Name | Metric Unit | Metric Help Description | -|-------|------|-------------| -| name | None | name | -| connection | none | thrift current connection number | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------| +| name | None | name | +| connection | none | thrift current connection number | + diff --git a/home/versioned_docs/version-v1.4.x/help/issue.md b/home/versioned_docs/version-v1.4.x/help/issue.md index 40fd7848f64..6e20f392661 100644 --- a/home/versioned_docs/version-v1.4.x/help/issue.md +++ b/home/versioned_docs/version-v1.4.x/help/issue.md @@ -1,52 +1,63 @@ --- id: issue title: Common issues -sidebar_label: Common issues +sidebar_label: Common issues --- -### Monitoring common issues +### Monitoring common issues -1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http +1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** -> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. 
(user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) +> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http + +2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** + +> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) 3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. -The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. > When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address 4. If the history chart on the monitoring page is not displayed,popup [please configure dependency service on TDengine time series database] + > As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service - TDengine database. 
-> Installation and initialization this database refers to [TDengine Installation and Initialization](../start/tdengine-init). +> Installation and initialization this database refers to [TDengine Installation and Initialization](../start/tdengine-init). -### Docker Deployment common issues +### Docker Deployment common issues 1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** -The problems lies in Docker container failed to visit and connect localhost port. Beacuse the docker default network mode is Bridge mode which can't access loacl machine through localhost. + The problems lies in Docker container failed to visit and connect localhost port. Beacuse the docker default network mode is Bridge mode which can't access loacl machine through localhost. + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` +> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. **According to the process deploy,visit http://ip:1157/ no interface** -Please refer to the following points to troubleshoot issuess: + Please refer to the following points to troubleshoot issuess: + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> > three:`docker logs hertzbeat` Check whether the container log has errors. 
If you haven't solved the issue, report it to the communication group or community. +> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. +> +>> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. + +3. **Log an error TDengine connection or insert SQL failed** -3. **Log an error TDengine connection or insert SQL failed** > one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. +> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. ### Package Deployment common issues 1. **According to the process deploy,visit http://ip:1157/ no interface** Please refer to the following points to troubleshoot issuess: + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. > two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** + > one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 
+> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + diff --git a/home/versioned_docs/version-v1.4.x/help/jetty.md b/home/versioned_docs/version-v1.4.x/help/jetty.md index 3e5230aa9d0..6e069553dba 100644 --- a/home/versioned_docs/version-v1.4.x/help/jetty.md +++ b/home/versioned_docs/version-v1.4.x/help/jetty.md @@ -23,6 +23,7 @@ keywords: [open source monitoring tool, open source jetty web server monitoring java -jar $JETTY_HOME/start.jar --add-module=jmx java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ``` + Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file 2. Edit the `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file to modify the JMX IP port and other parameters. @@ -50,7 +51,7 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -73,19 +74,17 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` | max | kb | max size | | used | kb | used size | - #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | diff --git a/home/versioned_docs/version-v1.4.x/help/jvm.md b/home/versioned_docs/version-v1.4.x/help/jvm.md index 95b1545fffc..3b47e0e7a8a 100644 --- a/home/versioned_docs/version-v1.4.x/help/jvm.md +++ b/home/versioned_docs/version-v1.4.x/help/jvm.md @@ -24,7 +24,7 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -58,16 +58,15 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -76,4 +75,3 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/kafka.md b/home/versioned_docs/version-v1.4.x/help/kafka.md index 067cabef0e9..f86913733b1 100644 --- a/home/versioned_docs/version-v1.4.x/help/kafka.md +++ b/home/versioned_docs/version-v1.4.x/help/kafka.md @@ -27,70 +27,64 @@ exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by JMX | +| Username | JMX connection user name, optional | +| Password | JMX connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metrics #### Metrics Set:server_info -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Version | | Kafka Version | -| StartTimeMs | ms | Start Time | -| CommitId | | Version Commit ID | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Version | | Kafka Version | +| StartTimeMs | ms | Start Time | +| CommitId | | Version Commit ID | #### Metrics Set:memory_pool -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:active_controller_count -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Value | | server active controller count | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------| +| Value | | server active controller count | #### Metrics Set:broker_partition_count -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Value | | broker partition count | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Value | | broker partition count | #### Metrics Set:broker_leader_count -| Metric name | Metric unit | Metric help description | 
-| ----------- | ----------- | ----------- | -| Value | | broker leader count | - - -#### Metrics Set:broker_handler_avg_percent - -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| EventType | | event type | -| RateUnit | | rate unit | -| Count | | percent count | -| OneMinuteRate | % | One Minute Rate | -| FiveMinuteRate | % | Five Minute Rate | -| MeanRate | % | Mean Rate | -| FifteenMinuteRate | % | Fifteen Minute Rate | - - - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Value | | broker leader count | + +#### Metrics Set:broker_handler_avg_percent + +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|-------------------------| +| EventType | | event type | +| RateUnit | | rate unit | +| Count | | percent count | +| OneMinuteRate | % | One Minute Rate | +| FiveMinuteRate | % | Five Minute Rate | +| MeanRate | % | Mean Rate | +| FifteenMinuteRate | % | Fifteen Minute Rate | diff --git a/home/versioned_docs/version-v1.4.x/help/kubernetes.md b/home/versioned_docs/version-v1.4.x/help/kubernetes.md index 8e10896c6d1..45adda576fc 100644 --- a/home/versioned_docs/version-v1.4.x/help/kubernetes.md +++ b/home/versioned_docs/version-v1.4.x/help/kubernetes.md @@ -28,6 +28,7 @@ kubectl describe secret {secret} -n kube-system ``` #### method two: + ```shell kubectl create serviceaccount cluster-admin kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-admin --serviceaccount=default:cluster-admin @@ -36,59 +37,60 @@ kubectl create token --duration=1000h cluster-admin ### Configure parameters -| Parameter name | Parameter Help describes the | -|-------------|------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). 
| -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| APiServer port | K8s APiServer port, default 6443 | -| token | Authorize the Access Token | -| URL | The database connection URL is optional, if configured, the database name, user name and password parameters in the URL will override the parameter | configured above -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. 
| +| APiServer port | K8s APiServer port, default 6443 | +| token | Authorize the Access Token | +| URL | The database connection URL is optional, if configured, the database name, user name and password parameters in the URL will override the parameter | configured above | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric collection: nodes -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------| -| node_name | None | Node name | -| is_ready | None | Node Status | -| capacity_cpu | None | CPU capacity | -| allocatable_cpu | None | CPU | allotted -| capacity_memory | None | Memory capacity | -| allocatable_memory | None | Memory | allocated -| creation_time | None | Node creation time | +| Metric Name | metric unit | Metrics help describe | +|--------------------|-------------|-----------------------|-----------| +| node_name | None | Node name | +| is_ready | None | Node Status | +| capacity_cpu | None | CPU capacity | +| allocatable_cpu | None | CPU | allotted | +| capacity_memory | None | Memory capacity | +| allocatable_memory | None | Memory | allocated | +| creation_time | None | Node creation time | #### metric Collection: namespaces -| Metric Name | metric unit | Metrics help describe | -| -------- | -------- |-------------| -| namespace | None | namespace name | -| status | None | Status | -| creation_time | None | Created | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-----------------------| +| 
namespace | None | namespace name | +| status | None | Status | +| creation_time | None | Created | #### metric collection: pods -| Metric Name | metric unit | Metrics help describe | -| ---------------- | -------- |----------------| -| pod | None | Pod name | -| namespace | None | The namespace | to which the pod belongs -| status | None | Pod status | -| restart | None | Number of restarts | -| host_ip | None | The IP address of the host is | -| pod_ip | None | pod ip | -| creation_time | None | Pod creation time | -| start_time | None | Pod startup time | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-------------------------------|--------------------------| +| pod | None | Pod name | +| namespace | None | The namespace | to which the pod belongs | +| status | None | Pod status | +| restart | None | Number of restarts | +| host_ip | None | The IP address of the host is | +| pod_ip | None | pod ip | +| creation_time | None | Pod creation time | +| start_time | None | Pod startup time | #### metric Collection: services -| Metric Name | metric unit | Metrics help describe | -| ---------------- |------|--------------------------------------------------------| -| service | None | Service Name | -| namespace | None | The namespace | to which the service belongs -| type | None | Service Type ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | None | cluster ip | -| selector | None | tag selector matches | -| creation_time | None | Created | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-----------------------------------------------------------|------------------------------| +| service | None | Service Name | +| namespace | None | The namespace | to which the service belongs | +| type | None | Service Type ClusterIP NodePort LoadBalancer ExternalName | +| cluster_ip | None | cluster ip | +| selector | None | tag selector matches | +| creation_time | None | Created | + diff 
--git a/home/versioned_docs/version-v1.4.x/help/linux.md b/home/versioned_docs/version-v1.4.x/help/linux.md index 05e3405ff6e..6c22028114c 100644 --- a/home/versioned_docs/version-v1.4.x/help/linux.md +++ b/home/versioned_docs/version-v1.4.x/help/linux.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source linux monitoring tool, monit ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. 
The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | 
-| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | 
byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % | usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/versioned_docs/version-v1.4.x/help/mariadb.md b/home/versioned_docs/version-v1.4.x/help/mariadb.md index e72668fe791..374e6e6a081 100644 --- a/home/versioned_docs/version-v1.4.x/help/mariadb.md +++ b/home/versioned_docs/version-v1.4.x/help/mariadb.md @@ -9,49 +9,46 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 3306 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 3306 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------------| +| version | none | Database version | +| port | none | Database exposure service port | +| datadir | none | Database storage data disk address | +| max_connections | none | Database maximum connections | #### Metric set:status -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| threads_created | none | MariaDB created total connections | -| threads_connected | none | MariaDB connected connections | -| threads_cached | none | MariaDB current cached connections | -| threads_running | none | MariaDB current active 
connections | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|------------------------------------| +| threads_created | none | MariaDB created total connections | +| threads_connected | none | MariaDB connected connections | +| threads_cached | none | MariaDB current cached connections | +| threads_running | none | MariaDB current active connections | #### Metric set:innodb -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| innodb_data_reads | none | innodb average number of reads from files per second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | - - +| Metric name | Metric unit | Metric help description | +|---------------------|-------------|------------------------------------------------------| +| innodb_data_reads | none | innodb average number of reads from files per second | +| innodb_data_writes | none | innodb average number of writes from file per second | +| innodb_data_read | KB | innodb average amount of data read per second | +| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/versioned_docs/version-v1.4.x/help/memcached.md b/home/versioned_docs/version-v1.4.x/help/memcached.md index 5d89ce0977b..920da021e6b 100644 --- a/home/versioned_docs/version-v1.4.x/help/memcached.md +++ b/home/versioned_docs/version-v1.4.x/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance with 1.4 You need to use the stats command to view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. 
@@ -36,7 +36,7 @@ STAT version 1.4.15 ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -49,7 +49,7 @@ STAT version 1.4.15 #### Metrics Set:server_info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|---------------------------------------------------| | pid | | Memcache server process ID | | uptime | s | The number of seconds the server has been running | @@ -66,4 +66,5 @@ STAT version 1.4.15 | cmd_set | | Set command request count | | cmd_flush | | Flush command request count | | get_misses | | Get command misses | -| delete_misses | | Delete command misses | \ No newline at end of file +| delete_misses | | Delete command misses | + diff --git a/home/versioned_docs/version-v1.4.x/help/mysql.md b/home/versioned_docs/version-v1.4.x/help/mysql.md index 3f07be99380..dca64b3f9f0 100644 --- a/home/versioned_docs/version-v1.4.x/help/mysql.md +++ b/home/versioned_docs/version-v1.4.x/help/mysql.md @@ -7,9 +7,9 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo > Collect and monitor the general performance Metrics of MySQL database. Support MYSQL5+. 
-### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -27,31 +27,28 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------------| +| version | none | Database version | +| port | none | Database exposure service port | +| datadir | none | Database storage data disk address | +| max_connections | none | Database maximum connections | #### Metric set:status -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| threads_created | none | MySql created total connections | -| threads_connected | none | MySql connected connections | -| threads_cached | none | MySql current cached connections | -| threads_running | none | MySql current active connections | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|----------------------------------| +| threads_created | none | MySql created total connections | +| threads_connected | none | MySql connected connections | +| threads_cached | none | MySql current cached 
connections | +| threads_running | none | MySql current active connections | #### Metric set:innodb -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| innodb_data_reads | none | innodb average number of reads from files per second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | - - +| Metric name | Metric unit | Metric help description | +|---------------------|-------------|------------------------------------------------------| +| innodb_data_reads | none | innodb average number of reads from files per second | +| innodb_data_writes | none | innodb average number of writes from file per second | +| innodb_data_read | KB | innodb average amount of data read per second | +| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/versioned_docs/version-v1.4.x/help/nebulagraph.md b/home/versioned_docs/version-v1.4.x/help/nebulagraph.md index ae2cfb4683f..c23e39c14fe 100644 --- a/home/versioned_docs/version-v1.4.x/help/nebulagraph.md +++ b/home/versioned_docs/version-v1.4.x/help/nebulagraph.md @@ -14,7 +14,7 @@ The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. 
``` -### +### **1、Obtain available parameters through the stats and rocksdb stats interfaces.** @@ -36,7 +36,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -55,7 +55,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat Too many indicators, related links are as follows **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------------------------------|-------------|--------------------------------------------------------------| | num_queries_hit_memory_watermark_rate | | The rate of statements that reached the memory watermark. | | num_queries_hit_memory_watermark_sum | | The sum of statements that reached the memory watermark. | @@ -67,8 +67,9 @@ Too many indicators, related links are as follows Too many indicators, related links are as follows **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |----------------------------|-------------|-------------------------------------------------------------| | rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. 
| | rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | | ... | | ... | + diff --git a/home/versioned_docs/version-v1.4.x/help/nginx.md b/home/versioned_docs/version-v1.4.x/help/nginx.md index 99bb389000c..f630e4d4d24 100644 --- a/home/versioned_docs/version-v1.4.x/help/nginx.md +++ b/home/versioned_docs/version-v1.4.x/help/nginx.md @@ -20,6 +20,7 @@ If you want to monitor information in 'Nginx' with this monitoring type, you nee ```shell nginx -V ``` + View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. 2. Compile and install Nginx, add `ngx_http_stub_status_module` module @@ -50,6 +51,7 @@ server { } } ``` + 4. Reload Nginx ```shell @@ -107,14 +109,13 @@ nginx -s reload 4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. - **Refer Doc: https://github.com/zls0424/ngx_req_status** **⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -128,28 +129,27 @@ nginx -s reload #### Metrics Set:nginx_status -| Metric name | Metric unit | Metric help description | -|-------------|-------------|------------------------------------------| -| accepts | | Accepted connections | -| handled | | Successfully processed connections | -| active | | Currently active connections | -| dropped | | Discarded connections | -| requests | | Client requests | -| reading | | Connections performing read operations | -| writing | | Connections performing write operations | -| waiting | | Waiting connections | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-----------------------------------------| +| accepts | | Accepted connections | +| handled | | Successfully processed connections | +| active | | Currently active connections | +| dropped | | Discarded connections | +| requests | | Client requests | +| reading | | Connections performing read operations | +| writing | | Connections performing write operations | +| waiting | | Waiting connections | #### Metrics Set:req_status -| Metric name | Metric unit | Metric help description | -|-------------|-------------|---------------------------------| -| zone_name | | Group category | -| key | | Group name | -| max_active | | Maximum concurrent connections | -| max_bw | kb | Maximum bandwidth | -| traffic | kb | Total traffic | -| requests | | Total requests | -| active | | Current concurrent connections | -| bandwidth | kb | Current bandwidth | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------| +| zone_name | | Group category | +| key | | Group name | +| max_active | | Maximum concurrent connections | +| max_bw | kb | Maximum bandwidth | +| traffic | kb | Total traffic | +| requests | | Total requests | +| active | | Current concurrent connections | +| bandwidth | kb | Current bandwidth | diff --git 
a/home/versioned_docs/version-v1.4.x/help/ntp.md b/home/versioned_docs/version-v1.4.x/help/ntp.md index 5eca6c58e80..666f2a6b39a 100644 --- a/home/versioned_docs/version-v1.4.x/help/ntp.md +++ b/home/versioned_docs/version-v1.4.x/help/ntp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -23,7 +23,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito #### Metrics Set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |--------------|-------------|------------------------------------------------------------------------------------------| | responseTime | ms | The time it takes for the NTP server to respond to a request). | | time | ms | The current time reported by the NTP server). | diff --git a/home/versioned_docs/version-v1.4.x/help/opengauss.md b/home/versioned_docs/version-v1.4.x/help/opengauss.md index 650882861e8..28171658951 100644 --- a/home/versioned_docs/version-v1.4.x/help/opengauss.md +++ b/home/versioned_docs/version-v1.4.x/help/opengauss.md @@ -5,54 +5,52 @@ sidebar_label: OpenGauss Database keywords: [open source monitoring tool, open source database monitoring tool, monitoring opengauss database metrics] --- -> Collect and monitor the general performance Metrics of OpenGauss database. +> Collect and monitor the general performance Metrics of OpenGauss database. 
### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | #### Metric set:state -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | 
----------- | -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| stats_reset | none | The last time these statistics were reset | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | #### Metric set:activity -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- 
| -| running | connections | Number of current client connections | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | diff --git a/home/versioned_docs/version-v1.4.x/help/oracle.md b/home/versioned_docs/version-v1.4.x/help/oracle.md index 5410e53decb..50d2f6422bc 100644 --- a/home/versioned_docs/version-v1.4.x/help/oracle.md +++ b/home/versioned_docs/version-v1.4.x/help/oracle.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -27,37 +27,38 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| database_version | none | Database version | -| database_type | none | Database type | -| hostname | none | Host name | -| instance_name | none | Database instance name | -| startup_time | none | Database start time | -| status | none | Database status | +| Metric name | Metric unit | Metric help description | +|------------------|-------------|-------------------------| +| database_version | none | Database version | +| database_type | none | Database type | +| hostname | none | Host name | +| instance_name | none | Database instance name | +| startup_time | none | Database start time | +| status | none | Database status | #### Metric set:tablespace -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| file_id | none | File ID | -| file_name | none | File name | -| tablespace_name | none | Table space name | -| status | none | Status | -| bytes | MB | Size | -| blocks | none | Number of blocks | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------| +| file_id | none | File ID | +| file_name | none | File name | +| tablespace_name | none | Table space name | +| status | none | Status | +| bytes | MB | Size | +| blocks | none | Number of blocks | #### Metric set:user_connect -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| username | none | Username | -| counts | number | Current connection counts | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|---------------------------| +| username | none | Username | +| counts | number | Current connection counts | #### Metric set:performance -| Metric name | Metric 
unit | Metric help description | -| ----------- | ----------- | ----------- | -| qps | QPS | I/O Requests per second | -| tps | TPS | User transaction per second | -| mbps | MBPS | I/O Megabytes per second | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-----------------------------| +| qps | QPS | I/O Requests per second | +| tps | TPS | User transaction per second | +| mbps | MBPS | I/O Megabytes per second | + diff --git a/home/versioned_docs/version-v1.4.x/help/ping.md b/home/versioned_docs/version-v1.4.x/help/ping.md index c5603fdfbce..7c894f488ff 100644 --- a/home/versioned_docs/version-v1.4.x/help/ping.md +++ b/home/versioned_docs/version-v1.4.x/help/ping.md @@ -5,32 +5,33 @@ sidebar_label: PING connectivity keywords: [open source monitoring tool, open source network monitoring tool, monitoring ping metrics] --- -> Ping the opposite end HOST address and judge its connectivity. +> Ping the opposite end HOST address and judge its connectivity. ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Ping timeout | Set the timeout when Ping does not respond to data, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Ping timeout | Set the timeout when Ping does not respond to data, unit:ms, default: 3000ms | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | ### Common Problem 1. Ping connectivity monitoring exception when installing hertzbeat for package deployment. 
The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. > When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address + diff --git a/home/versioned_docs/version-v1.4.x/help/pop3.md b/home/versioned_docs/version-v1.4.x/help/pop3.md index 822192ad66d..fffff2a494f 100644 --- a/home/versioned_docs/version-v1.4.x/help/pop3.md +++ b/home/versioned_docs/version-v1.4.x/help/pop3.md @@ -24,10 +24,9 @@ If you want to monitor information in 'POP3' with this monitoring type, you just 5. 通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 ``` - ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -42,9 +41,8 @@ If you want to monitor information in 'POP3' with this monitoring type, you just #### Metrics Set:email_status -| Metric name | Metric unit | Metric help description | -|--------------|-------------|------------------------------------------| -| email_count | | Number of emails | -| mailbox_size | kb | The total size of emails in the mailbox | - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-----------------------------------------| +| email_count | | Number of emails | +| mailbox_size | kb | The total size of emails in the mailbox | diff --git a/home/versioned_docs/version-v1.4.x/help/port.md b/home/versioned_docs/version-v1.4.x/help/port.md index ef5f659ad44..35ff99fcb57 100644 --- a/home/versioned_docs/version-v1.4.x/help/port.md +++ b/home/versioned_docs/version-v1.4.x/help/port.md @@ -7,25 +7,23 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito > Judge whether the exposed port of the opposite end service is available, then judge whether the opposite end service is available, and collect Metrics such as response time for monitoring. -### Configuration parameter - -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Connection timeout | Waiting timeout for port connection, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Connection timeout | Waiting timeout for port connection, unit:ms, default: 3000ms | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | - - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.4.x/help/postgresql.md b/home/versioned_docs/version-v1.4.x/help/postgresql.md index de14f9d62eb..57834a713bd 100644 --- a/home/versioned_docs/version-v1.4.x/help/postgresql.md +++ b/home/versioned_docs/version-v1.4.x/help/postgresql.md @@ -9,50 +9,48 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | #### Metric set:state -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | 
----------- | -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| stats_reset | none | The last time these statistics were reset | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | #### Metric set:activity -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- 
| -| running | connections | Number of current client connections | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | diff --git a/home/versioned_docs/version-v1.4.x/help/rabbitmq.md b/home/versioned_docs/version-v1.4.x/help/rabbitmq.md index 1bcd3ea5851..917ca63c3d3 100644 --- a/home/versioned_docs/version-v1.4.x/help/rabbitmq.md +++ b/home/versioned_docs/version-v1.4.x/help/rabbitmq.md @@ -7,7 +7,7 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo > Monitoring the running status of RabbitMQ message middleware, nodes, topics and other related metrics. -### Pre-monitoring Operations +### Pre-monitoring Operations > HertzBeat uses RabbitMQ Management's Rest Api to collect RabbitMQ metric data. > Therefore, you need to enable the Management plug-in in your RabbitMQ environment @@ -24,7 +24,7 @@ rabbitmq-plugins enable rabbitmq_management ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | | Monitoring name | The name that identifies this monitoring, and the name needs to be unique. 
| @@ -40,7 +40,7 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: overview -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------|-------------|------------------------------------| | product_version | None | Product Version | | product_name | None | Product name | @@ -52,7 +52,7 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: object_totals -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |-------------|-------------|-----------------------------| | channels | none | total number of channels | | connections | none | total number of connections | @@ -62,24 +62,24 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: nodes -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------|-------------|-----------------------------------------------------------| | name | None | The node name | | type | None | The node type | | running | None | Running state | | os_pid | None | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | +| mem_limit | MB | Memory usage high watermark | +| mem_used | MB | Total amount of memory used | | fd_total | None | File descriptors available | -| fd_used | None | File descriptors used | -| sockets_total | None | Sockets available | -| sockets_used | None | Sockets used | -| proc_total | None | Erlang process limit | -| proc_used | None | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | +| fd_used | None | File descriptors used | +| sockets_total | None | Sockets available | +| sockets_used | None | Sockets used | +| proc_total | None | Erlang process limit | +| proc_used | None | Erlang processes used | +| disk_free_limit | GB | Free disk space low watermark | | disk_free | GB | Free disk space | -| gc_num | None | GC runs | -| 
gc_bytes_reclaimed | MB | Bytes reclaimed by GC | +| gc_num | None | GC runs | +| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | | context_switches | None | Context_switches num | | io_read_count | None | Total number of read operations | | io_read_bytes | KB | Total data size read into disk | @@ -100,27 +100,27 @@ rabbitmq-plugins enable rabbitmq_management | queue_deleted | None | queue deleted num | | connection_closed | None | connection closed num | - #### metrics: queues -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |------------------------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | None | The name of the queue with non-ASCII characters escaped as in C. | +| name | None | The name of the queue with non-ASCII characters escaped as in C. | | node | None | The queue on the node name | -| state | None | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | +| state | None | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | | type | None | Queue type, one of: quorum, stream, classic. | -| vhost | None | vhost path | +| vhost | None | vhost path | | auto_delete | None | Whether the queue will be deleted automatically when no longer used | -| policy | None | Effective policy name for the queue. | +| policy | None | Effective policy name for the queue. | | consumers | None | Number of consumers. | | memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. 
| | messages_ready | None | Number of messages ready to be delivered to clients | -| messages_unacknowledged | None | Number of messages delivered to clients but not yet acknowledged | +| messages_unacknowledged | None | Number of messages delivered to clients but not yet acknowledged | | messages | None | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | None | Number of messages from messages_ready which are resident in ram | +| messages_ready_ram | None | Number of messages from messages_ready which are resident in ram | | messages_persistent | None | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | +| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | | message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | +| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | + diff --git a/home/versioned_docs/version-v1.4.x/help/redis.md b/home/versioned_docs/version-v1.4.x/help/redis.md index dd9b304e1ce..58248fb0b45 100644 --- a/home/versioned_docs/version-v1.4.x/help/redis.md +++ b/home/versioned_docs/version-v1.4.x/help/redis.md @@ -2,244 +2,239 @@ id: redis title: 监控:REDIS数据库监控 sidebar_label: REDIS数据库 -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] +keywords: 
[开源监控系统, 开源数据库监控, Redis数据库监控] --- > 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | +| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制| -| atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本| -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间| -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理| -| executable | 无 | 服务器可执行文件的路径 
| -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志| -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|-----------------------------------------------| +| redis_version | 无 | Redis 服务器版本 | +| redis_git_sha1 | 无 | Git SHA1 | +| redis_git_dirty | 无 | Git dirty flag | +| redis_build_id | 无 | redis 构建的id | +| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | +| os | 无 | Redis 服务器的宿主操作系统 | +| arch_bits | 无 | 架构(32 或 64 位) | +| multiplexing_api | 无 | Redis使用的事件循环机制 | +| atomicvar_api | 无 | Redis使用的原子 API | +| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本 | +| process_id | 无 | 服务器进程的PID | +| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | +| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | +| tcp_port | 无 | TCP/IP侦听端口 | +| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | +| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | +| uptime_in_days | 无 | 自Redis服务器启动后的天数 | +| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | +| configured_hz | 无 | 服务器配置的频率设置 | +| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | +| executable | 无 | 服务器可执行文件的路径 | +| config_file | 无 | 配置文件的路径 | +| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | +| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | #### 指标集合:clients -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值| -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。| -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING)| -| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|---------------------------------|------|--------------------------------------------------------------------------------| +| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | +| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | +| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | +| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | +| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | +| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | +| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | +| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | #### 指标集合:memory -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字| -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值| -| used_memory_peak | byte | Redis消耗的峰值内存(字节)| -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | -| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和| -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节)| -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte| 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同| -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片| -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| number_of_cached_scripts | 无 |缓存的lua脚本数量 | -| 
maxmemory | byte | maxmemory配置指令的值| -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | -| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。| -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | -| mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL)| -| lazyfreed_objects | 无 | 已延迟释放的对象数。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|----------|-----------------------------------------------------------------------------------------------| +| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | +| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | +| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | +| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak | byte | Redis消耗的峰值内存(字节) | +| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 
| +| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和 | +| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | +| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | +| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | +| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | +| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | +| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | +| total_system_memory | byte | Redis主机的内存总量 | +| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_lua | byte | Lua引擎使用的字节数 | +| used_memory_lua_human | KB | 上一个值的人类可读值 | +| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | +| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | +| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | +| maxmemory | byte | maxmemory配置指令的值 | +| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | +| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | +| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | +| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | +| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | +| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | +| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | +| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | +| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | +| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | +| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | +| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | +| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | +| mem_clients_normal | 无 | 普通客户端使用的内存 | +| mem_aof_buffer | 
无 | 用于AOF和AOF重写缓冲区的临时大小 | +| mem_allocator | 无 | 内存分配器,在编译时选择。 | +| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | +| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | +| lazyfreed_objects | 无 | 已延迟释放的对象数。 | #### 指标集合:persistence -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是| -| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比| -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功| -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite| -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|------------------------------|--------|-----------------------------------------------------------------------------------------------------| +| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | +| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | +| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | +| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | +| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | +| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | +| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | +| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | +| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | +| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | +| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | +| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | +| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | +| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | +| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite | +| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | +| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | +| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | +| aof_last_write_status | 无 | 上次aof写入状态 | +| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | +| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | #### 指标集合:stats -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| 
instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数| -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数| -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数| -| tracking_total_keys | 无 | key 查询的总数| -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数| -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|----------------------------------------------------| +| total_connections_received | 无 | 服务器接受的连接总数 | +| total_commands_processed | 无 | 服务器处理的命令总数 | +| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | +| total_net_input_bytes | byte | 从网络读取的字节总数 | +| total_net_output_bytes | byte | 写入网络的总字节数 | +| instantaneous_input_kbps | KB/S | 
网络每秒的读取速率(KB/秒) | +| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | +| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | +| sync_full | 无 | 具有副本的完整重新同步数 | +| sync_partial_ok | 无 | 接受的部分重新同步请求数 | +| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | +| expired_keys | 无 | 过期的key总数 | +| expired_stale_perc | 无 | 可能过期key的百分比 | +| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | +| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | +| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | +| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | +| keyspace_misses | 无 | 在主dict 中未查到key的次数 | +| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | +| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | +| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | +| total_forks | 无 | 自服务器启动以来的fork操作总数 | +| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | +| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | +| active_defrag_hits | 无 | 主动碎片整理命中次数 | +| active_defrag_misses | 无 | 主动碎片整理未命中次数 | +| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | +| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | +| tracking_total_keys | 无 | key 查询的总数 | +| tracking_total_items | 无 | item查询的总数 | +| tracking_total_prefixes | 无 | 前缀查询的总数 | +| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | +| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | +| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | +| total_reads_processed | 无 | 正在读取的请求数 | +| total_writes_processed | 无 | 正在写入的请求数 | +| io_threaded_reads_processed | 无 | 正在读取的线程数 | +| io_threaded_writes_processed | 无 | 正在写入的线程数 | #### 指标集合:replication -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串| -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID| -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量| -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| 
repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-------------------------------------------------------------------------------------| +| role | 无 | 节点角色 master 主节点 slave 从节点 | +| connected_slaves | 无 | 连接的从节点数 | +| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | +| master_replid | 无 | 实例启动的随机字符串 | +| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | +| master_repl_offset | 无 | 主从同步偏移量 | +| second_repl_offset | 无 | 接受从服务ID的最大偏移量 | +| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | +| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | +| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | +| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和| -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和| -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和| -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU| -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|------------------------| +| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | +| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | +| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | +| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | #### 指标集合:errorstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|-----------| +| errorstat_ERR | 无 | 错误累计出现的次数 
| +| errorstat_MISCONF | 无 | | #### 指标集合:cluster -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|--------------------| +| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | #### 指标集合:commandstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数| -| cmdstat_get | 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|---------------------------------------------------------------------------------------------------------------------------| +| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | +| cmdstat_get | 无 | get命令的统计信息 | +| cmdstat_setnx | 无 | setnx命令的统计信息 | +| cmdstat_hset | 无 | hset命令的统计信息 | +| cmdstat_hget | 无 | hget命令的统计信息 | +| cmdstat_lpush | 无 | lpush命令的统计信息 | +| cmdstat_rpush | 无 | rpush命令的统计信息 | +| cmdstat_lpop | 无 | lpop命令的统计信息 | +| cmdstat_rpop | 无 | rpop命令的统计信息 | +| cmdstat_llen | 无 | llen命令的统计信息 | + diff --git a/home/versioned_docs/version-v1.4.x/help/shenyu.md b/home/versioned_docs/version-v1.4.x/help/shenyu.md index 01523769c78..c7f12bbfaf0 100644 --- a/home/versioned_docs/version-v1.4.x/help/shenyu.md +++ b/home/versioned_docs/version-v1.4.x/help/shenyu.md @@ -5,27 +5,27 @@ sidebar_label: Apache ShenYu keywords: [open source monitoring tool, open source apache shenyu monitoring tool, monitoring apache shenyu metrics] --- -> monitor ShenYu running status(JVM-related), include 
request response and other related metrics. +> monitor ShenYu running status(JVM-related), include request response and other related metrics. -## Pre-monitoring operations +## Pre-monitoring operations -Enable `metrics` plugin in ShenYu, expose it's prometheus metrics endpoint。 +Enable `metrics` plugin in ShenYu, expose it's prometheus metrics endpoint。 -Refer [ShenYu Document](https://shenyu.apache.org/docs/plugin-center/observability/metrics-plugin) +Refer [ShenYu Document](https://shenyu.apache.org/docs/plugin-center/observability/metrics-plugin) -Two Steps Mainly: +Two Steps Mainly: -1. add metrics plugin dependency in gateway's pom.xml. +1. add metrics plugin dependency in gateway's pom.xml. ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` -2. modify this config in shenyu gateway yaml. +2. modify this config in shenyu gateway yaml. ```yaml shenyu: @@ -57,75 +57,74 @@ Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond #### Index collection: shenyu_request_total -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------------| -| value | None | Collect all requests from ShenYu gateway | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------| +| value | None | Collect all requests from ShenYu gateway | #### Metric collection: shenyu_request_throw_created -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------------| -| value | None | Collect the number of abnormal requests from ShenYu Gateway | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------------------| +| value | None | Collect the number of abnormal requests from ShenYu Gateway | #### Metric collection: 
process_cpu_seconds_total -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| value | none | total user and system CPU elapsed seconds | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------| +| value | none | total user and system CPU elapsed seconds | #### Metric collection: process_open_fds -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------| -| value | none | number of open file descriptors | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------| +| value | none | number of open file descriptors | #### Metric collection: process_max_fds -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|----------------| -| value | none | maximum number of open file descriptors | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-----------------------------------------| +| value | none | maximum number of open file descriptors | #### Metric collection: jvm_info | Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-----------| -| runtime | none | JVM version information | -| vendor | none | JVM version information | -| version | None | JVM version information | +|-------------|-------------|-------------------------| +| runtime | none | JVM version information | +| vendor | none | JVM version information | +| version | None | JVM version information | #### Metric collection: jvm_memory_bytes_used -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| area | None | JVM memory area | -| value | MB | used size of the given JVM memory region | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------| +| area | None | JVM memory area | +| 
value | MB | used size of the given JVM memory region | #### Metric collection: jvm_memory_pool_bytes_used -| Metric Name | Metric Unit | Metric Help Description | -|--------|------|-----------------| -| pool | None | JVM memory pool | -| value | MB | used size of the given JVM memory pool | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------------| +| pool | None | JVM memory pool | +| value | MB | used size of the given JVM memory pool | #### Metric collection: jvm_memory_pool_bytes_committed -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| pool | None | JVM memory pool | -| value | MB | The committed size of the given JVM memory pool | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------| +| pool | None | JVM memory pool | +| value | MB | The committed size of the given JVM memory pool | #### Metric collection: jvm_memory_pool_bytes_max -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------| ----------- | -| pool | None | JVM memory pool | -| value | MB | The maximum size of the memory pool for the given JVM | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------------| +| pool | None | JVM memory pool | +| value | MB | The maximum size of the memory pool for the given JVM | #### Metric collection: jvm_threads_state -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------| -| state | none | thread state | -| value | None | The number of threads corresponding to the thread state | - +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------------------------------| +| state | none | thread state | +| value | None | The number of threads corresponding to 
the thread state | diff --git a/home/versioned_docs/version-v1.4.x/help/smtp.md b/home/versioned_docs/version-v1.4.x/help/smtp.md index 971de82c3e0..fedb17e0040 100644 --- a/home/versioned_docs/version-v1.4.x/help/smtp.md +++ b/home/versioned_docs/version-v1.4.x/help/smtp.md @@ -13,12 +13,11 @@ Determine whether the server is available through the hello command in SMTP > see https://datatracker.ietf.org/doc/html/rfc821#page-13 - **Protocol Use:SMTP** ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -33,7 +32,7 @@ Determine whether the server is available through the hello command in SMTP #### Metrics Set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |--------------|-------------|----------------------------------------------------------------| | responseTime | ms | The time it takes for the SMTP server to respond to a request. | | response | | Response Status. 
| diff --git a/home/versioned_docs/version-v1.4.x/help/spark.md b/home/versioned_docs/version-v1.4.x/help/spark.md index 3d4b44828ea..41865300024 100644 --- a/home/versioned_docs/version-v1.4.x/help/spark.md +++ b/home/versioned_docs/version-v1.4.x/help/spark.md @@ -15,12 +15,9 @@ keywords: [open source monitoring tool, open source java spark monitoring tool, Refer: https://spark.apache.org/docs/latest/spark-standalone.html - **监控配置spark的监控主要分为Master、Worker、driver、executor监控。Master和Worker的监控在spark集群运行时即可监控,Driver和Excutor的监控需要针对某一个app来进行监控。** **如果都要监控,需要根据以下步骤来配置** - - ## 第一步 **修改$SPARK_HOME/conf/spark-env.sh,添加以下语句:** @@ -36,8 +33,6 @@ export SPARK_DAEMON_JAVA_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.manageme 语句中有$JMX_PORT,这个的值可以自定义,也可以获取一个随机数作为端口号。 如果端口自定义为一个具体的值,而 spark 的 Master 和其中之一的 Worker 在同一台机器上,会出现端口冲突的情况。 - - ## 第二步 **vim $SPARK_HOME/conf/metrics.properties 添加如下内容** @@ -50,10 +45,6 @@ driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource ``` - - - - ## 第三步 **vim $SPARK_HOME/conf/spark-defaults.conf,添加以下项为driver和executor设置监控端口,在有程序运行的情况下,此端口会被打开。** @@ -69,11 +60,9 @@ gement.jmxremote.port=8711 在spark的Master和Worker正常运行以及spark-submit提交了一个程序的情况下,可以从linux中查询出端口号码。 - - ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -107,16 +96,15 @@ gement.jmxremote.port=8711 #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -125,4 +113,3 @@ gement.jmxremote.port=8711 | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/spring_gateway.md b/home/versioned_docs/version-v1.4.x/help/spring_gateway.md index ae24228c222..66c5f0b4f29 100644 --- a/home/versioned_docs/version-v1.4.x/help/spring_gateway.md +++ b/home/versioned_docs/version-v1.4.x/help/spring_gateway.md @@ -19,6 +19,7 @@ If you want to monitor information in 'Spring Gateway' with this monitoring type spring-boot-starter-actuator ``` + **2. Modify the YML configuration exposure metric interface:** ```yaml @@ -35,56 +36,55 @@ management: ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 8080. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: Health -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------------------------------| -| status | None | Service health: UP, Down | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------| +| status | None | Service health: UP, Down | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|---------| -------- |----------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------| +| profile | None | The application runs profile: prod, dev, test | +| port | None | Apply the exposed port | +| os | None | Run the operating system | +| os_arch | None | Run the operating system architecture | +| jdk_vendor | None | jdk vendor | +| jvm_version | None | jvm version | #### metric Collection: threads -| Metric Name | metric unit | Metrics help describe | -| ---------------- 
|------|--------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|----------------------------------|-------------------| +| state | None | Thread status | +| number | None | This thread state corresponds to | number of threads | #### metric Collection: memory_used -| Metric Name | metric unit | Metrics help describe | -|---------|------|------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------------------| +| space | None | Memory space name | +| mem_used | MB | This space occupies a memory size of | #### metric Collection: route_info -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------| -| route_id | None | Route id | -| predicate | None | This is a routing matching rule | -| uri | None | This is a service resource identifier| -| order | None | The priority of this route | - +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|---------------------------------------| +| route_id | None | Route id | +| predicate | None | This is a routing matching rule | +| uri | None | This is a service resource identifier | +| order | None | The priority of this route | diff --git a/home/versioned_docs/version-v1.4.x/help/springboot2.md b/home/versioned_docs/version-v1.4.x/help/springboot2.md index ca46530f77b..6452aff270e 100644 --- a/home/versioned_docs/version-v1.4.x/help/springboot2.md +++ b/home/versioned_docs/version-v1.4.x/help/springboot2.md @@ -19,6 +19,7 @@ If you want to monitor information in 'SpringBoot' with this monitoring type, yo spring-boot-starter-actuator ``` + **2. 
Modify the YML configuration exposure metric interface:** ```yaml @@ -29,7 +30,9 @@ management: include: '*' enabled-by-default: on ``` + *Note: If your project also introduces authentication related dependencies, such as springboot security, the interfaces exposed by SpringBoot Actor may be intercepted. In this case, you need to manually release these interfaces. Taking springboot security as an example, you should add the following code to the Security Configuration class:* + ```java public class SecurityConfig extends WebSecurityConfigurerAdapter{ @Override @@ -45,47 +48,49 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ } } ``` + ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 8080. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: Health -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------------------------------| -| status | None | Service health: UP, Down | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------| +| status | None | Service health: UP, Down | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|---------| -------- |----------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------| +| profile | None | The application runs profile: prod, dev, test | +| port | None | Apply the exposed port | +| os | None | Run the operating system | +| os_arch | None | Run the operating system architecture | +| jdk_vendor | None | jdk vendor | +| jvm_version | None | jvm version | #### metric Collection: threads -| Metric Name | metric unit | Metrics help describe | -| ---------------- 
|------|--------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|----------------------------------|-------------------| +| state | None | Thread status | +| number | None | This thread state corresponds to | number of threads | #### metric Collection: memory_used -| Metric Name | metric unit | Metrics help describe | -|---------|------|------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------------------| +| space | None | Memory space name | +| mem_used | MB | This space occupies a memory size of | + diff --git a/home/versioned_docs/version-v1.4.x/help/sqlserver.md b/home/versioned_docs/version-v1.4.x/help/sqlserver.md index cc12abf0d7e..71bd8ebdc83 100644 --- a/home/versioned_docs/version-v1.4.x/help/sqlserver.md +++ b/home/versioned_docs/version-v1.4.x/help/sqlserver.md @@ -9,51 +9,49 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 1433 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 1433 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| machine_name | none | Windows computer name running the server instance | -| server_name | none | Server and instance information SQL Server associated with Windows instance | -| version | none | Version of the instance,SQL Server,format is "major.minor.build.revision" | -| edition | none | The product SQL server version of the installed instance | -| start_time | none | Database start time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-----------------------------------------------------------------------------| +| machine_name | none | Windows computer name running the server instance | +| server_name | none | Server and instance information SQL Server associated with Windows instance | +| version | none | Version of the instance,SQL Server,format is "major.minor.build.revision" | +| edition | none | The product SQL server version of the installed 
instance | +| start_time | none | Database start time | #### Metric set:performance_counters -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| database_pages | none | Database pages, Number of pages obtained (buffer pool) | -| target_pages | none | Target pages, The desired number of pages that the buffer pool must have | -| page_life_expectancy | s | Page life expectancy. The time that data pages stay in the buffer pool. This time is generally greater than 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, Database buffer pool cache hit rate. The probability that the requested data is found in the buffer pool is generally greater than 80%, otherwise the buffer pool capacity may be too small | -| checkpoint_pages_sec | none | Checkpoint pages/sec, The number of dirty pages written to the disk by the checkpoint per second. If the data is too high, it indicates that there is a lack of memory capacity | -| page_reads_sec | none | Page reads/sec, Number of pages read per second in the cache pool | -| page_writes_sec | none | Page writes/sec, Number of pages written per second in the cache pool | - +| Metric name | Metric unit | Metric help description | +|------------------------|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| database_pages | none | Database pages, Number of pages obtained (buffer pool) | +| target_pages | none | Target pages, The desired number of pages that the buffer pool must have | +| page_life_expectancy | s | Page life expectancy. The time that data pages stay in the buffer pool. This time is generally greater than 300 | +| buffer_cache_hit_ratio | % | Buffer cache hit ratio, Database buffer pool cache hit rate. 
The probability that the requested data is found in the buffer pool is generally greater than 80%, otherwise the buffer pool capacity may be too small | +| checkpoint_pages_sec | none | Checkpoint pages/sec, The number of dirty pages written to the disk by the checkpoint per second. If the data is too high, it indicates that there is a lack of memory capacity | +| page_reads_sec | none | Page reads/sec, Number of pages read per second in the cache pool | +| page_writes_sec | none | Page writes/sec, Number of pages written per second in the cache pool | #### Metric set:connection -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| user_connection | none | Number of connected sessions | - +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------| +| user_connection | none | Number of connected sessions | ### Common Problem @@ -61,10 +59,12 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo jdk version: jdk11 Description of the problem: SQL Server 2019 uses the SA user connection to report an error -Error message: +Error message: + ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". 
ClientConnectionId:xxxxxxxxxxxxxxxxxxxx ``` + Screenshot of the problem: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) diff --git a/home/versioned_docs/version-v1.4.x/help/ssl_cert.md b/home/versioned_docs/version-v1.4.x/help/ssl_cert.md index 7265bd12a59..e7b60fc8a89 100644 --- a/home/versioned_docs/version-v1.4.x/help/ssl_cert.md +++ b/home/versioned_docs/version-v1.4.x/help/ssl_cert.md @@ -9,25 +9,26 @@ keywords: [open source monitoring tool, open source ssl cert monitoring tool, mo ### Configuration parameters -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| Port | The port provided by the website, https generally defaults to 443. | -| Relative path | The suffix path of the website address except the IP port, for example, `www.tancloud.io/console` The relative path of the website is `/console`. | -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). 
| +| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | +| Port | The port provided by the website, https generally defaults to 443. | +| Relative path | The suffix path of the website address except the IP port, for example, `www.tancloud.io/console` The relative path of the website is `/console`. | +| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | +| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: certificate -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|----------| -| subject | none | certificate name | -| expired | no | expired or not | -| start_time | None | Validity start time | -| start_timestamp | ms millisecond | Validity start timestamp | -| end_time | None | Expiration time | -| end_timestamp | ms milliseconds | expiration timestamp | +| Metric Name | Metric Unit | Metric Help Description | +|-----------------|-----------------|--------------------------| +| subject | none | certificate name | +| expired | no | expired or not | +| start_time | None | Validity start time | +| start_timestamp | ms millisecond | Validity start timestamp | +| end_time | None | Expiration time | +| end_timestamp | ms milliseconds | expiration timestamp | + diff --git a/home/versioned_docs/version-v1.4.x/help/tomcat.md b/home/versioned_docs/version-v1.4.x/help/tomcat.md index 8b35808ffc8..60591f85579 100644 --- a/home/versioned_docs/version-v1.4.x/help/tomcat.md +++ b/home/versioned_docs/version-v1.4.x/help/tomcat.md @@ -11,61 +11,60 @@ keywords: [open source monitoring tool, open source 
tomcat monitoring tool, moni ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by JMX | +| Username | JMX connection user name, optional | +| Password | JMX connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metrics #### Metrics Set:memory_pool -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:code_cache -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - +| Metric name | Metric unit | Metric help description | +|-----------------------|-------------|--------------------------| +| LoadedClassCount | | Loaded Class Count | +| TotalLoadedClassCount | | Total Loaded Class Count | +| UnloadedClassCount | | Unloaded Class Count | #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | 
Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | +| Metric name | Metric unit | Metric help description | +|-------------------------|-------------|----------------------------| +| TotalStartedThreadCount | | Total Started Thread Count | +| ThreadCount | | Thread Count | +| PeakThreadCount | | Peak Thread Count | +| DaemonThreadCount | | Daemon Thread Count | +| CurrentThreadUserTime | ms | Current Thread User Time | +| CurrentThreadCpuTime | ms | Current Thread Cpu Time | ### Tomcat Enable JMX Protocol -1. After building tomcat, enter the bin directory under tomcat and modify the catalina.sh file +1. After building tomcat, enter the bin directory under tomcat and modify the catalina.sh file 2. vim catalina.sh Attention⚠️ Replace Hostname And Port diff --git a/home/versioned_docs/version-v1.4.x/help/ubuntu.md b/home/versioned_docs/version-v1.4.x/help/ubuntu.md index e7d368c9ea3..8d3b65ce195 100644 --- a/home/versioned_docs/version-v1.4.x/help/ubuntu.md +++ b/home/versioned_docs/version-v1.4.x/help/ubuntu.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source linux ubuntu monitoring tool ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. 
The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | 
+|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % 
| usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/versioned_docs/version-v1.4.x/help/website.md b/home/versioned_docs/version-v1.4.x/help/website.md index e227de0dd1f..afe86397c9e 100644 --- a/home/versioned_docs/version-v1.4.x/help/website.md +++ b/home/versioned_docs/version-v1.4.x/help/website.md @@ -5,25 +5,26 @@ sidebar_label: Website Monitor keywords: [open source monitoring tool, open source website monitoring tool, monitoring website metrics] --- -> Monitor whether the website is available, response time and other Metrics. +> Monitor whether the website is available, response time and other Metrics. -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | -| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | +| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary + +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.4.x/help/windows.md b/home/versioned_docs/version-v1.4.x/help/windows.md index 82e36d23470..e4be2bd6d96 100644 --- a/home/versioned_docs/version-v1.4.x/help/windows.md +++ b/home/versioned_docs/version-v1.4.x/help/windows.md @@ -6,38 +6,39 @@ keywords: [open source monitoring tool, open source windows monitoring tool, mon --- > Collect and monitor the general performance Metrics of Windows operating system through SNMP protocol. -> Note⚠️ You need to start SNMP service for Windows server. +> Note⚠️ You need to start SNMP service for Windows server. References: [What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) [What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) [Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) +[Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. 
Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Windows SNMP service. The default is 161 | -| SNMP version | SNMP protocol version V1 V2c V3 | +| Parameter name | Parameter help description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Windows SNMP service. The default is 161 | +| SNMP version | SNMP protocol version V1 V2c V3 | | SNMP community Word | SNMP agreement community name(Community Name). It is used to realize the authentication of SNMP network administrator when accessing SNMP management agent. Similar to password, the default value is public | -| Timeout | Protocol connection timeout | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Timeout | Protocol connection timeout | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:system -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | none | Host name | -| descr | none | Operating system description | -| uptime | none | System running time | -| numUsers | number | Current number of users | -| services | number | Current number of services | -| processes | number | Current number of processes | -| responseTime | ms | Collection response time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|------------------------------| +| name | none | Host name | +| descr | none | Operating system description | +| uptime | none | System running time | +| numUsers | number | Current number of users | +| services | number | Current number of services | +| processes | number | Current number of processes | +| responseTime | ms | Collection response time | + diff --git a/home/versioned_docs/version-v1.4.x/help/zookeeper.md b/home/versioned_docs/version-v1.4.x/help/zookeeper.md index b7a34f49eda..f14b0bb8273 100644 --- a/home/versioned_docs/version-v1.4.x/help/zookeeper.md +++ b/home/versioned_docs/version-v1.4.x/help/zookeeper.md @@ -10,10 +10,12 @@ keywords: [open source monitoring tool, open source zookeeper monitoring tool, m ### PreRequisites #### Zookeeper four word command ->The current implementation scheme uses the four word command provided by zookeeper to collect Metrics. -Users need to add the four word command of zookeeper to the white list by themselves. + +> The current implementation scheme uses the four word command provided by zookeeper to collect Metrics. +> Users need to add the four word command of zookeeper to the white list by themselves. 
Steps + > 1.Find our zookeeper configuration file, which is usually zoo.cfg. > > 2.Add the following commands to the configuration file @@ -28,73 +30,74 @@ Steps > 3.Restart service -```shell +```shell zkServer.sh restart ``` #### netcat protocol + The current implementation scheme requires us to deploy the Linux server of zookeeper Command environment for installing netcat > netcat installation steps -```shell -yum install -y nc -``` +> +> ```shell +> yum install -y nc +> ``` If the terminal displays the following information, the installation is successful + ```shell Complete! ``` ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Zookeeper. The default is 2181 | -| Query timeout | Set the timeout of Zookeeper connection, unit: ms, default: 3000ms | -| Username | User name of the Linux connection where Zookeeper is located | -| Password | Password of the Linux connection where Zookeeper is located | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. 
Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Zookeeper. The default is 2181 | +| Query timeout | Set the timeout of Zookeeper connection, unit: ms, default: 3000ms | +| Username | User name of the Linux connection where Zookeeper is located | +| Password | Password of the Linux connection where Zookeeper is located | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:conf -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| clientPort | none | Port | -| dataDir | none | Data snapshot file directory. By default, 100000 operations generate a snapshot | -| dataDirSize | kb | Data snapshot file size | -| dataLogDir | none | Transaction log file directory, production environment on a separate disk | -| dataLogSize | kb | Transaction log file size | -| tickTime | ms | Time interval between servers or between clients and servers to maintain heartbeat | -| minSessionTimeout | ms | Minimum session timeout. Heartbeat timex2. The specified time is less than this time, which is used by default | -| maxSessionTimeout | ms | Maximum session timeout. Heartbeat timex20. 
The specified time is greater than this time, which is used by default | -| serverId | none | Server id | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|--------------------------------------------------------------------------------------------------------------------| +| clientPort | none | Port | +| dataDir | none | Data snapshot file directory. By default, 100000 operations generate a snapshot | +| dataDirSize | kb | Data snapshot file size | +| dataLogDir | none | Transaction log file directory, production environment on a separate disk | +| dataLogSize | kb | Transaction log file size | +| tickTime | ms | Time interval between servers or between clients and servers to maintain heartbeat | +| minSessionTimeout | ms | Minimum session timeout. Heartbeat timex2. The specified time is less than this time, which is used by default | +| maxSessionTimeout | ms | Maximum session timeout. Heartbeat timex20. The specified time is greater than this time, which is used by default | +| serverId | none | Server id | #### Metric set:stats -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| zk_version | none | Server version | -| zk_server_state | none | Server role | -| zk_num_alive_connections | number | Number of connections | -| zk_avg_latency | ms | Average latency | -| zk_outstanding_requests | number | Number of outstanding requests | -| zk_znode_count | number | Number of znode | -| zk_packets_sent | number | Number of packets sent | -| zk_packets_received | number | Number of packets received | -| zk_watch_count | number | Number of watch | -| zk_max_file_descriptor_count | number | Maximum number of file descriptors | -| zk_approximate_data_size | kb | data size | -| zk_open_file_descriptor_count | number | Number of open file descriptors | -| zk_max_latency | ms | Max latency | -| zk_ephemerals_count | number | Number of ephemeral nodes | -| zk_min_latency | ms | 
Min latency | - +| Metric name | Metric unit | Metric help description | +|-------------------------------|-------------|------------------------------------| +| zk_version | none | Server version | +| zk_server_state | none | Server role | +| zk_num_alive_connections | number | Number of connections | +| zk_avg_latency | ms | Average latency | +| zk_outstanding_requests | number | Number of outstanding requests | +| zk_znode_count | number | Number of znode | +| zk_packets_sent | number | Number of packets sent | +| zk_packets_received | number | Number of packets received | +| zk_watch_count | number | Number of watch | +| zk_max_file_descriptor_count | number | Maximum number of file descriptors | +| zk_approximate_data_size | kb | data size | +| zk_open_file_descriptor_count | number | Number of open file descriptors | +| zk_max_latency | ms | Max latency | +| zk_ephemerals_count | number | Number of ephemeral nodes | +| zk_min_latency | ms | Min latency | diff --git a/home/versioned_docs/version-v1.4.x/introduce.md b/home/versioned_docs/version-v1.4.x/introduce.md index 63a35e80ab9..60b8a623bf4 100644 --- a/home/versioned_docs/version-v1.4.x/introduce.md +++ b/home/versioned_docs/version-v1.4.x/introduce.md @@ -5,7 +5,7 @@ sidebar_label: Introduce slug: / --- -> A real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. +> A real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. 
[![Discord](https://img.shields.io/badge/Chat-Discord-7289DA?logo=discord)](https://discord.gg/Fb6M73htGr) [![Reddit](https://img.shields.io/badge/Reddit-Community-7289DA?logo=reddit)](https://www.reddit.com/r/hertzbeat/) @@ -34,11 +34,9 @@ slug: / * Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. * Provides powerful status page building capabilities, easily communicate the real-time status of your service to users. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. - ----- - +--- ### Powerful Monitoring Templates > Before we discuss the customizable monitoring capabilities of HertzBeat, which we mentioned at the beginning, let's introduce the different monitoring templates of HertzBeat. And it is because of this monitoring template design that the advanced features come later. @@ -51,7 +49,6 @@ Do you believe that users can just write a monitoring template on the UI page, c ![hertzbeat](/img/home/9.png) - ### Built-in Monitoring Types **There are a lot of built-in monitoring templates for users to add directly on the page, one monitoring type corresponds to one YML monitoring template**. @@ -99,7 +96,7 @@ Do you believe that users can just write a monitoring template on the UI page, c > From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. > Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. 
-> The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. +> The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. ![hertzbeat](/img/docs/custom-arch.png) @@ -107,12 +104,12 @@ Do you believe that users can just write a monitoring template on the UI page, c > For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. > You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. -> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. +> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. -The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. 
+The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. - For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. -- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. +- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. **Password and other sensitive information is encrypted on all links**. @@ -131,7 +128,7 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > Two locations, three centers, multi-cloud environments, multi-isolated networks, you may have heard of these scenarios. When there is a need for a unified monitoring system to monitor the IT resources of different isolated networks, this is where our Cloud Edge Collaboration comes in. In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. -`HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. +`HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. 
![hertzbeat](/img/docs/cluster-arch.png) @@ -150,12 +147,11 @@ In an isolated network where multiple networks are not connected, we need to dep - Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . - Open source is not the same as free, dev based on HertzBeat must retain the logo, name, page footnotes, copyright, etc. - **HertzBeat has been included in the [CNCF Observability And Analysis - Monitoring Landscape](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category)** ![cncf](/img/home/cncf-landscape-left-logo.svg) ------ +--- **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** @@ -273,7 +269,6 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/13.png) - ### Alarm Silence - When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. @@ -305,7 +300,6 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/9.png) - ----- **There's so much more to discover. Have Fun!** diff --git a/home/versioned_docs/version-v1.4.x/others/contact.md b/home/versioned_docs/version-v1.4.x/others/contact.md index 02d956093b4..9411e50d072 100644 --- a/home/versioned_docs/version-v1.4.x/others/contact.md +++ b/home/versioned_docs/version-v1.4.x/others/contact.md @@ -1,7 +1,7 @@ --- id: contact title: Join discussion -sidebar_label: Discussion +sidebar_label: Discussion --- > If you need any help or want to exchange suggestions during the use process, you can discuss and exchange through ISSUE or Github Discussion. 
@@ -12,7 +12,6 @@ sidebar_label: Discussion [Follow Us Twitter](https://twitter.com/hertzbeat1024) - ##### Github Discussion Welcome to Discuss in [Github Discussion](https://github.com/apache/hertzbeat/discussions) diff --git a/home/versioned_docs/version-v1.4.x/others/contributing.md b/home/versioned_docs/version-v1.4.x/others/contributing.md index 48c32ef703a..237eb63860e 100644 --- a/home/versioned_docs/version-v1.4.x/others/contributing.md +++ b/home/versioned_docs/version-v1.4.x/others/contributing.md @@ -1,7 +1,7 @@ --- id: contributing title: Contributing Guide -sidebar_label: Contributing Guide +sidebar_label: Contributing Guide --- > We are committed to maintaining a happy community that helps each other, welcome every contributor to join us! @@ -33,7 +33,6 @@ Even small corrections to typos are very welcome :) > To get HertzBeat code running on your development tools, and able to debug with breakpoints. > This is a front-end and back-end separation project. To start the local code, the back-end [manager](https://github.com/apache/hertzbeat/tree/master/manager) and the front-end [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) must be started separately. - - Backend start 1. Requires `maven3+`, `java11` and `lombok` environments @@ -60,23 +59,31 @@ Of course, if you have a good idea, you can also propose it directly on GitHub D 1. First you need to fork your target [hertzbeat repository](https://github.com/apache/hertzbeat). 2. Then download the code locally with git command: + ```shell git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended ``` + 3. After the download is complete, please refer to the getting started guide or README file of the target repository to initialize the project. 4. Then, you can refer to the following command to submit the code: + ```shell git checkout -b a-feature-branch #Recommended ``` + 5. 
Submit the code as a commit, the commit message format specification required: [module name or type name] feature or bugfix or doc: custom message.
+
   ```shell
   git add
   git commit -m '[docs]feature: necessary instructions' #Recommended
   ```
+
6. Push to the remote repository
+
   ```shell
   git push origin a-feature-branch
   ```
+
7. Then you can initiate a new PR (Pull Request) on GitHub.

 Please note that the title of the PR needs to conform to our spec, and write the necessary description in the PR to facilitate code review by Committers and other contributors.
@@ -125,14 +132,15 @@ Public WeChat: `tancloudtech`

 ## 🥐 Architecture

 - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** Provide monitoring management, system management basic services.
+
 > Provides monitoring management, monitoring configuration management, system user management, etc.
-- **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services.
+> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services.
 > Use common protocols to remotely collect and obtain peer-to-peer metrics data.
-- **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services.
+> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services.
 > Metrics data management, data query, calculation and statistics.
-- **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** Provide alert service.
+> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** Provide alert service.
 > Alarm calculation trigger, monitoring status linkage, alarm configuration, and alarm notification.
-- **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** Provide web ui.
+> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** Provide web ui.
> Angular Web UI. -![hertzBeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat/home/static/img/docs/hertzbeat-arch.svg) +![hertzBeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat/home/static/img/docs/hertzbeat-arch.svg) diff --git a/home/versioned_docs/version-v1.4.x/others/design.md b/home/versioned_docs/version-v1.4.x/others/design.md index 83d15a5c4a3..64e248f5b2d 100644 --- a/home/versioned_docs/version-v1.4.x/others/design.md +++ b/home/versioned_docs/version-v1.4.x/others/design.md @@ -1,13 +1,13 @@ --- id: design title: Design Document -sidebar_label: Design Document +sidebar_label: Design Document --- -### HertzBeat Architecture +### HertzBeat Architecture -![architecture](https://cdn.jsdelivr.net/gh/apache/hertzbeat/home/static/img/docs/hertzbeat-arch.svg) +![architecture](https://cdn.jsdelivr.net/gh/apache/hertzbeat/home/static/img/docs/hertzbeat-arch.svg) -### TanCloud Architecture +### TanCloud Architecture -TanCloud is a SAAS cluster version based on HertzBeat, which adopts a multi-cluster and multi-tenant architecture model. +TanCloud is a SAAS cluster version based on HertzBeat, which adopts a multi-cluster and multi-tenant architecture model. diff --git a/home/versioned_docs/version-v1.4.x/others/developer.md b/home/versioned_docs/version-v1.4.x/others/developer.md index 7e5793415a9..e8d651af594 100644 --- a/home/versioned_docs/version-v1.4.x/others/developer.md +++ b/home/versioned_docs/version-v1.4.x/others/developer.md @@ -1,10 +1,10 @@ --- -id: developer -title: Contributors -sidebar_label: Contributors +id: developer +title: Contributors +sidebar_label: Contributors --- -## ✨ HertzBeat Members +## ✨ HertzBeat Members
@@ -26,18 +26,18 @@ sidebar_label: Contributors
-cert - +cert ## ✨ HertzBeat Contributors -Thanks to these wonderful people, welcome to join us: [Contributor Guide](contributing) +Thanks to these wonderful people, welcome to join us: [Contributor Guide](contributing) -cert +cert + @@ -259,5 +259,4 @@ Thanks to these wonderful people, welcome to join us: [Contributor Guide](contr - diff --git a/home/versioned_docs/version-v1.4.x/others/hertzbeat.md b/home/versioned_docs/version-v1.4.x/others/hertzbeat.md index cf6e7484db3..ab3dc5bf36a 100644 --- a/home/versioned_docs/version-v1.4.x/others/hertzbeat.md +++ b/home/versioned_docs/version-v1.4.x/others/hertzbeat.md @@ -1,10 +1,9 @@ --- id: hertzbeat title: HertzBeat 开源实时监控系统 -sidebar_label: HertzBeat 实时监控 +sidebar_label: HertzBeat 实时监控 --- - > 易用友好的开源实时监控告警系统,无需Agent,高性能集群,强大自定义监控能力。 ![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/web-monitor.svg) @@ -17,7 +16,6 @@ sidebar_label: HertzBeat 实时监控 ![hertzbeat](https://img.shields.io/badge/monitor-network-red) ![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/alert.svg) - ## 🎡 介绍 [HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,高性能集群,无需 Agent 的开源实时监控告警系统。 @@ -30,8 +28,7 @@ sidebar_label: HertzBeat 实时监控 - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式消息及时送达。 - -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ---- @@ -42,12 +39,12 @@ sidebar_label: HertzBeat 实时监控 - 基于`Java+SpringBoot+TypeScript+Angular`主流技术栈构建,方便的二次开发。 - 但开源不等同于免费,基于HertzBeat二次开发需保留logo,名称,页面脚注,版权等。 -### 强大的监控模版 +### 强大的监控模版 -> 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 +> 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 HertzBeat 
自身并没有去创造一种采集数据协议,让对端来适配它。而是充分使用了现有的生态,SNMP采集网络交换机路由器信息,JMX采集JAVA应用信息,JDBC规范采集数据集信息,SSH直连执行脚本获取回显信息,HTTP+(JsonPath | prometheus等)解析接口信息,IPMI采集服务器信息等等。 -HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标信息。 +HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标信息。 ![hertzbeat](/img/blog/multi-protocol.png) @@ -55,7 +52,6 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![hertzbeat](/img/home/9.png) - ### 内置监控类型 **一款监控类型对应一个YML监控模版** @@ -87,26 +83,26 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 - 和更多自定义监控模版。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 -### 强大自定义功能 +### 强大自定义功能 > 由前面的**监控模版**介绍,大概清楚了 HertzBeat 拥有的强大自定义功能。 > 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 -> 模版里面包含各个协议的使用,指标别名转换,指标计算,单位转换等一系列功能,帮助用户能采集到自己想要的监控指标。 +> 模版里面包含各个协议的使用,指标别名转换,指标计算,单位转换等一系列功能,帮助用户能采集到自己想要的监控指标。 ![hertzbeat](/img/docs/custom-arch.png) -### 无需 Agent +### 无需 Agent > 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 agent 的安装部署调试了。 > 每台主机得装个 agent,为了监控不同应用中间件可能还得装几个对应的 agent,量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 -> agent 的版本是否与主应用兼容, agent 与主应用的通讯调试, agent 的同步升级等等等等,这些全是头大的点。 +> agent 的版本是否与主应用兼容, agent 与主应用的通讯调试, agent 的同步升级等等等等,这些全是头大的点。 HertzBeat 的原理就是使用不同的协议去直连对端系统,采集 PULL 的形式去拉取采集数据,无需用户在对端主机上部署安装 Agent | Exporter等。 比如监控 linux, 在 HertzBeat 端输入IP端口账户密码或密钥即可。 比如监控 mysql, 在 HertzBeat 端输入IP端口账户密码即可。 -**密码等敏感信息全链路加密** +**密码等敏感信息全链路加密** -### 高性能集群 +### 高性能集群 > 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 > 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 @@ -114,16 +110,16 @@ HertzBeat 的原理就是使用不同的协议去直连对端系统,采集 PUL ![hertzbeat](/img/docs/cluster-arch.png) -### 云边协同 +### 云边协同 > 支持部署边缘采集器集群,与主 HertzBeat 服务云边协同提升采集能力。 在多个网络不相通的隔离网络中,在以往的方案中我们需要在每个网络都部署一套监控系统,这导致数据不互通,管理部署维护都不方便。 -HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采集器,采集器在隔离网络内部进行监控任务采集,采集数据上报,由主 HertzBeat 服务统一调度管理展示。 +HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采集器,采集器在隔离网络内部进行监控任务采集,采集数据上报,由主 HertzBeat 服务统一调度管理展示。 ![hertzbeat](/img/docs/cluster-arch.png) -### 易用友好 +### 易用友好 > 集 **监控+告警+通知** All 
in one, 无需单独部署多个组件服务。 > 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 @@ -131,15 +127,12 @@ HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采 > 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 > 阈值告警通知友好,基于表达式阈值配置,多种告警通知渠道,支持告警静默,时段标签告警级别过滤等。 - - ------ - +--- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ----- -## 即刻体验一波 +## 即刻体验一波 Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` 浏览器访问 `http://localhost:1157` 默认账户密码 `admin/hertzbeat` @@ -148,7 +141,7 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 - HertzBeat 的用户管理统一由配置文件 `sureness.yml` 维护,用户可以通过修改此文件来新增删除修改用户信息,用户角色权限等。默认账户密码 admin/hertzbeat -![hertzbeat](/img/home/0.png) +![hertzbeat](/img/home/0.png) ### 概览页面 @@ -156,7 +149,7 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 - 展示当前注册的采集器集群状态,包括采集器的上线状态,监控任务,启动时间,IP地址,名称等。 - 下发展示了最近告警信息列表,告警级别分布情况,告警处理率情况。 -![hertzbeat](/img/home/1.png) +![hertzbeat](/img/home/1.png) ### 监控中心 @@ -194,7 +187,7 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) -![hertzbeat](/img/home/2.png) +![hertzbeat](/img/home/2.png) ### 新增修改监控 @@ -211,9 +204,9 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 - 监控历史数据报告,以趋势图表的形式展示了当前监控数值类型的指标的历史值,支持查询小时,天,月的历史数据,支持配置页面刷新时间。 - ⚠️注意监控历史图表需配置外置时序数据库才能获取完整功能,时序数据库支持: IOTDB, TDengine, InfluxDB, GreptimeDB -![hertzbeat](/img/home/3.png) +![hertzbeat](/img/home/3.png) -![hertzbeat](/img/home/4.png) +![hertzbeat](/img/home/4.png) ### 告警中心 @@ -229,7 +222,7 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 - 阈值规则支持可视化页面配置或表达式规则配置,灵活性更高。 - 
支持配置触发次数,告警级别,通知模版,关联指定监控等。 -![hertzbeat](/img/home/6.png) +![hertzbeat](/img/home/6.png) ![hertzbeat](/img/home/11.png) @@ -242,7 +235,6 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/13.png) - ### 告警静默 - 当通过阈值规则判断触发告警后,会进入到告警静默,告警静默会根据规则对特定一次性时间段或周期性时候段的告警消息屏蔽静默,此时间段不发送告警消息。 @@ -264,17 +256,16 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/17.png) -![hertzbeat](/img/home/8.png) +![hertzbeat](/img/home/8.png) ### 监控模版 - HertzBeat 将 `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` 等协议规范可配置化,只需在浏览器配置监控模版 `YML` 就能使用这些协议去自定义采集想要的指标。您相信只需配置下就能立刻适配一款 `K8s` 或 `Docker` 等新的监控类型吗? - 同理我们内置的所有监控类型(mysql,website,jvm,k8s)也一一映射为对应的监控模版,用户可以新增修改监控模版来自定义监控功能。 -![hertzbeat](/img/home/9.png) - +![hertzbeat](/img/home/9.png) ------ +--- **`HertzBeat`更多强大的功能欢迎使用探索。Have Fun!** diff --git a/home/versioned_docs/version-v1.4.x/others/huaweicloud.md b/home/versioned_docs/version-v1.4.x/others/huaweicloud.md index b934a5c2ca3..9f1d408fc79 100644 --- a/home/versioned_docs/version-v1.4.x/others/huaweicloud.md +++ b/home/versioned_docs/version-v1.4.x/others/huaweicloud.md @@ -1,26 +1,23 @@ --- id: huaweicloud title: HertzBeat & HuaweiCloud -sidebar_label: HertzBeat & HuaweiCloud +sidebar_label: HertzBeat & HuaweiCloud --- -### HertzBeat 与 HuaweiCloud 的开源合作需求Issue +### HertzBeat 与 HuaweiCloud 的开源合作需求Issue > 欢迎大家对感兴趣的Issue领取贡献。 -- [Task] support using Huawei Cloud OBS to store custom define yml file [#841](https://github.com/apache/hertzbeat/issues/841) +- [Task] support using Huawei Cloud OBS to store custom define yml file [#841](https://github.com/apache/hertzbeat/issues/841) - [Task] support Huawei Cloud CCE metrics monitoring [#839](https://github.com/apache/hertzbeat/issues/839) - [Task] support EulerOS metrics monitoring [#838](https://github.com/apache/hertzbeat/issues/838) - [Task] support using Huawei Cloud SMN send alarm notification message [#837](https://github.com/apache/hertzbeat/issues/837) - [Task] support using 
GaussDB For Influx store history metrics data [#836](https://github.com/apache/hertzbeat/issues/836) - - - -### 关于 HuaweiCloud 开源活动 +### 关于 HuaweiCloud 开源活动 HuaweiCloud 华为云将面向开源软件工具链与环境、开源应用构建和开源生态组件构建这三大重点场景,提供技术支持、奖金支持、活动支持,邀请更多的开发者,携手构建开源for HuaweiCloud。 -开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx for HuaweiCloud”。 +开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx for HuaweiCloud”。 感兴趣的开发者可以查看:华为云开源项目仓库 https://gitee.com/HuaweiCloudDeveloper/huaweicloud-cloud-native-plugins-kits 了解更多。 diff --git a/home/versioned_docs/version-v1.4.x/others/images-deploy.md b/home/versioned_docs/version-v1.4.x/others/images-deploy.md index 782dbe83c05..ff350e763e0 100644 --- a/home/versioned_docs/version-v1.4.x/others/images-deploy.md +++ b/home/versioned_docs/version-v1.4.x/others/images-deploy.md @@ -1,10 +1,9 @@ --- id: images-deploy title: HertzBeat 华为云镜像部署 -sidebar_label: HertzBeat 华为云镜像部署快速指引 +sidebar_label: HertzBeat 华为云镜像部署快速指引 --- - > 易用友好的开源实时监控告警工具,无需Agent,强大自定义监控能力。 [![discord](https://img.shields.io/badge/chat-on%20discord-brightgreen)](https://discord.gg/Fb6M73htGr) @@ -20,20 +19,18 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 ![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/custom-monitor.svg) ![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/alert.svg) - ## 🎡 介绍 > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 > 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 > 更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 - +> > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
- +> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 > 当然我们也提供了对应的 **[SAAS版本监控云](https://console.tancloud.cn)**,中小团队和个人无需再为了监控自己的网站资源,而去部署学习一套繁琐的监控系统,**[登录即可免费开始](https://console.tancloud.cn)**。 - ---- ![hertzbeat](/img/home/1.png) @@ -64,9 +61,9 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 - 和更多的自定义监控。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 -## 镜像部署 +## 镜像部署 -> HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 +> HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 1. 开通服务器时选用 HertzBeat 镜像 2. 启动服务器 @@ -78,7 +75,6 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](../start/tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](../start/iotdb-init) - 4. 配置用户配置文件(可选,自定义配置用户密码) HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 @@ -86,10 +82,10 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 5. 部署启动 执行位于安装目录/opt/hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - ``` + + ``` $ ./startup.sh ``` - 6. 开始探索HertzBeat 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 @@ -101,11 +97,14 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 1. **按照流程部署,访问 http://ip:1157/ 无界面** 请参考下面几点排查问题: + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**监控历史图表长时间都一直无数据** + > 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 +> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 + diff --git a/home/versioned_docs/version-v1.4.x/others/resource.md b/home/versioned_docs/version-v1.4.x/others/resource.md index a52a3173b10..4aeae0293b6 100644 --- a/home/versioned_docs/version-v1.4.x/others/resource.md +++ b/home/versioned_docs/version-v1.4.x/others/resource.md @@ -1,18 +1,18 @@ --- id: resource title: Related resources -sidebar_label: Related resources +sidebar_label: Related resources --- ## HertzBeat Introduce PDF -Download: [PDF](http://cdn.hertzbeat.com/hertzbeat.pdf) +Download: [PDF](http://cdn.hertzbeat.com/hertzbeat.pdf) -## Icon Resources +## Icon Resources -### HertzBeat Logo +### HertzBeat Logo -![logo](/img/hertzbeat-logo.svg) +![logo](/img/hertzbeat-logo.svg) -Download: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.jpg) +Download: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.jpg) diff --git a/home/versioned_docs/version-v1.4.x/others/sponsor.md b/home/versioned_docs/version-v1.4.x/others/sponsor.md index 6d6b57e15cd..ae338dbea3e 100644 --- a/home/versioned_docs/version-v1.4.x/others/sponsor.md +++ b/home/versioned_docs/version-v1.4.x/others/sponsor.md @@ -1,19 +1,13 @@ --- id: sponsor title: Sponsor -sidebar_label: Sponsor +sidebar_label: Sponsor --- +**Hertzbeat is completely free for individuals or enterprises. If you like this project and are willing to help, buy us a cup of coffee** -**Hertzbeat is completely free for individuals or enterprises. 
If you like this project and are willing to help, buy us a cup of coffee** - - -![wechat-alipay](/img/docs/pay.png) - +![wechat-alipay](/img/docs/pay.png) Thanks [JiShi Information(build a new microwave + optical transaction network)](https://www.flarespeed.com) sponsored server node. -Thanks [TianShang cloud computing(new wisdom cloud)](https://www.tsyvps.com/aff/BZBEGYLX) sponsored server node. - - - +Thanks [TianShang cloud computing(new wisdom cloud)](https://www.tsyvps.com/aff/BZBEGYLX) sponsored server node. diff --git a/home/versioned_docs/version-v1.4.x/start/account-modify.md b/home/versioned_docs/version-v1.4.x/start/account-modify.md index 0f6abc5b234..b1c618a8eda 100644 --- a/home/versioned_docs/version-v1.4.x/start/account-modify.md +++ b/home/versioned_docs/version-v1.4.x/start/account-modify.md @@ -1,7 +1,7 @@ --- id: account-modify title: Modify Account Username Password -sidebar_label: Update Account +sidebar_label: Update Account --- HertzBeat default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat @@ -114,7 +114,6 @@ account: > This secret is the key for account security encryption management and needs to be updated to your custom key string of the same length. - Update the `application.yml` file in the `config` directory, modify the `sureness.jwt.secret` parameter to your custom key string of the same length. 
```yaml @@ -126,4 +125,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**Restart HertzBeat, access http://ip:1157/ to explore** +**Restart HertzBeat, access http://ip:1157/ to explore** diff --git a/home/versioned_docs/version-v1.4.x/start/custom-config.md b/home/versioned_docs/version-v1.4.x/start/custom-config.md index 5a60d6b02b9..7554498bc6e 100644 --- a/home/versioned_docs/version-v1.4.x/start/custom-config.md +++ b/home/versioned_docs/version-v1.4.x/start/custom-config.md @@ -1,7 +1,7 @@ --- id: custom-config title: Advanced Params Config -sidebar_label: Advanced Params Config +sidebar_label: Advanced Params Config --- This describes how to configure the SMS server, the number of built-in availability alarm triggers, etc. @@ -12,13 +12,14 @@ This describes how to configure the SMS server, the number of built-in availabil Modify the configuration file located at `hertzbeat/config/application.yml` Note ⚠️The docker container method needs to mount the application.yml file to the local host -The installation package can be decompressed and modified in `hertzbeat/config/application.yml` +The installation package can be decompressed and modified in `hertzbeat/config/application.yml` 1. Configure the SMS sending server > Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. -Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) +Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) + ```yaml common: sms: @@ -32,7 +33,6 @@ common: 2. Configure alarm custom parameters - ```yaml alerter: # Custom console address @@ -44,6 +44,7 @@ alerter: > By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. 
Note ⚠️ `memory.enabled: false, redis.enabled: true` + ```yaml warehouse: store: @@ -56,3 +57,4 @@ warehouse: port: 6379 password: 123456 ``` + diff --git a/home/versioned_docs/version-v1.4.x/start/docker-deploy.md b/home/versioned_docs/version-v1.4.x/start/docker-deploy.md index 644079fde82..6b1cafd90d3 100644 --- a/home/versioned_docs/version-v1.4.x/start/docker-deploy.md +++ b/home/versioned_docs/version-v1.4.x/start/docker-deploy.md @@ -1,52 +1,51 @@ --- id: docker-deploy title: Install HertzBeat via Docker -sidebar_label: Install via Docker +sidebar_label: Install via Docker --- > Recommend to use docker deploy HertzBeat - 1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` - 2. pull HertzBeat Docker mirror you can look up the mirror version TAG in [dockerhub mirror repository](https://hub.docker.com/r/apache/hertzbeat/tags) - or in [quay.io mirror repository](https://quay.io/repository/apache/hertzbeat) + or in [quay.io mirror repository](https://quay.io/repository/apache/hertzbeat) + ```shell $ docker pull apache/hertzbeat $ docker pull apache/hertzbeat-collector ``` - or + + or + ```shell $ docker pull quay.io/tancloud/hertzbeat $ docker pull quay.io/tancloud/hertzbeat-collector ``` - 3. Mounted HertzBeat configuration file (optional) Download and config `application.yml` in the host directory, eg:`$(pwd)/application.yml` Download from [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) or [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml). - You can modify the configuration yml file according to your needs. 
- - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) - + You can modify the configuration yml file according to your needs. + - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` + - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) + - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) + - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) 4. 
Mounted the account file(optional) HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) or [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) - For detail steps, please refer to [Configure Account Password](account-modify) - -5. Start the HertzBeat Docker container + For detail steps, please refer to [Configure Account Password](account-modify) +5. Start the HertzBeat Docker container -```shell +```shell $ docker run -d -p 1157:1157 -p 1158:1158 \ -e LANG=en_US.UTF-8 \ -e TZ=Asia/Shanghai \ @@ -57,25 +56,25 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ --name hertzbeat apache/hertzbeat ``` - This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. - - `docker run -d` : Run a container in the background via Docker - - `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. - - `-e LANG=en_US.UTF-8` : Set the system language - - `-e TZ=Asia/Shanghai` : Set the system timezone - - `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. - - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. 
- - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. - - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. - - `--name hertzbeat` : Naming container name hertzbeat - - `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** +This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. +- `docker run -d` : Run a container in the background via Docker +- `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. +- `-e LANG=en_US.UTF-8` : Set the system language +- `-e TZ=Asia/Shanghai` : Set the system timezone +- `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. +- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. +- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. 
+- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. +- `--name hertzbeat` : Naming container name hertzbeat +- `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** -6. Begin to explore HertzBeat +6. Begin to explore HertzBeat - Access `http://ip:1157/` using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access `http://ip:1157/` using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! 7. Deploy collector cluster (Optional) -```shell +```shell $ docker run -d \ -e IDENTITY=custom-collector-name \ -e MODE=public \ @@ -84,53 +83,61 @@ $ docker run -d \ --name hertzbeat-collector apache/hertzbeat-collector ``` - This command starts a running HertzBeat-Collector container. - - `docker run -d` : Run a container in the background via Docker - - `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. - - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. - - `-e MANAGER_HOST=127.0.0.1` : Important⚠️ Set the main hertzbeat server ip. - - `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. - - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. - - `--name hertzbeat-collector` : Naming container name hertzbeat-collector - - `apache/hertzbeat-collector` : Use the pulled latest HertzBeat-Collector official application mirror to start the container. 
**Use `quay.io/tancloud/hertzbeat-collector` instead if you pull `quay.io` docker image.** +This command starts a running HertzBeat-Collector container. +- `docker run -d` : Run a container in the background via Docker +- `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. +- `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. +- `-e MANAGER_HOST=127.0.0.1` : Important⚠️ Set the main hertzbeat server ip. +- `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. +- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. +- `--name hertzbeat-collector` : Naming container name hertzbeat-collector +- `apache/hertzbeat-collector` : Use the pulled latest HertzBeat-Collector official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat-collector` instead if you pull `quay.io` docker image.** -8. Access `http://localhost:1157` and you will see the registered new collector in dashboard. +8. Access `http://localhost:1157` and you will see the registered new collector in dashboard. -**HAVE FUN** +**HAVE FUN** -### FAQ +### FAQ **The most common problem is network problems, please check in advance** 1. **MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** -The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. + The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. + > Solution A:Configure application.yml. 
Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` +> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. **According to the process deploy,visit http://ip:1157/ no interface** -Please refer to the following points to troubleshoot issues: + Please refer to the following points to troubleshoot issues: + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. > 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. -3. **Log an error TDengine connection or insert SQL failed** +3. **Log an error TDengine connection or insert SQL failed** + > 1:Check whether database account and password configured is correct, the database is created. -> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. +> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + +4. **Historical monitoring charts have been missing data for a long time** -4. **Historical monitoring charts have been missing data for a long time** > 1:Check whether you configure Tdengine or IoTDB. No configuration means no historical chart data. -> 2:Check whether Tdengine database `hertzbeat` is created. +> 2:Check whether Tdengine database `hertzbeat` is created. 
> 3: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. 5. If the history chart on the monitoring page is not displayed,popup [please configure time series database] + > As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service - IoTDB or TDengine database. -> Installation and initialization this database refer to [TDengine Installation](tdengine-init) or [IoTDB Installation](iotdb-init) +> Installation and initialization this database refer to [TDengine Installation](tdengine-init) or [IoTDB Installation](iotdb-init) + +6. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -6. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is near 2.4.0.12, version 3.0 and 2.2 are not compatible. +> Please confirm whether the installed TDengine version is near 2.4.0.12, version 3.0 and 2.2 are not compatible. 7. 
The time series database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure dependent time series database] + > Please check if the configuration parameters are correct > Is iot-db or td-engine enable set to true > Note⚠️If both hertzbeat and IotDB, TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory + diff --git a/home/versioned_docs/version-v1.4.x/start/greptime-init.md b/home/versioned_docs/version-v1.4.x/start/greptime-init.md index e4fcbe982ef..10dafda79b1 100644 --- a/home/versioned_docs/version-v1.4.x/start/greptime-init.md +++ b/home/versioned_docs/version-v1.4.x/start/greptime-init.md @@ -7,22 +7,25 @@ sidebar_label: Use GreptimeDB Store Metrics HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) We recommend VictoriaMetrics for long term support. -GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. +GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** + +### Install GreptimeDB via Docker -### Install GreptimeDB via Docker > Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -1. 
Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install GreptimeDB with Docker +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install GreptimeDB with Docker ```shell $ docker run -p 4000-4004:4000-4004 \ @@ -32,10 +35,11 @@ $ docker run -p 4000-4004:4000-4004 \ --http-addr 0.0.0.0:4000 \ --rpc-addr 0.0.0.0:4001 \ ``` - `-v /opt/greptimedb:/tmp/greptimedb` is local persistent mount of greptimedb data directory. `/opt/greptimedb` should be replaced with the actual local directory. - use```$ docker ps``` to check if the database started successfully -### Configure the database connection in hertzbeat `application.yml` configuration file +`-v /opt/greptimedb:/tmp/greptimedb` is local persistent mount of greptimedb data directory. `/opt/greptimedb` should be replaced with the actual local directory. +use```$ docker ps``` to check if the database started successfully + +### Configure the database connection in hertzbeat `application.yml` configuration file 1. 
Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) diff --git a/home/versioned_docs/version-v1.4.x/start/influxdb-init.md b/home/versioned_docs/version-v1.4.x/start/influxdb-init.md index 72cf7ca6129..0bea6129fb6 100644 --- a/home/versioned_docs/version-v1.4.x/start/influxdb-init.md +++ b/home/versioned_docs/version-v1.4.x/start/influxdb-init.md @@ -1,7 +1,7 @@ --- id: influxdb-init title: Use Time Series Database InfluxDB to Store Metrics Data (Optional) -sidebar_label: Use InfluxDB Store Metrics +sidebar_label: Use InfluxDB Store Metrics --- HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) @@ -11,41 +11,45 @@ TDengine is the Time Series Data Platform where developers build IoT, analytics, **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** -Note⚠️ Need InfluxDB 1.x Version. +Note⚠️ Need InfluxDB 1.x Version. ### 1. Use HuaweiCloud GaussDB For Influx > Use [HuaweiCloud GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) - -> Get the `GaussDB For Influx` service url, username and password config. +> +> Get the `GaussDB For Influx` service url, username and password config. ⚠️Note `GaussDB For Influx` enable SSL default, the service url should use `https:` -### 2. Install TDengine via Docker +### 2. Install TDengine via Docker + > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -1. 
Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install InfluxDB with Docker - ``` - $ docker run -p 8086:8086 \ - -v /opt/influxdb:/var/lib/influxdb \ - influxdb:1.8 - ``` - `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. - use```$ docker ps``` to check if the database started successfully - - -### Configure the database connection in hertzbeat `application.yml` configuration file +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install InfluxDB with Docker +> +> ``` +> $ docker run -p 8086:8086 \ +> -v /opt/influxdb:/var/lib/influxdb \ +> influxdb:1.8 +> ``` +> +> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. +> use```$ docker ps``` to check if the database started successfully + +### Configure the database connection in hertzbeat `application.yml` configuration file 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.influxdb` data source parameters, URL account and password. 
+ Replace `warehouse.store.influxdb` data source parameters, URL account and password. ```yaml warehouse: @@ -70,3 +74,4 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + diff --git a/home/versioned_docs/version-v1.4.x/start/iotdb-init.md b/home/versioned_docs/version-v1.4.x/start/iotdb-init.md index 57329621958..d015527ac1b 100644 --- a/home/versioned_docs/version-v1.4.x/start/iotdb-init.md +++ b/home/versioned_docs/version-v1.4.x/start/iotdb-init.md @@ -1,30 +1,32 @@ --- id: iotdb-init title: Use Time Series Database IoTDB to Store Metrics Data (Optional) -sidebar_label: Use IoTDB Store Metrics +sidebar_label: Use IoTDB Store Metrics --- HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) We recommend VictoriaMetrics for long term support. -Apache IoTDB is a software system that integrates the collection, storage, management and analysis of time series data of the Internet of Things. We use it to store and analyze the historical data of monitoring metrics collected. Support V0.13+ version and V1.0.+ version. +Apache IoTDB is a software system that integrates the collection, storage, management and analysis of time series data of the Internet of Things. We use it to store and analyze the historical data of monitoring metrics collected. Support V0.13+ version and V1.0.+ version. 
**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** - -> If you already have an IoTDB environment, you can skip directly to the YML configuration step. +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +> If you already have an IoTDB environment, you can skip directly to the YML configuration step. ### Install IoTDB via Docker + > Refer to the official website [installation tutorial](https://iotdb.apache.org/UserGuide/V0.13.x/QuickStart/WayToGetIoTDB.html) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install IoTDB via Docker +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install IoTDB via Docker ```shell $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ @@ -33,14 +35,14 @@ $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ apache/iotdb:0.13.3-node ``` - `-v /opt/iotdb/data:/iotdb/data` is local persistent mount of IotDB data directory.`/iotdb/data` should be replaced with the actual local directory. - use```$ docker ps``` to check if the database started successfully +`-v /opt/iotdb/data:/iotdb/data` is local persistent mount of IotDB data directory.`/iotdb/data` should be replaced with the actual local directory. 
+use```$ docker ps``` to check if the database started successfully -3. Configure the database connection in hertzbeat `application.yml`configuration file +3. Configure the database connection in hertzbeat `application.yml`configuration file Modify `hertzbeat/config/application.yml` configuration file Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.iot-db` data source parameters, HOST account and password. + Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.iot-db` data source parameters, HOST account and password. ``` warehouse: @@ -62,18 +64,22 @@ warehouse: expire-time: '7776000000' ``` -4. Restart HertzBeat +4. Restart HertzBeat -### FAQ +### FAQ 1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database 3. 
The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] + > Please check if the configuration parameters are correct > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory +> You can check the startup logs according to the logs directory + diff --git a/home/versioned_docs/version-v1.4.x/start/mysql-change.md b/home/versioned_docs/version-v1.4.x/start/mysql-change.md index 16bbab1b067..5dc513d3650 100644 --- a/home/versioned_docs/version-v1.4.x/start/mysql-change.md +++ b/home/versioned_docs/version-v1.4.x/start/mysql-change.md @@ -1,41 +1,48 @@ --- id: mysql-change title: Use MYSQL Replace H2 Database to Store Metadata(Optional) -sidebar_label: Use MYSQL Instead of H2 +sidebar_label: Use MYSQL Instead of H2 --- -MYSQL is a reliable relational database. In addition to default built-in H2 database, HertzBeat allow you to use MYSQL to store structured relational data such as monitoring information, alarm information and configuration information. -> If you have the MYSQL environment, can be directly to database creation step. +MYSQL is a reliable relational database. In addition to default built-in H2 database, HertzBeat allow you to use MYSQL to store structured relational data such as monitoring information, alarm information and configuration information. + +> If you have the MYSQL environment, can be directly to database creation step. + +### Install MYSQL via Docker -### Install MYSQL via Docker 1. 
Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 - After the installation you can check if the Docker version normally output at the terminal. + After the installation you can check if the Docker version normally output at the terminal. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. Install MYSQl with Docker +2. Install MYSQl with Docker + ``` $ docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7 ``` + `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. use ```$ docker ps``` to check if the database started successfully -### Database creation +### Database creation + 1. Enter MYSQL or use the client to connect MYSQL service - `mysql -uroot -p123456` + `mysql -uroot -p123456` 2. Create database named hertzbeat `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. Check if hertzbeat database has been successfully created `show databases;` -### Modify hertzbeat's configuration file application.yml and switch data source +### Modify hertzbeat's configuration file application.yml and switch data source 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `spring.database` data source parameters, URL account and password. 
+ ```yaml spring: datasource: @@ -44,7 +51,9 @@ spring: password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` - Specific replacement parameters are as follows and you need to configure account according to the mysql environment: + +Specific replacement parameters are as follows and you need to configure account according to the mysql environment: + ```yaml spring: datasource: @@ -54,4 +63,4 @@ spring: url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.4.x/start/package-deploy.md b/home/versioned_docs/version-v1.4.x/start/package-deploy.md index 21b7e2f5fe6..0111ef08114 100644 --- a/home/versioned_docs/version-v1.4.x/start/package-deploy.md +++ b/home/versioned_docs/version-v1.4.x/start/package-deploy.md @@ -3,20 +3,22 @@ id: package-deploy title: Install HertzBeat via Package sidebar_label: Install via Package --- + > You can install and run HertzBeat on Linux Windows Mac system, and CPU supports X86/ARM64. 1. Download HertzBeat installation package - Download installation package `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` corresponding to your system environment - - download from [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) repository + Download installation package `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` corresponding to your system environment + - download from [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) repository - download from [Download](https://hertzbeat.apache.org/docs/download) repository - 2. 
Configure HertzBeat's configuration file(optional) - Unzip the installation package to the host eg: /opt/hertzbeat - ``` + Unzip the installation package to the host eg: /opt/hertzbeat + + ``` $ tar zxvf hertzbeat-xx.tar.gz or $ unzip -o hertzbeat-xx.zip ``` + Modify the configuration file `hertzbeat/config/application.yml` params according to your needs. - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) @@ -27,22 +29,23 @@ sidebar_label: Install via Package 3. Configure the account file(optional) HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` If you need add, delete or modify account or password, configure `hertzbeat/config/sureness.yml`. Ignore this step without this demand. - For detail steps, please refer to [Configure Account Password](account-modify) + For detail steps, please refer to [Configure Account Password](account-modify) 4. Start the service - Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. - ``` + Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. + + ``` $ ./startup.sh ``` +5. Begin to explore HertzBeat -5. Begin to explore HertzBeat - - Access http://localhost:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access http://localhost:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! 6. 
Deploy collector clusters (Optional) - Download and unzip the collector release package `hertzbeat-collector-xx.tar.gz` to new machine [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - Configure the collector configuration yml file `hertzbeat-collector/config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` + ```yaml collector: dispatch: @@ -59,7 +62,7 @@ sidebar_label: Install via Package **HAVE FUN** -### FAQ +### FAQ 1. **If using the package not contains JDK, you need to prepare the JAVA environment in advance** @@ -67,6 +70,7 @@ sidebar_label: Install via Package requirement:JDK11 ENV download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully. + ``` $ java -version java version "11.0.12" @@ -74,18 +78,20 @@ sidebar_label: Install via Package Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) ``` - 2. **According to the process deploy,visit http://ip:1157/ no interface** Please refer to the following points to troubleshoot issues: + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** + > 1:Check whether database account and password configured is correct, the database is created. -> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 
+> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 4. **Monitoring historical charts with no data for a long time ** + > 1: Whether the time series database is configured or not, if it is not configured, there is no historical chart data. > 2: If you are using Tdengine, check whether the database `hertzbeat` of Tdengine is created. > 3: HertzBeat's configuration file `application.yml`, the dependent services in it, the time series, the IP account password, etc. are configured correctly. diff --git a/home/versioned_docs/version-v1.4.x/start/postgresql-change.md b/home/versioned_docs/version-v1.4.x/start/postgresql-change.md index 5be6b466223..a6c1fdb580a 100644 --- a/home/versioned_docs/version-v1.4.x/start/postgresql-change.md +++ b/home/versioned_docs/version-v1.4.x/start/postgresql-change.md @@ -3,27 +3,35 @@ id: postgresql-change title: Use PostgreSQL Replace H2 Database to Store Metadata(Optional) sidebar_label: Use PostgreSQL Instead of H2 --- + PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition to default built-in H2 database, HertzBeat allow you to use PostgreSQL to store structured relational data such as monitoring information, alarm information and configuration information. > If you have the PostgreSQL environment, can be directly to database creation step. ### Install PostgreSQL via Docker + 1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. Install PostgreSQL with Docker + ``` $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` + use```$ docker ps```to check if the database started successfully + 3. 
Create database in container manually or with [script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgresql-iotdb/conf/sql/schema.sql). ### Database creation -1. Enter postgreSQL or use the client to connect postgreSQL service + +1. Enter postgreSQL or use the client to connect postgreSQL service + ``` su - postgres psql @@ -39,6 +47,7 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition Modify `hertzbeat/config/application.yml` configuration file Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `spring.database` data source parameters, URL account and password. + ```yaml spring: datasource: @@ -47,7 +56,9 @@ spring: password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` + Specific replacement parameters are as follows and you need to configure account, ip, port according to the postgresql environment: + ```yaml spring: config: @@ -70,4 +81,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.4.x/start/quickstart.md b/home/versioned_docs/version-v1.4.x/start/quickstart.md index 2b5b528d784..909594e1275 100644 --- a/home/versioned_docs/version-v1.4.x/start/quickstart.md +++ b/home/versioned_docs/version-v1.4.x/start/quickstart.md @@ -1,7 +1,7 @@ --- id: quickstart title: Quick Start -sidebar_label: Quick Start +sidebar_label: Quick Start --- ### 🐕 Quick Start @@ -29,6 +29,7 @@ sidebar_label: Quick Start ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name 
hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. @@ -45,6 +46,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do 5. Deploy collector clusters - Download the release package `hertzbeat-collector-xx.tar.gz` to new machine [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - Configure the collector configuration yml file `hertzbeat-collector/config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` + ```yaml collector: dispatch: @@ -68,9 +70,9 @@ Detailed config refer to [Install HertzBeat via Package](https://hertzbeat.com/d 3. Web:need `nodejs npm angular-cli` environment, Run `ng serve --open` in `web-app` directory after backend startup. 4. Access `http://localhost:4200` to start, default account: `admin/hertzbeat` -Detailed steps refer to [CONTRIBUTING](../others/contributing) +Detailed steps refer to [CONTRIBUTING](../others/contributing) -##### 4:Install All(hertzbeat+mysql+iotdb/tdengine) via Docker-compose +##### 4:Install All(hertzbeat+mysql+iotdb/tdengine) via Docker-compose Install and deploy the mysql database, iotdb/tdengine database and hertzbeat at one time through [docker-compose deployment script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose). @@ -82,4 +84,4 @@ Install HertzBeat cluster in a Kubernetes cluster by Helm chart. 
Detailed steps refer to [Artifact Hub](https://artifacthub.io/packages/helm/hertzbeat/hertzbeat) -**HAVE FUN** +**HAVE FUN** diff --git a/home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md b/home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md index 1ff4de9428f..d1fbf4763fa 100644 --- a/home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md +++ b/home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md @@ -1,7 +1,7 @@ --- id: rainbond-deploy title: Use Rainbond Deploy HertzBeat -sidebar_label: Install via Rainbond +sidebar_label: Install via Rainbond --- If you are unfamiliar with Kubernetes, and want to install HertzBeat in Kubernetes, you can use Rainbond to deploy. Rainbond is a cloud-native application management platform built on Kubernetes and simplifies the application deployment to Kubernetes. diff --git a/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md b/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md index 41c9c2ed686..43253ed946b 100644 --- a/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md +++ b/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md @@ -33,7 +33,6 @@ gitee: https://gitee.com/hertzbeat/hertzbeat > System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate - ![](/img/docs/start/ssl_1.png) 2. Configure the monitoring website @@ -47,48 +46,38 @@ gitee: https://gitee.com/hertzbeat/hertzbeat > In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. - ![](/img/docs/start/ssl_3.png) - ![](/img/docs/start/ssl_11.png) 4. 
Set the threshold (triggered when the certificate expires) > System Page -> Alarms -> Alarm Thresholds -> New Thresholds - ![](/img/docs/start/ssl_4.png) > Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. - ![](/img/docs/start/ssl_5.png) > Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. - ![](/img/docs/start/ssl_6.png) - 5. Set the threshold (triggered one week before the certificate expires) > In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. - ![](/img/docs/start/ssl_7.png) > Finally, you can see the triggered alarm in the alarm center. - ![](/img/docs/start/ssl_8.png) - 6. Alarm notification (in time notification via Dingding WeChat Feishu, etc.) > Monitoring Tool -> Alarm Notification -> New Receiver - ![](/img/docs/start/ssl_10.png) For token configuration such as Dingding WeChat Feishu, please refer to the help document @@ -98,7 +87,6 @@ https://tancloud.cn/docs/help/alert_dingtalk > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured - ![](/img/docs/start/ssl_11.png) 7. OK When the threshold is triggered, we can receive the corresponding alarm message. If there is no notification, you can also view the alarm information in the alarm center. 
diff --git a/home/versioned_docs/version-v1.4.x/start/tdengine-init.md b/home/versioned_docs/version-v1.4.x/start/tdengine-init.md index 928e9b25865..fc1615fa8c7 100644 --- a/home/versioned_docs/version-v1.4.x/start/tdengine-init.md +++ b/home/versioned_docs/version-v1.4.x/start/tdengine-init.md @@ -1,84 +1,89 @@ --- id: tdengine-init title: Use Time Series Database TDengine to Store Metrics Data (Optional) -sidebar_label: Use TDengine Store Metrics +sidebar_label: Use TDengine Store Metrics --- HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) We recommend VictoriaMetrics for long term support. -TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. +TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** -Note⚠️ Need TDengine 3.x Version. +Note⚠️ Need TDengine 3.x Version. -> If you have TDengine environment, can directly skip to create a database instance. +> If you have TDengine environment, can directly skip to create a database instance. +### Install TDengine via Docker -### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). 
- After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install TDengine with Docker - ```shell - $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ - -v /opt/taosdata:/var/lib/taos \ - --name tdengine -e TZ=Asia/Shanghai \ - tdengine/tdengine:3.0.4.0 - ``` - `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. - `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. - use```$ docker ps``` to check if the database started successfully - -### Create database instance +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install TDengine with Docker +> +> ```shell +> $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ +> -v /opt/taosdata:/var/lib/taos \ +> --name tdengine -e TZ=Asia/Shanghai \ +> tdengine/tdengine:3.0.4.0 +> ``` +> +> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. +> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. +> use```$ docker ps``` to check if the database started successfully + +### Create database instance + +1. Enter database Docker container -1. Enter database Docker container ``` $ docker exec -it tdengine /bin/bash ``` 2. 
Create database named hertzbeat - After entering the container,execute `taos` command as follows: - + After entering the container,execute `taos` command as follows: + ``` root@tdengine-server:~/TDengine-server# taos Welcome to the TDengine shell from Linux, Client Version Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> ``` - - execute commands to create database - + + execute commands to create database + ``` taos> show databases; taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` - + The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). A data file every 10 days, memory blocks buffer is 16MB. -3. Check if hertzbeat database has been created success - +3. Check if hertzbeat database has been created success + ``` taos> show databases; taos> use hertzbeat; ``` -**Note⚠️If you install TDengine using package** +**Note⚠️If you install TDengine using package** > In addition to start the server,you must execute `systemctl start taosadapter` to start adapter -### Configure the database connection in hertzbeat `application.yml` configuration file +### Configure the database connection in hertzbeat `application.yml` configuration file 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.td-engine` data source parameters, URL account and password. + Replace `warehouse.store.td-engine` data source parameters, URL account and password. ```yaml warehouse: @@ -100,16 +105,21 @@ warehouse: ### FAQ 1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? 
+ > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database -3. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. +3. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed + +> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. 4. 
The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] + > Please check if the configuration parameters are correct > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory +> You can check the startup logs according to the logs directory + diff --git a/home/versioned_docs/version-v1.4.x/start/upgrade.md b/home/versioned_docs/version-v1.4.x/start/upgrade.md index d892370838e..101564dc00b 100644 --- a/home/versioned_docs/version-v1.4.x/start/upgrade.md +++ b/home/versioned_docs/version-v1.4.x/start/upgrade.md @@ -1,7 +1,7 @@ --- id: upgrade title: HertzBeat New Version Upgrade -sidebar_label: Version Upgrade Guide +sidebar_label: Version Upgrade Guide --- **HertzBeat Release Version List** @@ -15,7 +15,6 @@ HertzBeat's metadata information is stored in H2 or Mysql, PostgreSQL relational **You need to save and back up the data files of the database and monitoring templates yml files before upgrading** - ### Upgrade For Docker Deploy 1. If using custom monitoring templates @@ -23,30 +22,26 @@ HertzBeat's metadata information is stored in H2 or Mysql, PostgreSQL relational - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - And mount the template define directory when docker start `-v $(pwd)/define:/opt/hertzbeat/define` - `-v $(pwd)/define:/opt/hertzbeat/define` - -2. If using the built-in default H2 database +2. 
If using the built-in default H2 database - Need to mount or back up `-v $(pwd)/data:/opt/hertzbeat/data` database file directory in the container `/opt/hertzbeat/data` - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - Refer to [Docker installation of HertzBeat](docker-deploy) to create a new container using a new image. Note that the database file directory needs to be mounted `-v $(pwd)/data:/opt/hertzbeat/data` - -3. If using external relational database Mysql, PostgreSQL +3. If using external relational database Mysql, PostgreSQL - No need to mount the database file directory in the backup container - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - Refer to [Docker installation HertzBeat](docker-deploy) to create a new container using the new image, and configure the database connection in `application.yml` - ### Upgrade For Package Deploy -1. If using the built-in default H2 database +1. If using the built-in default H2 database - Back up the database file directory under the installation package `/opt/hertzbeat/data` - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - Refer to [Installation package to install HertzBeat](package-deploy) to start using the new installation package - -2. If using external relational database Mysql, PostgreSQL +2. 
If using external relational database Mysql, PostgreSQL - No need to back up the database file directory under the installation package - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - Refer to [Installation package to install HertzBeat](package-deploy) to start with the new installation package and configure the database connection in `application.yml` -**HAVE FUN** +**HAVE FUN** diff --git a/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md b/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md index c917d5cb7a4..66a91fd49af 100644 --- a/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md +++ b/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md @@ -1,13 +1,12 @@ --- id: victoria-metrics-init title: Use Time Series Database VictoriaMetrics to Store Metrics Data (Recommended) -sidebar_label: Use VictoriaMetrics Store Metrics +sidebar_label: Use VictoriaMetrics Store Metrics --- HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) We recommend VictoriaMetrics for long term support. - VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.Recommend Version(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** @@ -15,17 +14,19 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t > If you already have an VictoriaMetrics environment, you can skip directly to the YML configuration step. 
-### Install VictoriaMetrics via Docker +### Install VictoriaMetrics via Docker + > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. Install VictoriaMetrics via Docker +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` + +2. Install VictoriaMetrics via Docker ```shell $ docker run -d -p 8428:8428 \ @@ -34,8 +35,8 @@ $ docker run -d -p 8428:8428 \ victoriametrics/victoria-metrics:v1.95.1 ``` - `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory - use```$ docker ps``` to check if the database started successfully +`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory +use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file @@ -61,5 +62,7 @@ warehouse: ### FAQ -1. Do both the time series databases need to be configured? Can they both be used? +1. Do both the time series databases need to be configured? Can they both be used? + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. 
+ diff --git a/home/versioned_docs/version-v1.4.x/template.md b/home/versioned_docs/version-v1.4.x/template.md index a02c1d11925..6e82517f324 100644 --- a/home/versioned_docs/version-v1.4.x/template.md +++ b/home/versioned_docs/version-v1.4.x/template.md @@ -4,27 +4,27 @@ title: Monitoring Template Here sidebar_label: Monitoring Template --- -> Hertzbeat is an open source, real-time monitoring tool with custom-monitor and agentLess. - +> Hertzbeat is an open source, real-time monitoring tool with custom-monitor and agentLess. +> > We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? -Here is the architecture. +Here is the architecture. ![hertzBeat](/img/docs/hertzbeat-arch.png) -**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** +**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** ![](/img/docs/advanced/extend-point-1.png) **Welcome everyone to contribute your customized general monitoring type YML template during use. The available templates are as follows:** -### Application service monitoring +### Application service monitoring  👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
+ 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
+ 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
+ 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
@@ -32,7 +32,7 @@ Here is the architecture.  👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
-### Database monitoring +### Database monitoring  👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
@@ -49,7 +49,7 @@ Here is the architecture.  👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
-### Operating system monitoring +### Operating system monitoring  👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
@@ -64,7 +64,6 @@ Here is the architecture.  👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- ### Middleware monitoring  👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
@@ -78,13 +77,12 @@ Here is the architecture.  👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- ### CloudNative monitoring  👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
-### Network monitoring +### Network monitoring  👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md index cb8cec5b7aa..9ccb0e9454b 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md @@ -4,14 +4,16 @@ title: HTTP Protocol System Default Parsing Method sidebar_label: Default Parsing Method --- -> After calling the HTTP api to obtain the response data, use the default parsing method of hertzbeat to parse the response data. +> After calling the HTTP api to obtain the response data, use the default parsing method of hertzbeat to parse the response data. -**The interface response data structure must be consistent with the data structure rules specified by hertzbeat** +**The interface response data structure must be consistent with the data structure rules specified by hertzbeat** -### HertzBeat data format specification -Note⚠️ The response data is JSON format. +### HertzBeat data format specification + +Note⚠️ The response data is JSON format. Single layer format :key-value + ```json { "metricName1": "metricValue", @@ -20,7 +22,9 @@ Single layer format :key-value "metricName4": "metricValue" } ``` + Multilayer format:Set key value in the array + ```json [ { @@ -37,9 +41,11 @@ Multilayer format:Set key value in the array } ] ``` + eg: -Query the CPU information of the custom system. The exposed interface is `/metrics/cpu`. We need `hostname,core,useage` Metric. -If there is only one virtual machine, its single-layer format is : +Query the CPU information of the custom system. The exposed interface is `/metrics/cpu`. We need `hostname,core,useage` Metric. 
+If there is only one virtual machine, its single-layer format is : + ```json { "hostname": "linux-1", @@ -49,7 +55,9 @@ If there is only one virtual machine, its single-layer format is : "runningTime": 100 } ``` -If there are multiple virtual machines, the multilayer format is: : + +If there are multiple virtual machines, the multilayer format is: : + ```json [ { @@ -76,7 +84,7 @@ If there are multiple virtual machines, the multilayer format is: : ] ``` -**The corresponding monitoring template yml can be configured as follows** +**The corresponding monitoring template yml can be configured as follows** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -146,3 +154,4 @@ metrics: # Hertzbeat default parsing is used here parseType: default ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md index 4c65e29b8da..93a5c1e2dce 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md @@ -8,12 +8,10 @@ Through this tutorial, we describe step by step how to add a monitoring type bas Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [http Protocol Customization](extend-http). - ### HTTP protocol parses the general response structure to obtain metric data > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. 
- ``` { "code": 200, @@ -22,6 +20,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz } ``` + As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: @@ -63,16 +62,13 @@ As above, usually our background API interface will design such a general return **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. **Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: custom @@ -200,38 +196,30 @@ metrics: **The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type. ** - ![](/img/docs/advanced/extend-http-example-1.png) - ### The system page adds the monitoring of `hertzbeat` monitoring type > We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. 
- ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! - ![](/img/docs/advanced/extend-http-example-4.png) - - ### Set threshold alarm notification > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add recipients, set alarm notifications, etc. Have Fun!!! - ---- #### over! This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. +If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. **github: https://github.com/apache/hertzbeat** diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md index b9bbb61e72d..7881b048357 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md @@ -22,6 +22,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz "identifier": "admin" } ``` + **The response structure data is as follows**: ```json @@ -40,11 +41,9 @@ Before reading this tutorial, we hope that you are familiar with how to customiz **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding 
types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - 1. The custom monitoring type needs to add a new configuration monitoring template yml. We directly reuse the `hertzbeat` monitoring type in Tutorial 1 and modify it based on it A monitoring configuration definition file named after the monitoring type - hertzbeat_token @@ -214,15 +213,12 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) - ** After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page. ** ![](/img/docs/advanced/extend-http-example-6.png) ![](/img/docs/advanced/extend-http-example-7.png) - - ### Use `token` as a variable parameter to collect and use the following metricss **Add an index group definition `summary` in `app-hertzbeat_token.yml`, which is the same as `summary` in Tutorial 1, and set the collection priority to 1** @@ -334,8 +330,7 @@ metrics: # Response data analysis method: default-system rules, jsonPath-jsonPath script, website-website usability metric monitoring parseType: jsonPath parseScript: '$.data' - - +--- - name: summary # The smaller the index group scheduling priority (0-127), the higher the priority, and the index group with low priority will not be scheduled until the collection of index groups with high priority is completed, and the index groups with the same priority will be scheduled and collected in parallel # The metrics with priority 0 is the availability metrics, that is, it will be scheduled first, and other metricss will continue to be scheduled if the collection is successful, and the scheduling will be interrupted if the collection fails @@ -385,12 +380,12 @@ metrics: > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. 
Have Fun!!! ----- +--- #### over! This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. +If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. **github: https://github.com/apache/hertzbeat** diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md index 772c96d20d3..86a49c06756 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md @@ -3,16 +3,18 @@ id: extend-http-jsonpath title: HTTP Protocol JsonPath Parsing Method sidebar_label: JsonPath Parsing Method --- + > After calling the HTTP api to obtain the response data, use JsonPath script parsing method to parse the response data. -Note⚠️ The response data is JSON format. +Note⚠️ The response data is JSON format. + +**Use the JsonPath script to parse the response data into data that conforms to the data structure rules specified by HertzBeat** -**Use the JsonPath script to parse the response data into data that conforms to the data structure rules specified by HertzBeat** +#### JsonPath Operator -#### JsonPath Operator -[JSONPath online verification](https://www.jsonpath.cn) +[JSONPath online verification](https://www.jsonpath.cn) -| JSONPATH | Help description | +| JSONPATH | Help description | |------------------|----------------------------------------------------------------------------------------| | $ | Root object or element | | @ | Current object or element | @@ -25,8 +27,10 @@ Note⚠️ The response data is JSON format. 
| ?() | Filter (script) expression | | () | Script Expression | -#### HertzBeat data format specification +#### HertzBeat data format specification + Single layer format :key-value + ```json { "metricName1": "metricValue", @@ -35,7 +39,9 @@ Single layer format :key-value "metricName4": "metricValue" } ``` + Multilayer format:Set key value in the array + ```json [ { @@ -56,7 +62,8 @@ Multilayer format:Set key value in the array #### Example Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. -The raw data returned by the interface is as follows: +The raw data returned by the interface is as follows: + ```json { "firstName": "John", @@ -80,7 +87,8 @@ The raw data returned by the interface is as follows: } ``` -We use the jsonpath script to parse, and the corresponding script is: `$.number[*]`,The parsed data structure is as follows: +We use the jsonpath script to parse, and the corresponding script is: `$.number[*]`,The parsed data structure is as follows: + ```json [ { @@ -93,9 +101,10 @@ We use the jsonpath script to parse, and the corresponding script is: `$.number[ } ] ``` + This data structure conforms to the data format specification of HertzBeat, and the Metric `type,num` is successfully extracted. 
-**The corresponding monitoring template yml can be configured as follows** +**The corresponding monitoring template yml can be configured as follows** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -163,3 +172,4 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http.md index 242b63e8cf0..bab8800e7a1 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http.md @@ -1,35 +1,33 @@ --- id: extend-http title: HTTP Protocol Custom Monitoring -sidebar_label: HTTP Protocol Custom Monitoring +sidebar_label: HTTP Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use HTTP protocol to customize Metric monitoring +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use HTTP protocol to customize Metric monitoring -### HTTP protocol collection process +### HTTP protocol collection process 【**Call HTTP API**】->【**Response Verification**】->【**Parse Response Data**】->【**Default method parsing|JsonPath script parsing | XmlPath parsing(todo) | Prometheus parsing**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of HTTP protocol. We need to configure HTTP request parameters, configure which Metrics to obtain, and configure the parsing method and parsing script for response data. -HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. 
+HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. **System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. For details, refer to [**System Default Parsing**](extend-http-default) -**JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) - +**JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) -### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- -Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. +Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_http` which use the HTTP protocol to collect data. 
@@ -205,3 +203,4 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md b/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md index 3527ba60d5d..ec42f84f642 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md @@ -1,68 +1,73 @@ --- id: extend-jdbc title: JDBC Protocol Custom Monitoring -sidebar_label: JDBC Protocol Custom Monitoring +sidebar_label: JDBC Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JDBC(support mysql,mariadb,postgresql,sqlserver at present) to customize Metric monitoring. -> JDBC protocol custom monitoring allows us to easily monitor Metrics we want by writing SQL query statement. -### JDBC protocol collection process -【**System directly connected to MYSQL**】->【**Run SQL query statement**】->【**parse reponse data: oneRow, multiRow, columns**】->【**Metric data extraction**】 +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JDBC(support mysql,mariadb,postgresql,sqlserver at present) to customize Metric monitoring. +> JDBC protocol custom monitoring allows us to easily monitor Metrics we want by writing SQL query statement. + +### JDBC protocol collection process + +【**System directly connected to MYSQL**】->【**Run SQL query statement**】->【**parse reponse data: oneRow, multiRow, columns**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of JDBC protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query SQL statements. 
-### Data parsing method +### Data parsing method + We can obtain the corresponding Metric data through the data fields queried by SQL and the Metric mapping we need. At present, there are three mapping parsing methods:oneRow, multiRow, columns. -#### **oneRow** -> Query a row of data, return the column name of the result set through query and map them to the queried field. +#### **oneRow** + +> Query a row of data, return the column name of the result set through query and map them to the queried field. eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book limit 1; -Here the Metric field and the response data can be mapped into a row of collected data one by one. +Here the Metric field and the response data can be mapped into a row of collected data one by one. #### **multiRow** -> Query multiple rows of data, return the column names of the result set and map them to the queried fields. + +> Query multiple rows of data, return the column names of the result set and map them to the queried fields. eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book; -Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. +Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. #### **columns** -> Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. + +> Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. 
eg: queried fields:one two three four query SQL:select key, value from book; -SQL response data: +SQL response data: -| key | value | -|---------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | +| key | value | +|-------|-------| +| one | 243 | +| two | 435 | +| three | 332 | +| four | 643 | Here by mapping the Metric field with the key of the response data, we can obtain the corresponding value as collection and monitoring data. -### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. -eg:Define a custom monitoring type `app` named `example_sql` which use the JDBC protocol to collect data. - +eg:Define a custom monitoring type `app` named `example_sql` which use the JDBC protocol to collect data. 
```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -236,3 +241,4 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md b/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md index 5284118f8a7..2f9ba992f63 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md @@ -1,12 +1,14 @@ --- id: extend-jmx title: JMX Protocol Custom Monitoring -sidebar_label: JMX Protocol Custom Monitoring +sidebar_label: JMX Protocol Custom Monitoring --- + > From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JMX to customize Metric monitoring. > JMX protocol custom monitoring allows us to easily monitor Metrics we want by config JMX Mbeans Object. ### JMX protocol collection process + 【**Peer Server Enable Jmx Service**】->【**HertzBeat Connect Peer Server Jmx**】->【**Query Jmx Mbean Object Data**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of JMX protocol. We need to configure JMX request parameters, configure which Metrics to obtain, and configure Mbeans Object. @@ -15,25 +17,24 @@ It can be seen from the process that we define a monitoring type of JMX protocol By configuring the monitoring template YML metrics `field`, `aliasFields`, `objectName` of the `jmx` protocol to map and parse the `Mbean` object information exposed by the peer system. 
-### Custom Steps +### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ![](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_jvm` which use the JVM protocol to collect data. - ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: service @@ -191,3 +192,4 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md b/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md index c07c5ae8ca0..2047e1d1cf5 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md @@ -22,9 +22,9 @@ Mapping the fields returned by NGQL queries to the metrics we need allows us to For example: - online_meta_count#SHOW HOSTS META#Status#ONLINE - Counts the number of rows returned by `SHOW HOSTS META` where Status equals ONLINE. +Counts the number of rows returned by `SHOW HOSTS META` where Status equals ONLINE. - online_meta_count#SHOW HOSTS META## - Counts the number of rows returned by `SHOW HOSTS META`. +Counts the number of rows returned by `SHOW HOSTS META`. 
#### **oneRow** @@ -68,12 +68,13 @@ Notes: ![HertzBeat Page](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Template YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> Monitoring template is used to define the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information, etc. +> Monitoring template is used to define the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information, etc. eg: Customize a monitoring type named example_ngql, which collects metric data using NGQL. @@ -165,3 +166,4 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-point.md b/home/versioned_docs/version-v1.5.x/advanced/extend-point.md index eba1811e4fc..314e3f1affa 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-point.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-point.md @@ -1,31 +1,29 @@ --- id: extend-point title: Custom Monitoring -sidebar_label: Custom Monitoring +sidebar_label: Custom Monitoring --- -> HertzBeat has custom monitoring ability. You only need to configure monitoring template yml to fit a custom monitoring type. -> Custom monitoring currently supports [HTTP protocol](extend-http),[JDBC protocol](extend-jdbc), [SSH protocol](extend-ssh), [JMX protocol](extend-jmx), [SNMP protocol](extend-snmp). And it will support more general protocols in the future. -### Custom Monitoring Steps +> HertzBeat has custom monitoring ability. You only need to configure monitoring template yml to fit a custom monitoring type. 
+> Custom monitoring currently supports [HTTP protocol](extend-http),[JDBC protocol](extend-jdbc), [SSH protocol](extend-ssh), [JMX protocol](extend-jmx), [SNMP protocol](extend-snmp). And it will support more general protocols in the future. -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** +### Custom Monitoring Steps +**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- Configuration usages of the monitoring templates yml are detailed below. -### Monitoring Templates YML +### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. +> +> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. +eg:Define a custom monitoring type `app` named `example2` which use the HTTP protocol to collect data. -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example2` which use the HTTP protocol to collect data. 
- -**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - +**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring @@ -132,3 +130,4 @@ metrics: parseType: website ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md b/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md index c97aea1f766..b3bb9173c87 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md @@ -1,23 +1,22 @@ --- id: extend-snmp title: SNMP Protocol Custom Monitoring -sidebar_label: SNMP Protocol Custom Monitoring +sidebar_label: SNMP Protocol Custom Monitoring --- > From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SNMP to customize Metric monitoring. > JMX protocol custom monitoring allows us to easily monitor Metrics we want by config SNMP MIB OIDs. ### SNMP protocol collection process + 【**Peer Server Enable SNMP Service**】->【**HertzBeat Connect Peer Server SNMP**】->【**Query Oids Data**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of Snmp protocol. We need to configure Snmp request parameters, configure which Metrics to obtain, and configure oids. - ### Data parsing method By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` protocol of the monitoring template YML to capture the data specified by the peer and parse the mapping. 
- ### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** @@ -25,18 +24,17 @@ By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` p ![](/img/docs/advanced/extend-point-1.png) ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_windows` which use the SNMP protocol to collect data. - ```yaml # The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring category: os @@ -171,3 +169,4 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md b/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md index 772ee315207..bf960376179 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md @@ -1,21 +1,25 @@ --- id: extend-ssh title: SSH Protocol Custom Monitoring -sidebar_label: SSH Protocol Custom Monitoring +sidebar_label: SSH Protocol Custom Monitoring --- -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SSH protocol to customize Metric monitoring. 
-> SSH protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. -### SSH protocol collection process -【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 +> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SSH protocol to customize Metric monitoring. +> SSH protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. + +### SSH protocol collection process + +【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 It can be seen from the process that we define a monitoring type of SSH protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query script statements. -### Data parsing method +### Data parsing method + We can obtain the corresponding Metric data through the data fields queried by the SHELL script and the Metric mapping we need. At present, there are two mapping parsing methods:oneRow and multiRow which can meet the needs of most Metrics. -#### **oneRow** -> Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. +#### **oneRow** + +> Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. 
eg: Metrics of Linux to be queried hostname-host name,uptime-start time @@ -23,31 +27,37 @@ Host name original query command:`hostname` Start time original query command:`uptime | awk -F "," '{print $1}'` Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): `hostname; uptime | awk -F "," '{print $1}'` -The data responded by the terminal is: +The data responded by the terminal is: + ``` tombook 14:00:15 up 72 days -``` +``` + At last collected Metric data is mapped one by one as: hostname is `tombook` -uptime is `14:00:15 up 72 days` +uptime is `14:00:15 up 72 days` -Here the Metric field and the response data can be mapped into a row of collected data one by one +Here the Metric field and the response data can be mapped into a row of collected data one by one #### **multiRow** -> Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. + +> Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. 
eg: Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory -Memory metrics original query command:`free -m`, Console response: +Memory metrics original query command:`free -m`, Console response: + ```shell total used free shared buff/cache available Mem: 7962 4065 333 1 3562 3593 Swap: 8191 33 8158 ``` + In hertzbeat multiRow format parsing requires a one-to-one mapping between the column name of the response data and the indicaotr value, so the corresponding query SHELL script is: `free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` -Console response is: +Console response is: + ```shell total used free buff_cache available 7962 4066 331 3564 3592 @@ -60,18 +70,17 @@ Here the Metric field and the response data can be mapped into collected data on **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** ------- + Configuration usages of the monitoring templates yml are detailed below. ### Monitoring Templates YML > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. eg:Define a custom monitoring type `app` named `example_linux` which use the SSH protocol to collect data. 
- ```yaml # The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring category: os @@ -203,3 +212,4 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` + diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md b/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md index 9f21219a29f..f991b5702e8 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md @@ -8,12 +8,10 @@ Through this tutorial, we describe step by step how to customize and adapt a mon Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [Http Protocol Customization](extend-http). - ### HTTP protocol parses the general response structure to obtain metrics data > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. - ``` { "code": 200, @@ -22,6 +20,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz } ``` + As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. 
Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: @@ -58,17 +57,14 @@ As above, usually our background API interface will design such a general return **This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app. ** - ### Add Monitoring Template Yml **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** > We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. - - +> > Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. **Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** @@ -199,32 +195,24 @@ metrics: **The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type. ** - ![](/img/docs/advanced/extend-http-example-1.png) - ### The system page adds the monitoring of `hertzbeat` monitoring type > We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. - ![](/img/docs/advanced/extend-http-example-2.png) - ![](/img/docs/advanced/extend-http-example-3.png) > After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! 
- ![](/img/docs/advanced/extend-http-example-4.png) - - ### Set threshold alarm notification > Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. Have Fun!!! - ---- #### over! diff --git a/home/versioned_docs/version-v1.5.x/community/become_committer.md b/home/versioned_docs/version-v1.5.x/community/become_committer.md index f2824bdc4ed..382e69fb9bf 100644 --- a/home/versioned_docs/version-v1.5.x/community/become_committer.md +++ b/home/versioned_docs/version-v1.5.x/community/become_committer.md @@ -5,20 +5,20 @@ sidebar_position: 2 --- ## Become A Committer of Apache HertzBeat @@ -66,6 +66,7 @@ of the main website or HertzBeat's GitHub repositories. - +1 month with solid activity and engagement. ### Quality of contributions + - A solid general understanding of the project - Well tested, well-designed, following Apache HertzBeat coding standards, and simple patches. @@ -82,3 +83,4 @@ of the main website or HertzBeat's GitHub repositories. - Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events + diff --git a/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md b/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md index cf48cbe7c82..cd9dff4e02a 100644 --- a/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md +++ b/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md @@ -5,20 +5,20 @@ sidebar_position: 3 --- ## Become A PMC member of Apache HertzBeat @@ -66,6 +66,7 @@ of the main website or HertzBeat's GitHub repositories. - +3 month with solid activity and engagement. ### Quality of contributions + - A solid general understanding of the project - Well tested, well-designed, following Apache HertzBeat coding standards, and simple patches. 
@@ -82,3 +83,4 @@ of the main website or HertzBeat's GitHub repositories. - Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events + diff --git a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md index 755a98f3fd7..c86438a577e 100644 --- a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -5,39 +5,36 @@ sidebar_position: 3 --- - ## 1 Pull Requests & Changes Rule 1. `ISSUE`/`PR`(pull request) driving and naming - - After creating a new `PR`, you need to associate the existing corresponding `ISSUE` at the Github Development button on the `PR` page (if there is no corresponding ISSUE, it is recommended to create a new corresponding ISSUE). + - After creating a new `PR`, you need to associate the existing corresponding `ISSUE` at the Github Development button on the `PR` page (if there is no corresponding ISSUE, it is recommended to create a new corresponding ISSUE). - - Title naming format - `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` + - Title naming format + `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` 2. Description - - Please fill in the `PR` template to describe the contribution. So that the reviewer can understand the problem and solution from the description, rather than just from the code. - - Check the CheckList - + - Please fill in the `PR` template to describe the contribution. So that the reviewer can understand the problem and solution from the description, rather than just from the code. + - Check the CheckList 3. It's recommended that `PR` should be arranged changes such as `cleanup`, `Refactor`, `improve`, and `feature` into separated `PRs`/`Commits`. - 4. 
Commit message(English, lowercase, no special characters) The commit of messages should follow a pattern similar to the `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` @@ -50,11 +47,11 @@ sidebar_position: 3 3. Set **Checkstyle version** to **10.14.2**. 4. Set **Scan scope** to **Only Java sources (including tests)**. 5. Click **+** button in the **Configuration** section to open a dialog to choose the checkstyle config file. - 1. Enter a **Description**. For example, hertzbeat. - 2. Select **Use a local checkstyle file**. - 3. Set **File** to **script/checkstyle/checkstyle.xml**. - 4. Select **Store relative to project location**. - 5. Click **Next** → **Next** → **Finish**. + 1. Enter a **Description**. For example, hertzbeat. + 2. Select **Use a local checkstyle file**. + 3. Set **File** to **script/checkstyle/checkstyle.xml**. + 4. Select **Store relative to project location**. + 5. Click **Next** → **Next** → **Finish**. 6. Activate the configuration you just added by toggling the corresponding box. 7. Click **OK**. @@ -69,96 +66,94 @@ sidebar_position: 3 ### 3.1 Naming Style 1. Prioritize selecting nouns for variable naming, it's easier to distinguish between `variables` or `methods`. + ```java - Cache publicKeyCache; + Cache publicKeyCache; ``` - 2. Pinyin abbreviations are prohibited for variables (excluding nouns such as place names), such as chengdu. - 3. It is recommended to end variable names with a `type`. For variables of type `Collection/List`, take `xxxx` (plural representing multiple elements) or end with `xxxList` (specific type). For variables of type `map`, describe the `key` and `value` clearly: + ```java - Map idUserMap; - Map userIdNameMap; + Map idUserMap; + Map userIdNameMap; ``` - 4. That can intuitively know the type and meaning of the variable through its name. 
Method names should start with a verb first as follows: + ```java - void computeVcores(Object parameter1); + void computeVcores(Object parameter1); ``` + > Note: It is not necessary to strictly follow this rule in the `Builder` tool class. - ### 3.2 Constant Variables Definition -1. Redundant strings should be extracted as constants - >If a constant has been hardcoded twice or more times, please directly extract it as a constant and change the corresponding reference. - In generally, constants in `log` can be ignored to extract. - - - Negative demo: - - ```java - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put("status", "success"); - resp.put("code", ResponseCode.CODE_SUCCESS); - resp.put("data", data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put("status", "error"); - resp.put("code", ResponseCode.CODE_FAIL); - resp.put("data", null); - return resp; - } - ``` - - - Positive demo: - - > Strings are extracted as constant references. - - ```java - public static final String STATUS = "status"; - public static final String CODE = "code"; - public static final String DATA = "data"; - - public static RestResponse success(Object data) { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "success"); - resp.put(CODE, ResponseCode.CODE_SUCCESS); - resp.put(DATA, data); - return resp; - } - - public static RestResponse error() { - RestResponse resp = new RestResponse(); - resp.put(STATUS, "error"); - resp.put(CODE, ResponseCode.CODE_FAIL); - resp.put(DATA, null); - return resp; - } - ``` +1. Redundant strings should be extracted as constants + + > If a constant has been hardcoded twice or more times, please directly extract it as a constant and change the corresponding reference. + > In generally, constants in `log` can be ignored to extract. 
+ + - Negative demo: + + ```java + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put("status", "success"); + resp.put("code", ResponseCode.CODE_SUCCESS); + resp.put("data", data); + return resp; + } + + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put("status", "error"); + resp.put("code", ResponseCode.CODE_FAIL); + resp.put("data", null); + return resp; + } + ``` + - Positive demo: + + > Strings are extracted as constant references. + + ```java + public static final String STATUS = "status"; + public static final String CODE = "code"; + public static final String DATA = "data"; + + public static RestResponse success(Object data) { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "success"); + resp.put(CODE, ResponseCode.CODE_SUCCESS); + resp.put(DATA, data); + return resp; + } + public static RestResponse error() { + RestResponse resp = new RestResponse(); + resp.put(STATUS, "error"); + resp.put(CODE, ResponseCode.CODE_FAIL); + resp.put(DATA, null); + return resp; + } + ``` 2. Ensure code readability and intuitiveness - - The string in the `annotation` symbol doesn't need to be extracted as constant. +- The string in the `annotation` symbol doesn't need to be extracted as constant. - - The referenced `package` or `resource` name doesn't need to be extracted as constant. +- The referenced `package` or `resource` name doesn't need to be extracted as constant. 3. Variables that have not been reassigned must also be declared as final types. -4. About the arrangement order of `constant/variable` lines +4. About the arrangement order of `constant/variable` lines Sort the variable lines in the class in the order of 1. `public static final V`, `static final V`,`protected static final V`, `private static final V` 2. `public static v`, `static v`,`protected static v`, `private static v` 3. `public v`, `v`, `protected v`, `private v` - ### 3.3 Methods Rule 1. 
Sort the methods in the class in the order of `public`, `protected`, `private` @@ -174,9 +169,9 @@ sidebar_position: 3 3. If there are too many lines of code in the method, please have a try on using multiple sub methods at appropriate points to segment the method body. Generally speaking, it needs to adhere to the following principles: - - Convenient testing - - Good semantics - - Easy to read + - Convenient testing + - Good semantics + - Easy to read In addition, it is also necessary to consider whether the splitting is reasonable in terms of components, logic, abstraction, and other aspects in the scenario. @@ -185,35 +180,31 @@ sidebar_position: 3 ### 3.4 Collection Rule 1. For `collection` returned values, unless there are special `concurrent` (such as thread safety), always return the `interface`, such as: - - - returns List if use `ArrayList` - - returns Map if use `HashMap` - - returns Set if use `HashSet` - + - returns List if use `ArrayList` + - returns Map if use `HashMap` + - returns Set if use `HashSet` 2. If there are multiple threads, the following declaration or returned types can be used: - ```java - private CurrentHashMap map; - public CurrentHashMap funName(); - ``` +```java +private CurrentHashMap map; +public CurrentHashMap funName(); +``` 3. Use `isEmpty()` instead of `length() == 0` or `size() == 0` + - Negative demo: - - Negative demo: - - ```java - if (pathPart.length() == 0) { - return; - } - ``` - - - Positive demo: + ```java + if (pathPart.length() == 0) { + return; + } + ``` + - Positive demo: - ```java - if (pathPart.isEmpty()) { - return; - } - ``` + ```java + if (pathPart.isEmpty()) { + return; + } + ``` ### 3.5 Concurrent Processing @@ -226,9 +217,8 @@ sidebar_position: 3 ### 3.6 Control/Condition Statements 1. 
Avoid unreasonable `condition/control` branches order leads to: - - - Multiple code line `depths` of `n+1` - - Redundant lines + - Multiple code line `depths` of `n+1` + - Redundant lines Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to continuous nested `if... else..`, it should be considered to try - `merging branches`, @@ -237,77 +227,85 @@ Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to conti to reduce code line depth and improve readability like follows: - Union or merge the logic into the next level calling - - Negative demo: - ```java - if (isInsert) { - save(platform); - } else { - updateById(platform); - } - ``` - - Positive demo: - ```java - saveOrUpdate(platform); - ``` +- Negative demo: + +```java +if (isInsert) { +save(platform); +} else { +updateById(platform); +} +``` + +- Positive demo: + +```java +saveOrUpdate(platform); +``` + - Merge the conditions - - Negative demo: - ```java - if (expression1) { - if(expression2) { - ...... - } - } - ``` - - Positive demo: - ```java - if (expression1 && expression2) { - ...... - } - ``` +- Negative demo: + +```java +if (expression1) { +if(expression2) { +...... +} +} + +``` + +- Positive demo: + + ```java + if (expression1 && expression2) { + ...... + } + ``` - Reverse the condition - - Negative demo: - - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... - if (condition1) { - ... - } else { - ... - } - } - ``` - - - Positive demo: - - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` +- Negative demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (condition1) { + ... + } else { + ... + } + } + ``` +- Positive demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (!condition1) { + ... + return; + } + // ... 
+ } + ``` - Using a single variable or method to reduce the complex conditional expression - - Negative demo: - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` - - - Positive demo: - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // definition of the containsSqlServer - ``` +- Negative demo: + + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + ... + } + ``` +- Positive demo: + + ```java + if (containsSqlServer(dbType)) { + .... + } + //..... + // definition of the containsSqlServer + ``` > Using `sonarlint` and `better highlights` to check code depth looks like good in the future. @@ -315,22 +313,22 @@ to reduce code line depth and improve readability like follows: 1. Method lacks comments: - - `When`: When can the method be called - - `How`: How to use this method and how to pass parameters, etc. - - `What`: What functions does this method achieve - - `Note`: What should developers pay attention to when calling this method +- `When`: When can the method be called +- `How`: How to use this method and how to pass parameters, etc. +- `What`: What functions does this method achieve +- `Note`: What should developers pay attention to when calling this method 2. Missing necessary class header description comments. - Add `What`, `Note`, etc. like mentioned in the `1`. +Add `What`, `Note`, etc. like mentioned in the `1`. 3. The method declaration in the interface must be annotated. - - If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. +- If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. 
- - If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. +- If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. -4. The first word in the comment lines need to be capitalized, like `param` lines, `return` lines. +4. The first word in the comment lines need to be capitalized, like `param` lines, `return` lines. If a special reference as a subject does not need to be capitalized, special symbols such as quotation marks need to be noted. ### 3.8 Java Lambdas @@ -338,31 +336,29 @@ to reduce code line depth and improve readability like follows: 1. Prefer `non-capturing` lambdas (lambdas that do not contain references to the outer scope). Capturing lambdas need to create a new object instance for every call. `Non-capturing` lambdas can use the same instance for each invocation. - - Negative demo: +- Negative demo: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` - - - Positive demo: + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` +- Positive demo: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. Consider method references instead of inline lambdas - - Negative demo: - - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` +- Negative demo: - - Positive demo: + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` +- Positive demo: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -380,186 +376,180 @@ to reduce code line depth and improve readability like follows: 1. 
Use `StringUtils.isBlank` instead of `StringUtils.isEmpty` - - Negative demo: - - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` - - - Positive demo: - - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` +- Negative demo: + + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` +- Positive demo: + + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. Use `StringUtils.isNotBlank` instead of `StringUtils.isNotEmpty` - - Negative demo: - - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` - - - Positive demo: - - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` +- Negative demo: + + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` +- Positive demo: + + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. Use `StringUtils.isAllBlank` instead of `StringUtils.isAllEmpty` - - Negative demo: - - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` - - - Positive demo: - - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` +- Negative demo: + + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` +- Positive demo: + + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` Class 1. Enumeration value comparison - - Negative demo: - - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` - - - Positive demo: - - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` +- Negative demo: + + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` +- Positive demo: + + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. Enumeration classes do not need to implement Serializable - - Negative demo: - - ```java - public enum JobStatus implements Serializable { - ... - } - ``` - - - Positive demo: - - ```java - public enum JobStatus { - ... 
- } - ``` +- Negative demo: + + ```java + public enum JobStatus implements Serializable { + ... + } + ``` +- Positive demo: + + ```java + public enum JobStatus { + ... + } + ``` 3. Use `Enum.name()` instead of `Enum.toString()` - - Negative demo: - - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` - - - Positive demo: - - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` +- Negative demo: + + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` +- Positive demo: + + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. Enumeration class names uniformly use the Enum suffix - - Negative demo: - - ```java - public enum JobStatus { - ... - } - ``` - - - Positive demo: - - ```java - public enum JobStatusEnum { - ... - } - ``` +- Negative demo: + + ```java + public enum JobStatus { + ... + } + ``` +- Positive demo: + + ```java + public enum JobStatusEnum { + ... + } + ``` ### 3.13 `Deprecated` Annotation - - Negative demo: +- Negative demo: - ```java - @deprecated - public void process(String input) { - ... - } - ``` +```java +@deprecated +public void process(String input) { + ... +} +``` - - Positive demo: +- Positive demo: - ```java - @Deprecated - public void process(String input) { - ... - } - ``` +```java +@Deprecated +public void process(String input) { + ... +} +``` ## 4 Log 1. Use `placeholders` for log output: - - Negative demo - ```java - log.info("Deploy cluster request " + deployRequest); - ``` - - Positive demo - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` +- Negative demo -2. 
Pay attention to the selection of `log level` when printing logs + ```java + log.info("Deploy cluster request " + deployRequest); + ``` +- Positive demo + + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` - When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. +2. Pay attention to the selection of `log level` when printing logs - - Negative demo: +When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. - Assuming the current log level is `INFO`: +- Negative demo: - ```java - // ignored declaration lines. - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` + Assuming the current log level is `INFO`: - - Positive demo: + ```java + // ignored declaration lines. + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` +- Positive demo: - In this case, we should determine the log level in advance before making actual log calls as follows: + In this case, we should determine the log level in advance before making actual log calls as follows: - ```java - // ignored declaration lines. - List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + ```java + // ignored declaration lines. + List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 Testing @@ -568,8 +558,14 @@ to reduce code line depth and improve readability like follows: 2. The implemented interface needs to write the `e2e` test case script under the `e2e` module. 
## References + - https://site.mockito.org/ - https://alibaba.github.io/p3c/ - https://rules.sonarsource.com/java/ - https://junit.org/junit5/ - https://streampark.apache.org/ + +``` + +``` + diff --git a/home/versioned_docs/version-v1.5.x/community/contact.md b/home/versioned_docs/version-v1.5.x/community/contact.md index 91a4659ae5c..decd6d27c8c 100644 --- a/home/versioned_docs/version-v1.5.x/community/contact.md +++ b/home/versioned_docs/version-v1.5.x/community/contact.md @@ -1,7 +1,7 @@ --- id: contact title: Join discussion -sidebar_label: Discussion +sidebar_label: Discussion --- > If you need any help or want to exchange suggestions during the use process, you can discuss and exchange through ISSUE or Github Discussion. diff --git a/home/versioned_docs/version-v1.5.x/community/contribution.md b/home/versioned_docs/version-v1.5.x/community/contribution.md index 06987053696..f7d932bcd2c 100644 --- a/home/versioned_docs/version-v1.5.x/community/contribution.md +++ b/home/versioned_docs/version-v1.5.x/community/contribution.md @@ -5,20 +5,20 @@ sidebar_position: 0 --- > We are committed to maintaining a happy community that helps each other, welcome every contributor to join us! @@ -50,7 +50,6 @@ Even small corrections to typos are very welcome :) > To get HertzBeat code running on your development tools, and able to debug with breakpoints. > This is a front-end and back-end separation project. To start the local code, the back-end manager and the front-end web-app must be started separately. - #### Backend start 1. Requires `maven3+`, `java17` and `lombok` environments @@ -88,23 +87,31 @@ Of course, if you have a good idea, you can also propose it directly on GitHub D 1. First you need to fork your target [hertzbeat repository](https://github.com/apache/hertzbeat). 2. Then download the code locally with git command: + ```shell git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended ``` + 3. 
After the download is complete, please refer to the getting started guide or README file of the target repository to initialize the project. 4. Then, you can refer to the following command to submit the code: + ```shell git checkout -b a-feature-branch #Recommended ``` + 5. Submit the coed as a commit, the commit message format specification required: [module name or type name]feature or bugfix or doc: custom message. + ```shell git add git commit -m '[docs]feature: necessary instructions' #Recommended ``` + 6. Push to the remote repository + ```shell git push origin a-feature-branch ``` + 7. Then you can initiate a new PR (Pull Request) on GitHub. Please note that the title of the PR needs to conform to our spec, and write the necessary description in the PR to facilitate code review by Committers and other contributors. @@ -137,6 +144,7 @@ git pull upstream master ``` ### HertzBeat Improvement Proposal (HIP) + If you have major new features(e.g., support metrics push gateway, support logs monitoring), you need to write a design document known as a HertzBeat Improvement Proposal (HIP). Before starting to write a HIP, make sure you follow the process [here](https://github.com/apache/hertzbeat/tree/master/hip). ### How to become a Committer? @@ -152,14 +160,15 @@ Add WeChat account `ahertzbeat` to pull you into the WeChat group. ## 🥐 Architecture - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** Provide monitoring management, system management basic services. + > Provides monitoring management, monitoring configuration management, system user management, etc. -- **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. +> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. > Use common protocols to remotely collect and obtain peer-to-peer metrics data. 
-- **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services. +> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services. > Metrics data management, data query, calculation and statistics. -- **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** Provide alert service. +> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** Provide alert service. > Alarm calculation trigger, monitoring status linkage, alarm configuration, and alarm notification. -- **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** Provide web ui. +> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** Provide web ui. > Angular Web UI. ![hertzBeat](/img/docs/hertzbeat-arch.png) diff --git a/home/versioned_docs/version-v1.5.x/community/development.md b/home/versioned_docs/version-v1.5.x/community/development.md index 6bc9544a900..549084ce516 100644 --- a/home/versioned_docs/version-v1.5.x/community/development.md +++ b/home/versioned_docs/version-v1.5.x/community/development.md @@ -1,7 +1,7 @@ --- id: development title: How to Run or Build HertzBeat? -sidebar_label: Development +sidebar_label: Development --- ## Getting HertzBeat code up and running @@ -10,7 +10,6 @@ sidebar_label: Development > This is a front-end and back-end separation project. > To start the local code, the back-end [manager](https://github.com/apache/hertzbeat/tree/master/manager) and the front-end [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) must be started separately. - ### Backend start 1. Requires `maven3+`, `java17` and `lombok` environments @@ -37,9 +36,9 @@ sidebar_label: Development 7. 
Browser access to localhost:4200 to start, default account/password is *admin/hertzbeat* -## Build HertzBeat binary package +## Build HertzBeat binary package -> Requires `maven3+`, `java17`, `node` and `yarn` environments. +> Requires `maven3+`, `java17`, `node` and `yarn` environments. ### Frontend build @@ -53,7 +52,6 @@ sidebar_label: Development 5. Build web-app: `yarn package` - ### Backend build 1. Requires `maven3+`, `java17` environments diff --git a/home/versioned_docs/version-v1.5.x/community/document.md b/home/versioned_docs/version-v1.5.x/community/document.md index a4f871bb753..e561b3f1b2c 100644 --- a/home/versioned_docs/version-v1.5.x/community/document.md +++ b/home/versioned_docs/version-v1.5.x/community/document.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- Good documentation is critical for any type of software. Any contribution that can improve the HertzBeat documentation is welcome. @@ -94,4 +94,3 @@ css and other style files are placed in the `src/css` directory > All pages doc can be directly jumped to the corresponding github resource modification page through the 'Edit this page' button at the bottom - diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-release.md b/home/versioned_docs/version-v1.5.x/community/how-to-release.md index 8ffca0302cb..ed4bdfb636a 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-release.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-release.md @@ -22,11 +22,11 @@ This release process is operated in the UbuntuOS(Windows,Mac), and the following ## 2. Preparing for release > First summarize the account information to better understand the operation process, will be used many times later. 
-- apache id: `muchunjin (APACHE LDAP UserName)` -- apache passphrase: `APACHE LDAP Passphrase` -- apache email: `muchunjin@apache.org` -- gpg real name: `muchunjin (Any name can be used, here I set it to the same name as the apache id)` -- gpg key passphrase: `The password set when creating the gpg key, you need to remember this password` +> - apache id: `muchunjin (APACHE LDAP UserName)` +> - apache passphrase: `APACHE LDAP Passphrase` +> - apache email: `muchunjin@apache.org` +> - gpg real name: `muchunjin (Any name can be used, here I set it to the same name as the apache id)` +> - gpg key passphrase: `The password set when creating the gpg key, you need to remember this password` ### 2.1 Key generation @@ -165,11 +165,10 @@ $ (gpg --list-sigs muchunjin@apache.org && gpg --export --armor muchunjin@apache $ svn ci -m "add gpg key for muchunjin" ``` -## 3. Prepare material package & release +## 3. Prepare material package & release #### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release. - ```shell git checkout master git checkout -b release-1.6.0-rc1 @@ -292,7 +291,7 @@ apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz: OK #### 3.7 Publish the dev directory of the Apache SVN material package -- Clone the dev directory +- Clone the dev directory ```shell # Check out the dev directory of the Apache SVN to the svn/dev directory under dist in the root directory of the Apache HertzBeat project @@ -331,7 +330,6 @@ svn commit -m "release for HertzBeat 1.6.0" > Visit the address https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1/ in the browser, check if existed the new material package - ## 4. Enter the community voting stage #### 4.1 Send a Community Vote Email @@ -340,7 +338,7 @@ Send a voting email in the community requires at least three `+1` and no `-1`. 
> `Send to`: dev@hertzbeat.apache.org
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
-> `Body`: +> `Body`: ``` Hello HertzBeat Community: @@ -403,8 +401,7 @@ Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" I'm happy to announce the vote has passed: - - +--- 4 binding +1, from: - cc @@ -412,17 +409,14 @@ I'm happy to announce the vote has passed: 1 non-binding +1, from: - Roc Marshal - - +--- no 0 or -1 votes. Vote thread: https://lists.apache.org/thread/t01b2lbtqzyt7j4dsbdp5qjc3gngjsdq - - +--- Thank you to everyone who helped us to verify and vote for this release. We will move to the ASF Incubator voting shortly. - - +--- Best, ChunJin Mu ``` @@ -474,17 +468,14 @@ More detailed checklist please refer: Steps to validate the release, Please refer to: • https://www.apache.org/info/verification.html • https://hertzbeat.apache.org/docs/community/how_to_verify_release - - +--- How to Build: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package - - +--- Thanks, On behalf of Apache HertzBeat (incubating) community - - +--- Best, ChunJin Mu ``` @@ -546,10 +537,9 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http https://github.com/apache/hertzbeat/blob/master/home/docs/download.md https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md - Open the official website address https://hertzbeat.apache.org/docs/download/ to see if there is a new version of the download -> It should be noted that the download link may take effect after an hour, so please pay attention to it. +> It should be noted that the download link may take effect after an hour, so please pay attention to it. #### 4.3 Generate a release on github @@ -560,11 +550,14 @@ You can modify it on the original RC Release without creating a new Release. 
::: Then enter Release Title and Describe -- Release Title: +- Release Title: + ``` v1.6.0 ``` + - Describe: + ``` xxx release note: xxx @@ -596,8 +589,7 @@ Release Notes: https://github.com/apache/hertzbeat/releases/tag/v1.6.0 HertzBeat Resources: - Issue: https://github.com/apache/hertzbeat/issues - Mailing list: dev@hertzbeat.apache.org - - +--- Apache HertzBeat Team Best, @@ -608,4 +600,4 @@ This version release is over. --- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-verify.md b/home/versioned_docs/version-v1.5.x/community/how-to-verify.md index 41c3341b59e..38b507149b0 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-verify.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-verify.md @@ -40,7 +40,6 @@ The package uploaded to dist must include the source code package, and the binar 3. Whether to include the sha512 of the source code package 4. If the binary package is uploaded, also check the contents listed in (2)-(4) - ### 2.2 Check gpg signature First import the publisher's public key. Import KEYS from the svn repository to the local environment. (The person who releases the version does not need to import it again, the person who helps to do the verification needs to import it, and the user name is enough for the person who issued the version) @@ -51,6 +50,7 @@ First import the publisher's public key. Import KEYS from the svn repository to $ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS $ gpg --import KEYS # Import KEYS to local ``` + #### 2.2.2 Trust the public key Trust the KEY used in this version: @@ -123,8 +123,6 @@ check as follows: - [ ] Able to compile correctly - [ ] ..... - - ### 2.5 Check the source package > If the binary/web-binary package is uploaded, check the binary package. 
@@ -151,7 +149,6 @@ and check as follows: You can refer to this article: [ASF Third Party License Policy](https://apache.org/legal/resolved.html) - ## 3. Email reply If you initiate a posting vote, you can refer to this response example to reply to the email after verification @@ -170,9 +167,9 @@ If you have already voted on dev@hertzbeat.apache.org, you can take it directly //Incubator community voting, only IPMC members have binding binding,PPMC needs to be aware of binding changes Forward my +1 from dev@listhertzbeatnkis (non-binding) Copy my +1 from hertzbeat DEV ML (non-binding) -```` -::: +``` +::: Non-PPMC/Non-IPMC member: @@ -184,7 +181,7 @@ I checked: 3. LICENSE and NOTICE are exist 4. Build successfully on macOS(Big Sur) 5. -```` +``` PPMC/IPMC member: @@ -197,10 +194,8 @@ I checked: 3. LICENSE and NOTICE are exist 4. Build successfully on macOS(Big Sur) 5. -```` - - +``` --- -This doc refer from [Apache StreamPark](https://streampark.apache.org/) +This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/versioned_docs/version-v1.5.x/community/mailing_lists.md b/home/versioned_docs/version-v1.5.x/community/mailing_lists.md index fed6e3928ee..c5ab8df7604 100644 --- a/home/versioned_docs/version-v1.5.x/community/mailing_lists.md +++ b/home/versioned_docs/version-v1.5.x/community/mailing_lists.md @@ -5,20 +5,20 @@ sidebar_position: 1 --- The [Developer Mailing List](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) is the community-recommended way to communicate and obtain the latest information. 
@@ -32,18 +32,17 @@ Before you post anything to the mailing lists, be sure that you already **subscr - Use this list for your HertzBeat questions - Used by HertzBeat contributors to discuss development of HertzBeat - -| List Name | Address | Subscribe | Unsubscribe | Archive | -|---------------------|------------------------------|------------------------------------------------------------|----------------------------------------------------------------|----------------------------------------------------------------------------| -| **Developer List** | dev@hertzbeat.apache.org | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | +| List Name | Address | Subscribe | Unsubscribe | Archive | +|--------------------|--------------------------|--------------------------------------------------------|------------------------------------------------------------|------------------------------------------------------------------------| +| **Developer List** | dev@hertzbeat.apache.org | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### Notification List - Notifications on changes to the HertzBeat codebase -| List Name | Address | Subscribe | Unsubscribe | Archive | -|-------------------------|------------------------------------|------------------------------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------| -| **Notification List** | notifications@hertzbeat.apache.org | [subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | 
[archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| List Name | Address | Subscribe | Unsubscribe | Archive | +|-----------------------|------------------------------------|------------------------------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------| +| **Notification List** | notifications@hertzbeat.apache.org | [subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## Steps for Subscription diff --git a/home/versioned_docs/version-v1.5.x/community/new_committer_process.md b/home/versioned_docs/version-v1.5.x/community/new_committer_process.md index b72ec6e30b7..0801f579000 100644 --- a/home/versioned_docs/version-v1.5.x/community/new_committer_process.md +++ b/home/versioned_docs/version-v1.5.x/community/new_committer_process.md @@ -5,20 +5,20 @@ sidebar_position: 4 --- [Apache New Committer Guideline](https://community.apache.org/newcommitter.html#new-committer-process) @@ -47,10 +47,9 @@ sidebar_position: 4 see **Committer Account Creation** - - Wait until root says it is done - - PMC Chair enables svn and other access - - Add committer to the appropriate groups in JIRA and CWiki - + - Wait until root says it is done + - PMC Chair enables svn and other access + - Add committer to the appropriate groups in JIRA and CWiki - Notify the committer of completion see **Committer Done Template** @@ -243,3 +242,4 @@ you can now help fix that. A PPMC member will announce your election to the dev list soon. 
``` + diff --git a/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md b/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md index ebc84b92d67..414dad94a56 100644 --- a/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md +++ b/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md @@ -5,20 +5,20 @@ sidebar_position: 5 --- [Apache New Committer Guideline](https://community.apache.org/newcommitter.html#new-committer-process) @@ -81,7 +81,6 @@ ${Work list}[1] Note that, Voting ends one week from today, i.e. [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache Voting Guidelines](https://community.apache.org/newcommitter.html) - ### Close Vote Template ```text @@ -283,3 +282,4 @@ A PPMC member helps manage and guide the direction of the project. Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` + diff --git a/home/versioned_docs/version-v1.5.x/community/submit-code.md b/home/versioned_docs/version-v1.5.x/community/submit-code.md index dfea1601d87..15bfeba545a 100644 --- a/home/versioned_docs/version-v1.5.x/community/submit-code.md +++ b/home/versioned_docs/version-v1.5.x/community/submit-code.md @@ -5,22 +5,21 @@ sidebar_position: 2 --- - * First from the remote repository fork a copy of the code into your own repository * The remote dev and merge branch is `master`. 
@@ -28,49 +27,44 @@ sidebar_position: 2 * Clone your repository to your local ```shell - git clone git@github.com:/hertzbeat.git +git clone git@github.com:/hertzbeat.git ``` * Add remote repository address, named upstream ```shell - git remote add upstream git@github.com:apache/hertzbeat.git +git remote add upstream git@github.com:apache/hertzbeat.git ``` * View repository ```shell - git remote -v +git remote -v ``` - > At this time, there will be two repositories: origin (your own repository) and upstream (remote repository) +> At this time, there will be two repositories: origin (your own repository) and upstream (remote repository) * Get/Update remote repository code - ```shell - git fetch upstream - ``` - + ```shell + git fetch upstream + ``` * Synchronize remote repository code to local repository - ```shell - git checkout origin/dev - git merge --no-ff upstream/dev - ``` - + ```shell + git checkout origin/dev + git merge --no-ff upstream/dev + ``` * **⚠️Note that you must create a new branch to develop features `git checkout -b feature-xxx`. It is not recommended to use the master branch for direct development** - * After modifying the code locally, submit it to your own repository: - **Note that the submission information does not contain special characters** - ```shell - git commit -m 'commit content' - git push - ``` + **Note that the submission information does not contain special characters** + ```shell + git commit -m 'commit content' + git push + ``` * Submit changes to the remote repository, you can see a green button "Compare & pull request" on your repository page, click it. - * Select the modified local branch and the branch you want to merge with the past, you need input the message carefully, describe doc is important as code, click "Create pull request". - * Then the community Committers will do CodeReview, and then he will discuss some details (design, implementation, performance, etc.) 
with you, afterward you can directly update the code in this branch according to the suggestions (no need to create a new PR). When this pr is approved, the commit will be merged into the master branch - * Finally, congratulations, you have become an official contributor to HertzBeat ! You will be added to the contributor wall, you can contact the community to obtain a contributor certificate. + diff --git a/home/versioned_docs/version-v1.5.x/download.md b/home/versioned_docs/version-v1.5.x/download.md index 2c0df07bb7c..355fed91be7 100644 --- a/home/versioned_docs/version-v1.5.x/download.md +++ b/home/versioned_docs/version-v1.5.x/download.md @@ -18,12 +18,10 @@ sidebar_label: Download Previous releases of HertzBeat may be affected by security issues, please use the latest one. ::: - -| Version | Date | Download | Release Notes | +| Version | Date | Download | Release Notes | |---------|------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| | v1.6.0 | 2024.06.10 | [apache-hertzbeat-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-collector-1.6.0-incubating-bin.tar.gz.sha512) )
[apache-hertzbeat-1.6.0-incubating-src.tar.gz](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz) ( [signature](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc) , [sha512](https://downloads.apache.org/incubator/hertzbeat/1.6.0/apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512) ) | [release note](https://github.com/apache/hertzbeat/releases/tag/v1.6.0) | - ## All Archived Releases For older releases, please check the [archive](https://archive.apache.org/dist/incubator/hertzbeat/). @@ -34,3 +32,4 @@ For older releases, please check the [archive](https://archive.apache.org/dist/i - HertzBeat https://hub.docker.com/r/apache/hertzbeat - HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector + diff --git a/home/versioned_docs/version-v1.5.x/help/activemq.md b/home/versioned_docs/version-v1.5.x/help/activemq.md index 52e3090fde2..f24bc37fbbb 100644 --- a/home/versioned_docs/version-v1.5.x/help/activemq.md +++ b/home/versioned_docs/version-v1.5.x/help/activemq.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] **Use Protocol: JMX** -### Pre-monitoring Operations +### Pre-monitoring Operations > You need to enable the `JMX` service on ActiveMQ, HertzBeat uses the JMX protocol to collect metrics from ActiveMQ. @@ -26,6 +26,7 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] 2. Modify the `bin/env` file in the installation directory, configure the JMX port IP, etc. 
The original configuration information will be as follows + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" @@ -36,6 +37,7 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ``` Update to the following configuration, ⚠️ pay attention to modify `local external IP` + ```text # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" @@ -52,7 +54,7 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | | Monitoring name | The name that identifies this monitoring, and the name needs to be unique. 
| @@ -64,82 +66,81 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | | Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | -### Collect Metrics +### Collect Metrics #### metrics: broker -| Metric Name | Unit | Description | +| Metric Name | Unit | Description | |-------------------------|------|-----------------------------------------------------------------------| -| BrokerName | None | The name of the broker. | -| BrokerVersion | None | The version of the broker. | -| Uptime | None | Uptime of the broker. | +| BrokerName | None | The name of the broker. | +| BrokerVersion | None | The version of the broker. | +| Uptime | None | Uptime of the broker. | | UptimeMillis | ms | Uptime of the broker in milliseconds. | -| Persistent | None | Messages are synchronized to disk. | +| Persistent | None | Messages are synchronized to disk. | | MemoryPercentUsage | % | Percent of memory limit used. | | StorePercentUsage | % | Percent of store limit used. | | TempPercentUsage | % | Percent of temp limit used. | -| CurrentConnectionsCount | None | Attribute exposed for management | -| TotalConnectionsCount | None | Attribute exposed for management | -| TotalEnqueueCount | None | Number of messages that have been sent to the broker. | -| TotalDequeueCount | None | Number of messages that have been acknowledged on the broker. | -| TotalConsumerCount | None | Number of message consumers subscribed to destinations on the broker. | -| TotalProducerCount | None | Number of message producers active on destinations on the broker. | -| TotalMessageCount | None | Number of unacknowledged messages on the broker. 
| -| AverageMessageSize | None | Average message size on this broker | -| MaxMessageSize | None | Max message size on this broker | -| MinMessageSize | None | Min message size on this broker | - -#### metrics: topic - -| Metric Name | Unit | Description | -|-------------------------|------|-------------------------------------------------------------------------------------------| -| Name | None | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | None | The percentage of the memory limit used | -| ProducerCount | None | Number of producers attached to this destination | -| ConsumerCount | None | Number of consumers subscribed to this destination. | -| EnqueueCount | None | Number of messages that have been sent to the destination. | -| DequeueCount | None | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | None | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | None | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | None | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | None | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - +| CurrentConnectionsCount | None | Attribute exposed for management | +| TotalConnectionsCount | None | Attribute exposed for management | +| TotalEnqueueCount | None | Number of messages that have been sent to the broker. | +| TotalDequeueCount | None | Number of messages that have been acknowledged on the broker. | +| TotalConsumerCount | None | Number of message consumers subscribed to destinations on the broker. | +| TotalProducerCount | None | Number of message producers active on destinations on the broker. | +| TotalMessageCount | None | Number of unacknowledged messages on the broker. | +| AverageMessageSize | None | Average message size on this broker | +| MaxMessageSize | None | Max message size on this broker | +| MinMessageSize | None | Min message size on this broker | + +#### metrics: topic + +| Metric Name | Unit | Description | +|--------------------|------|-------------------------------------------------------------------------------------------| +| Name | None | Name of this destination. | +| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | +| MemoryPercentUsage | None | The percentage of the memory limit used | +| ProducerCount | None | Number of producers attached to this destination | +| ConsumerCount | None | Number of consumers subscribed to this destination. | +| EnqueueCount | None | Number of messages that have been sent to the destination. 
| +| DequeueCount | None | Number of messages that has been acknowledged (and removed) from the destination. | +| ForwardCount | None | Number of messages that have been forwarded (to a networked broker) from the destination. | +| InFlightCount | None | Number of messages that have been dispatched to, but not acknowledged by, consumers. | +| DispatchCount | None | Number of messages that has been delivered to consumers, including those not acknowledged | +| ExpiredCount | None | Number of messages that have been expired. | +| StoreMessageSize | B | The memory size of all messages in this destination's store. | +| AverageEnqueueTime | ms | Average time a message was held on this destination. | +| MaxEnqueueTime | ms | The longest time a message was held on this destination | +| MinEnqueueTime | ms | The shortest time a message was held on this destination | +| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | +| AverageMessageSize | B | Average message size on this destination | +| MaxMessageSize | B | Max message size on this destination | +| MinMessageSize | B | Min message size on this destination | #### metrics: memory_pool -| Metric Name | Unit | Description | -|-------------| ----------- |----------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric Name | Unit | Description | +|-------------|------|--------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### metrics: class_loading -| Metric Name | Unit | Description | -|-----------------------| ----------- | ----------- | -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - +| Metric Name | Unit | Description | +|-----------------------|------|--------------------------| +| 
LoadedClassCount | | Loaded Class Count | +| TotalLoadedClassCount | | Total Loaded Class Count | +| UnloadedClassCount | | Unloaded Class Count | #### metrics: thread -| Metric Name | Unit | Description | -|-------------------------| ----------- | ----------- | -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | +| Metric Name | Unit | Description | +|-------------------------|------|----------------------------| +| TotalStartedThreadCount | | Total Started Thread Count | +| ThreadCount | | Thread Count | +| PeakThreadCount | | Peak Thread Count | +| DaemonThreadCount | | Daemon Thread Count | +| CurrentThreadUserTime | ms | Current Thread User Time | +| CurrentThreadCpuTime | ms | Current Thread Cpu Time | + diff --git a/home/versioned_docs/version-v1.5.x/help/airflow.md b/home/versioned_docs/version-v1.5.x/help/airflow.md index 5323ede8110..52367155d89 100644 --- a/home/versioned_docs/version-v1.5.x/help/airflow.md +++ b/home/versioned_docs/version-v1.5.x/help/airflow.md @@ -9,33 +9,31 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8080 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| HTTPS | 
是否启用HTTPS | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:airflow_health -| 指标名称 | 指标单位 | 指标帮助描述 | -| ------------ | -------- | -------------------- | -| metadatabase | 无 | metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------|------|------------------| +| metadatabase | 无 | metadatabase健康情况 | +| scheduler | 无 | scheduler健康情况 | +| triggerer | 无 | triggerer健康情况 | #### 指标集合:airflow_version -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | -------- | --------------- | -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | - - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|---------------| +| value | 无 | Airflow版本 | +| git_version | 无 | Airflow git版本 | diff --git a/home/versioned_docs/version-v1.5.x/help/alert_console.md b/home/versioned_docs/version-v1.5.x/help/alert_console.md index e727fec4771..45ab7d791d3 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_console.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_console.md @@ -6,13 +6,13 @@ sidebar_label: Console address in alarm template > After the threshold is triggered, send the alarm information. When you notify through DingDing / enterprise Wechat / FeiShu robot or email, the alarm content has a detailed link to log in to the console. - ### Custom settings In our startup configuration file application.yml, find the following configuration + ```yml alerter: console-url: #Here is our custom console address ``` -The default value is the official console address of HertzBeat. \ No newline at end of file +The default value is the official console address of HertzBeat. 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md b/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md index fb63d52aa48..b86ed662940 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md @@ -5,15 +5,15 @@ sidebar_label: Alert DingDing robot notification keywords: [open source monitoring tool, open source alerter, open source DingDing robot notification] --- -> After the threshold is triggered send alarm information and notify the recipient by DingDing robot. +> After the threshold is triggered send alarm information and notify the recipient by DingDing robot. -### Operation steps +### Operation steps 1. **【DingDing desktop client】-> 【Group settings】-> 【Intelligent group assistant】-> 【Add new robot-select custom】-> 【Set robot name and avatar】-> 【Note⚠️Set custom keywords: HertzBeat】 ->【Copy its webhook address after adding successfully】** -> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. +> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. -![email](/img/docs/help/alert-notice-8.png) +![email](/img/docs/help/alert-notice-8.png) 2. **【Save access_token value of the WebHook address of the robot】** @@ -24,18 +24,18 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin ![email](/img/docs/help/alert-notice-9.png) -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### DingDing robot common issues -### DingDing robot common issues +1. DingDing group did not receive the robot alarm notification. -1. DingDing group did not receive the robot alarm notification. > Please check whether there is any triggered alarm information in the alarm center. > Please check whether DingDing robot is configured with security custom keywords :HertzBeat. > Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_discord.md b/home/versioned_docs/version-v1.5.x/help/alert_discord.md index 8dfdca384fa..7aa565c0acf 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_discord.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_discord.md @@ -45,7 +45,6 @@ keywords: [open source monitoring tool, open source alerter, open source Discord ![bot](/img/docs/help/discord-bot-7.png) - ### Add an alarm notification person in HertzBeat, the notification method is Discord Bot 1. 
**[Alarm notification] -> [Add recipient] -> [Select Discord robot notification method] -> [Set robot Token and ChannelId] -> [OK]** @@ -58,13 +57,12 @@ keywords: [open source monitoring tool, open source alerter, open source Discord ![email](/img/docs/help/alert-notice-policy.png) +### Discord Bot Notification FAQ -### Discord Bot Notification FAQ - -1. Discord doesn't receive bot alert notifications +1. Discord doesn't receive bot alert notifications > Please check whether the alarm information has been triggered in the alarm center > Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured -> Please check whether the bot is properly authorized by the Discord chat server +> Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_email.md b/home/versioned_docs/version-v1.5.x/help/alert_email.md index 353ae4673fe..fb6dc7fa571 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_email.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_email.md @@ -5,34 +5,35 @@ sidebar_label: Alert email notification keywords: [open source monitoring tool, open source alerter, open source email notification] --- -> After the threshold is triggered send alarm information and notify the recipient by email. +> After the threshold is triggered send alarm information and notify the recipient by email. -### Operation steps +### Operation steps -1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** +1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** -![email](/img/docs/help/alert-notice-1.png) +![email](/img/docs/help/alert-notice-1.png) 2. 
**【Get verification code】-> 【Enter email verification code】-> 【Confirm】** -![email](/img/docs/help/alert-notice-2.png) + ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) +![email](/img/docs/help/alert-notice-3.png) -3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### Email notification common issues -### Email notification common issues +1. Hertzbeat deployed on its own intranet cannot receive email notifications -1. Hertzbeat deployed on its own intranet cannot receive email notifications -> Hertzbeat needs to configure its own mail server, not tancloud. Please confirm whether you have configured its own mail server in application.yml +> Hertzbeat needs to configure its own mail server, not tancloud. Please confirm whether you have configured its own mail server in application.yml + +2. Cloud environment tancloud cannot receive email notification -2. Cloud environment tancloud cannot receive email notification > Please check whether there is any triggered alarm information in the alarm center. > Please check whether the mailbox is configured correctly and whether the alarm strategy association is configured. 
-> Please check whether the warning email is blocked in the trash can of the mailbox. +> Please check whether the warning email is blocked in the trash can of the mailbox. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md b/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md index f9e072e436a..1d5d41a15bc 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md @@ -5,30 +5,30 @@ sidebar_label: Alert Enterprise Wechat App notification keywords: [open source monitoring tool, open source alerter, open source Enterprise Wechat App notification] --- -> After the threshold is triggered send alarm information and notify the recipient by enterprise WeChat App. +> After the threshold is triggered send alarm information and notify the recipient by enterprise WeChat App. -### Operation steps +### Operation steps 1. **【Enterprise Wechat backstage】-> 【App Management】-> 【Create an app】-> 【Set App message】->【Copy AgentId and Secret adding successfully】** -![email](/img/docs/help/alert-wechat-1.jpg) +![email](/img/docs/help/alert-wechat-1.jpg) 2. **【Alarm notification】->【Add new recipient】 ->【Select Enterprise WeChat App notification method】->【Set Enterprise WeChat ID,Enterprise App ID and Enterprise App Secret 】-> 【Confirm】** ![email](/img/docs/help/alert-wechat-2.jpg) -3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-wechat-3.jpg) +![email](/img/docs/help/alert-wechat-3.jpg) +### Enterprise WeChat App common issues -### Enterprise WeChat App common issues +1. Enterprise WeChat App did not receive the alarm notification. -1. Enterprise WeChat App did not receive the alarm notification. > Please check if the user has application permissions. > Please check if the enterprise application callback address settings are normal. > Please check if the server IP is on the enterprise application whitelist. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_feishu.md b/home/versioned_docs/version-v1.5.x/help/alert_feishu.md index 56606012021..8f7e9391001 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_feishu.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_feishu.md @@ -5,30 +5,30 @@ sidebar_label: Alert FeiShu robot notification keywords: [open source monitoring tool, open source alerter, open source feishu bot notification] --- -> After the threshold is triggered send alarm information and notify the recipient by FeiShu robot. +> After the threshold is triggered send alarm information and notify the recipient by FeiShu robot. -### Operation steps +### Operation steps 1. **【FeiShu client】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -2. **【Save the key value of the WebHook address of the robot】** +2. 
**【Save the key value of the WebHook address of the robot】** > eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) +### FeiShu robot notification common issues -### FeiShu robot notification common issues +1. FeiShu group did not receive the robot alarm notification. -1. FeiShu group did not receive the robot alarm notification. > Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. +> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_slack.md b/home/versioned_docs/version-v1.5.x/help/alert_slack.md index 2540a27451d..5148432fe8b 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_slack.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_slack.md @@ -25,12 +25,11 @@ Refer to the official website document [Sending messages using Incoming Webhooks ![email](/img/docs/help/alert-notice-policy.png) - ### Slack Notification FAQ 1. Slack did not receive the robot warning notification > Please check whether the alarm information has been triggered in the alarm center -> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured +> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_telegram.md b/home/versioned_docs/version-v1.5.x/help/alert_telegram.md index 1fbe4f0ae7e..cb60f266778 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_telegram.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_telegram.md @@ -54,13 +54,12 @@ Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token ![email](/img/docs/help/alert-notice-policy.png) - ### Telegram Bot Notification FAQ 1. Telegram did not receive the robot warning notification > Please check whether the alarm information has been triggered in the alarm center > Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured -> UserId should be the UserId of the recipient of the message +> UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_threshold.md b/home/versioned_docs/version-v1.5.x/help/alert_threshold.md index 0574b1924ba..2619fdbcf6e 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_threshold.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_threshold.md @@ -3,6 +3,7 @@ id: alert_threshold title: Threshold Alert Configuration sidebar_label: Threshold Alert Configuration --- + > Configure alert thresholds for monitoring metrics (warning alert, critical alert, emergency alert). The system triggers alerts based on threshold configuration and collected metric data. ## Operational Steps diff --git a/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md b/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md index a7a7e6bdbe0..6b8772388e6 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md @@ -8,40 +8,40 @@ sidebar_label: Threshold Trigger Expression #### Supported Operators in Expressions -| Operator (Visual Configuration) | Operator (Expression Configuration) | Supported Types | Description | -| ------------------------------- | ----------------------------------- | ------------------------- | -------------------------- | -| Equals | equals(str1,str2) | String | Check if strings are equal | -| Not Equals | !equals(str1,str2) | String | Check if strings are not equal | -| Contains | contains(str1,str2) | String | Check if string contains | -| Not Contains | !contains(str1,str2) | String | Check if string does not contain | -| Matches | matches(str1,str2) | String | Check if string matches regex | -| Not Matches | !matches(str1,str2) | String | Check if string does not match regex | -| Exists | exists(obj) | String, Numeric, Time | Check if value exists | -| Not Exists | !exists(obj) | String, Numeric, Time | Check if value does not exist | -| Greater than | obj1 > obj2 | Numeric, Time | Check if value is greater 
than | -| Less than | obj1 < obj2 | Numeric, Time | Check if value is less than | -| Greater than or Equal to | obj1 >= obj2 | Numeric, Time | Check if value is greater than or equal to | -| Less than or Equal to | obj1 <= obj2 | Numeric, Time | Check if value is less than or equal to | -| Not Equal to | obj1 != obj2 | Numeric, Time | Check if values are not equal | -| Equal to | obj1 == obj2 | Numeric, Time | Check if values are equal | +| Operator (Visual Configuration) | Operator (Expression Configuration) | Supported Types | Description | +|---------------------------------|-------------------------------------|-----------------------|--------------------------------------------| +| Equals | equals(str1,str2) | String | Check if strings are equal | +| Not Equals | !equals(str1,str2) | String | Check if strings are not equal | +| Contains | contains(str1,str2) | String | Check if string contains | +| Not Contains | !contains(str1,str2) | String | Check if string does not contain | +| Matches | matches(str1,str2) | String | Check if string matches regex | +| Not Matches | !matches(str1,str2) | String | Check if string does not match regex | +| Exists | exists(obj) | String, Numeric, Time | Check if value exists | +| Not Exists | !exists(obj) | String, Numeric, Time | Check if value does not exist | +| Greater than | obj1 > obj2 | Numeric, Time | Check if value is greater than | +| Less than | obj1 < obj2 | Numeric, Time | Check if value is less than | +| Greater than or Equal to | obj1 >= obj2 | Numeric, Time | Check if value is greater than or equal to | +| Less than or Equal to | obj1 <= obj2 | Numeric, Time | Check if value is less than or equal to | +| Not Equal to | obj1 != obj2 | Numeric, Time | Check if values are not equal | +| Equal to | obj1 == obj2 | Numeric, Time | Check if values are equal | #### Expression Function Library List -| Supported Function Library | Description | -| -------------------------------- | 
-------------------------------------------------------------- | -| condition ? trueExpression : falseExpression | Ternary operator | -| toDouble(str) | Convert string to Double type | -| toBoolean(str) | Convert string to Boolean type | -| toInteger(str) | Convert string to Integer type | -| array[n] | Retrieve the nth element of an array | -| * | Multiplication | -| / | Division | -| % | Modulo | -| ( and ) | Parentheses for controlling the order of operations in logical or mathematical expressions | -| + | Addition | -| - | Subtraction | -| && | Logical AND operator | -| \|\| | Logical OR operator | +| Supported Function Library | Description | +|----------------------------------------------|--------------------------------------------------------------------------------------------| +| condition ? trueExpression : falseExpression | Ternary operator | +| toDouble(str) | Convert string to Double type | +| toBoolean(str) | Convert string to Boolean type | +| toInteger(str) | Convert string to Integer type | +| array[n] | Retrieve the nth element of an array | +| * | Multiplication | +| / | Division | +| % | Modulo | +| ( and ) | Parentheses for controlling the order of operations in logical or mathematical expressions | +| + | Addition | +| - | Subtraction | +| && | Logical AND operator | +| \|\| | Logical OR operator | #### Supported Environment Variables @@ -63,4 +63,4 @@ This variable is mainly used for calculations involving multiple instances. For 4. MYSQL Monitoring -> Alert when 'threads_running' metric under 'status' exceeds 7 `threads_running>7` -If you encounter any issues, feel free to discuss and provide feedback through our community group or ISSUE tracker! \ No newline at end of file +If you encounter any issues, feel free to discuss and provide feedback through our community group or ISSUE tracker! 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_webhook.md b/home/versioned_docs/version-v1.5.x/help/alert_webhook.md index adc1b6f12f8..d1741d71481 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_webhook.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_webhook.md @@ -5,23 +5,24 @@ sidebar_label: Alert webHook notification keywords: [open source monitoring tool, open source alerter, open source webhook notification] --- -> After the threshold is triggered send alarm information and call the Webhook interface through post request to notify the recipient. +> After the threshold is triggered send alarm information and call the Webhook interface through post request to notify the recipient. -### Operation steps +### Operation steps -1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** +1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** ![email](/img/docs/help/alert-notice-5.png) -2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) +![email](/img/docs/help/alert-notice-4.png) -### WebHook callback POST body BODY content +### WebHook callback POST body BODY content + +Content format:JSON -Content format:JSON ```json { "alarmId": 76456, @@ -43,24 +44,23 @@ Content format:JSON } ``` -| | | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | -| tags | example: {key1:value1} | - +| | | 
+|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | +| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | +| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | +| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | +| content | string title: The actual content of the alarm notification 告警通知实际内容 | +| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | +| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | +| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | +| tags | example: {key1:value1} | +### Webhook notification common issues -### Webhook notification common issues +1. WebHook callback did not take effect -1. WebHook callback did not take effect > Please check whether there is any triggered alarm information in the alarm center. > Please check whether the configured webhook callback address is correct. -Other issues can be fed back through the communication group ISSUE! +Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_wework.md b/home/versioned_docs/version-v1.5.x/help/alert_wework.md index e862fae7ddf..ca14d5615fa 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_wework.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_wework.md @@ -5,34 +5,34 @@ sidebar_label: Alert enterprise Wechat notification keywords: [open source monitoring tool, open source alerter, open source WeWork notification] --- -> After the threshold is triggered send alarm information and notify the recipient by enterprise Wechat robot. +> After the threshold is triggered send alarm information and notify the recipient by enterprise Wechat robot. -### Operation steps +### Operation steps -1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** +1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -![email](/img/docs/help/alert-notice-6.jpg) +![email](/img/docs/help/alert-notice-6.jpg) -2. **【Save the key value of the WebHook address of the robot】** +2. **【Save the key value of the WebHook address of the robot】** > eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** +3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** ![email](/img/docs/help/alert-notice-7.png) -4. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** +4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) +> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** +![email](/img/docs/help/alert-notice-4.png) ### Enterprise Wechat robot common issues -1. The enterprise wechat group did not receive the robot alarm notification. +1. The enterprise wechat group did not receive the robot alarm notification. + > Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. +> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/almalinux.md b/home/versioned_docs/version-v1.5.x/help/almalinux.md index 380e1439ab4..40a07028c96 100644 --- a/home/versioned_docs/version-v1.5.x/help/almalinux.md +++ b/home/versioned_docs/version-v1.5.x/help/almalinux.md @@ -4,14 +4,14 @@ title: Monitoring AlmaLinux Operating System Monitoring sidebar_label: AlmaLinux Operating System keywords: [open-source monitoring system, open-source operating system monitoring, AlmaLinux operating system monitoring] --- + > Collect and monitor common performance metrics of the AlmaLinux operating system. ### Configuration Parameters - -| Parameter Name | Parameter Help Description | -| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------- | -| Monitoring Host | The monitored peer's IPv4, IPv6, or domain name. Note ⚠️ No protocol header (e.g., https://, http://). | +| Parameter Name | Parameter Help Description | +|-------------------|---------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | The monitored peer's IPv4, IPv6, or domain name. Note ⚠️ No protocol header (e.g., https://, http://). | | Task Name | A unique name to identify this monitoring task. | | Port | The port provided by Linux SSH, default is 22. | | Timeout | Set the connection timeout in milliseconds, default is 6000 ms. 
| @@ -28,18 +28,16 @@ keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Basic System Information - -| Metric Name | Metric Unit | Metric Help Description | -| -------------- | ----------- | ------------------------ | +| Metric Name | Metric Unit | Metric Help Description | +|----------------|-------------|--------------------------| | Host Name | None | Host name | | System Version | None | Operating system version | | Uptime | None | Uptime | #### Metric Set: CPU Information - -| Metric Name | Metric Unit | Metric Help Description | -| -------------- | ----------- | --------------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|----------------|-------------|-----------------------------------| | info | None | CPU model | | cores | Cores | Number of CPU cores | | interrupt | Count | Number of CPU interrupts | @@ -49,9 +47,8 @@ keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Memory Information - -| Metric Name | Metric Unit | Metric Help Description | -| ----------- | ----------- | ----------------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------| | total | Mb | Total memory capacity | | used | Mb | Memory used by user programs | | free | Mb | Free memory capacity | @@ -61,9 +58,8 @@ keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Disk Information - -| Metric Name | Metric Unit | Metric Help Description | -| ------------- | ----------- | -------------------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|---------------|-------------|----------------------------------------| | disk_num | Count | Total number of disks | | partition_num | Count | Total number of partitions | | block_write | Blocks | Total number of blocks written to disk | @@ -72,18 +68,16 @@ 
keywords: [open-source monitoring system, open-source operating system monitorin #### Metric Set: Network Card Information - -| Metric Name | Metric Unit | Metric Help Description | -| -------------- | ----------- | ----------------------------- | +| Metric Name | Metric Unit | Metric Help Description | +|----------------|-------------|-------------------------------| | interface_name | None | Network card name | | receive_bytes | Byte | Inbound data traffic (bytes) | | transmit_bytes | Byte | Outbound data traffic (bytes) | #### Metric Set: File System - | Metric Name | Metric Unit | Metric Help Description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | filesystem | None | Name of the file system | | used | Mb | Used disk size | | available | Mb | Available disk size | @@ -94,9 +88,8 @@ keywords: [open-source monitoring system, open-source operating system monitorin Statistics for the top 10 processes using the CPU. Statistics include: process ID, CPU usage, memory usage, and executed command. - | Metric Name | Metric Unit | Metric Help Description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | pid | None | Process ID | | cpu_usage | % | CPU usage | | mem_usage | % | Memory usage | @@ -106,12 +99,12 @@ Statistics for the top 10 processes using the CPU. Statistics include: process I Statistics for the top 10 processes using memory. Statistics include: process ID, memory usage, CPU usage, and executed command. 
- | Metric Name | Metric Unit | Metric Help Description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | pid | None | Process ID | | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | --- + diff --git a/home/versioned_docs/version-v1.5.x/help/api.md b/home/versioned_docs/version-v1.5.x/help/api.md index f9a0a60d390..98763e0eefe 100644 --- a/home/versioned_docs/version-v1.5.x/help/api.md +++ b/home/versioned_docs/version-v1.5.x/help/api.md @@ -5,33 +5,33 @@ sidebar_label: HTTP API keywords: [open source monitoring tool, monitoring http api] --- -> Call HTTP API interface, check whether the interface is available, and monitor its response time and other Metrics. +> Call HTTP API interface, check whether the interface is available, and monitor its response time and other Metrics. ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | -| Request mode | Set the request mode of interface call:GET, POST, PUT, DELETE | -| Enable HTTPS | Whether to access the website through HTTPS. 
Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Username | User name used for interface Basic authentication or Digest authentication | -| Password | Password used for interface Basic authentication or Digest authentication | -| Headers | HTTP request headers | -| Params | HTTP query params, support [time expression](time_expression) | -| Content-Type | Set the resource type when carrying the BODY request body data request | -| Request BODY | Set the carry BODY request body data, which is valid when PUT or POST request method is used | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | +| Request mode | Set the request mode of interface call:GET, POST, PUT, DELETE | +| Enable HTTPS | Whether to access the website through HTTPS. 
Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Username | User name used for interface Basic authentication or Digest authentication | +| Password | Password used for interface Basic authentication or Digest authentication | +| Headers | HTTP request headers | +| Params | HTTP query params, support [time expression](time_expression) | +| Content-Type | Set the resource type when carrying the BODY request body data request | +| Request BODY | Set the carry BODY request body data, which is valid when PUT or POST request method is used | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.5.x/help/centos.md b/home/versioned_docs/version-v1.5.x/help/centos.md index 2a6ad2b0a6d..60b770ebf96 100644 --- a/home/versioned_docs/version-v1.5.x/help/centos.md +++ b/home/versioned_docs/version-v1.5.x/help/centos.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source os monitoring tool, monitori ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. 
Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | 
+|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % 
| usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/versioned_docs/version-v1.5.x/help/clickhouse.md b/home/versioned_docs/version-v1.5.x/help/clickhouse.md index 3e09687a450..efd873d1f32 100644 --- a/home/versioned_docs/version-v1.5.x/help/clickhouse.md +++ b/home/versioned_docs/version-v1.5.x/help/clickhouse.md @@ -4,91 +4,93 @@ title: Monitoring ClickHouse Database Monitoring sidebar_label: ClickHouse Database keywords: [open source monitoring system, open source database monitoring, ClickHouse database monitoring] --- + > Collect and monitor general performance metrics for the ClickHouse database. ### Configuration Parameters -| Parameter Name | Parameter Description | -| -------------- | ------------------------------------------------------------------------- | -| Monitor Host | IP address, IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ without protocol prefix (e.g., https://, http://). | -| Task Name | Name identifying this monitoring, ensuring uniqueness. | -| Port | Port number of the database exposed to the outside, default is 8123. | -| Query Timeout | Timeout for SQL queries to respond, in milliseconds (ms), default is 6000ms. | -| Database Name | Name of the database instance, optional. | -| Username | Username for database connection, optional. | -| Password | Password for database connection, optional. | -| Collection Interval | Interval for periodic data collection during monitoring, in seconds, with a minimum interval of 30 seconds. | -| Tag Binding | Used for categorizing and managing monitored resources. | -| Description | Additional information to identify and describe this monitoring, where users can add remarks. 
| +| Parameter Name | Parameter Description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | IP address, IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ without protocol prefix (e.g., https://, http://). | +| Task Name | Name identifying this monitoring, ensuring uniqueness. | +| Port | Port number of the database exposed to the outside, default is 8123. | +| Query Timeout | Timeout for SQL queries to respond, in milliseconds (ms), default is 6000ms. | +| Database Name | Name of the database instance, optional. | +| Username | Username for database connection, optional. | +| Password | Password for database connection, optional. | +| Collection Interval | Interval for periodic data collection during monitoring, in seconds, with a minimum interval of 30 seconds. | +| Tag Binding | Used for categorizing and managing monitored resources. | +| Description | Additional information to identify and describe this monitoring, where users can add remarks. 
| ### Collected Metrics #### Metric Set: ping Availability -| Metric Name | Metric Unit | Metric Description | -| ------------- | ----------- | ------------------ | -| responseTime | N/A | Response time | +| Metric Name | Metric Unit | Metric Description | +|--------------|-------------|--------------------| +| responseTime | N/A | Response time | #### Metric Set: Data from system.metrics table -| Metric Name | Metric Unit | Metric Description | -| ---------------------- | ----------- | ------------------------------------------------------------- | -| Query | N/A | Number of queries being executed | -| Merge | N/A | Number of background merges being executed | -| Move | N/A | Number of background moves being executed | -| PartMutation | N/A | Number of table mutations | -| ReplicatedFetch | N/A | Number of data blocks fetched from replicas | -| ReplicatedSend | N/A | Number of data blocks sent to replicas | -| ReplicatedChecks | N/A | Number of consistency checks on data blocks | -| QueryPreempted | N/A | Number of queries stopped or waiting | -| TCPConnection | N/A | Number of TCP connections | -| HTTPConnection | N/A | Number of HTTP connections | -| OpenFileForRead | N/A | Number of open readable files | -| OpenFileForWrite | N/A | Number of open writable files | -| QueryThread | N/A | Number of threads processing queries | -| ReadonlyReplica | N/A | Number of Replicated tables in read-only state | -| EphemeralNode | N/A | Number of ephemeral nodes in ZooKeeper | -| ZooKeeperWatch | N/A | Number of ZooKeeper event subscriptions | -| StorageBufferBytes | Bytes | Bytes in Buffer tables | -| VersionInteger | N/A | ClickHouse version number | -| RWLockWaitingReaders | N/A | Number of threads waiting for read-write lock on a table | -| RWLockWaitingWriters | N/A | Number of threads waiting for write lock on a table | -| RWLockActiveReaders | N/A | Number of threads holding read lock on a table | -| RWLockActiveWriters | N/A | Number of threads holding write lock on 
a table | -| GlobalThread | N/A | Number of threads in global thread pool | -| GlobalThreadActive | N/A | Number of active threads in global thread pool | -| LocalThread | N/A | Number of threads in local thread pool | -| LocalThreadActive | N/A | Number of active threads in local thread pool | +| Metric Name | Metric Unit | Metric Description | +|----------------------|-------------|----------------------------------------------------------| +| Query | N/A | Number of queries being executed | +| Merge | N/A | Number of background merges being executed | +| Move | N/A | Number of background moves being executed | +| PartMutation | N/A | Number of table mutations | +| ReplicatedFetch | N/A | Number of data blocks fetched from replicas | +| ReplicatedSend | N/A | Number of data blocks sent to replicas | +| ReplicatedChecks | N/A | Number of consistency checks on data blocks | +| QueryPreempted | N/A | Number of queries stopped or waiting | +| TCPConnection | N/A | Number of TCP connections | +| HTTPConnection | N/A | Number of HTTP connections | +| OpenFileForRead | N/A | Number of open readable files | +| OpenFileForWrite | N/A | Number of open writable files | +| QueryThread | N/A | Number of threads processing queries | +| ReadonlyReplica | N/A | Number of Replicated tables in read-only state | +| EphemeralNode | N/A | Number of ephemeral nodes in ZooKeeper | +| ZooKeeperWatch | N/A | Number of ZooKeeper event subscriptions | +| StorageBufferBytes | Bytes | Bytes in Buffer tables | +| VersionInteger | N/A | ClickHouse version number | +| RWLockWaitingReaders | N/A | Number of threads waiting for read-write lock on a table | +| RWLockWaitingWriters | N/A | Number of threads waiting for write lock on a table | +| RWLockActiveReaders | N/A | Number of threads holding read lock on a table | +| RWLockActiveWriters | N/A | Number of threads holding write lock on a table | +| GlobalThread | N/A | Number of threads in global thread pool | +| GlobalThreadActive | N/A | 
Number of active threads in global thread pool | +| LocalThread | N/A | Number of threads in local thread pool | +| LocalThreadActive | N/A | Number of active threads in local thread pool | #### Metric Set: Data from system.events table -| Metric Name | Metric Unit | Metric Description | -| ------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------- | -| Query | N/A | Number of queries to parse and possibly execute. Excludes queries rejected due to AST size limits, quota limits, or simultaneous query limits. May include internal queries initiated by ClickHouse. Subqueries are not counted. | -| SelectQuery | N/A | Number of Select queries possibly executed | -| InsertQuery | N/A | Number of Insert queries possibly executed | -| InsertedRows | N/A | Number of rows inserted into all tables | -| InsertedBytes | Bytes | Number of bytes inserted into all tables | -| FailedQuery | N/A | Number of failed queries | -| FailedSelectQuery | N/A | Number of failed Select queries | -| FileOpen | N/A | Number of file openings | -| MergeTreeDataWriterRows | N/A | Number of data rows written to MergeTree tables | -| MergeTreeDataWriterCompressedBytes | Bytes | Number of compressed data bytes written to MergeTree tables | +| Metric Name | Metric Unit | Metric Description | +|------------------------------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Query | N/A | Number of queries to parse and possibly execute. Excludes queries rejected due to AST size limits, quota limits, or simultaneous query limits. May include internal queries initiated by ClickHouse. Subqueries are not counted. 
| +| SelectQuery | N/A | Number of Select queries possibly executed | +| InsertQuery | N/A | Number of Insert queries possibly executed | +| InsertedRows | N/A | Number of rows inserted into all tables | +| InsertedBytes | Bytes | Number of bytes inserted into all tables | +| FailedQuery | N/A | Number of failed queries | +| FailedSelectQuery | N/A | Number of failed Select queries | +| FileOpen | N/A | Number of file openings | +| MergeTreeDataWriterRows | N/A | Number of data rows written to MergeTree tables | +| MergeTreeDataWriterCompressedBytes | Bytes | Number of compressed data bytes written to MergeTree tables | #### Metric Set: Data from system.asynchronous_metrics table -| Metric Name | Metric Unit | Metric Description | -| -------------------------------------- | ----------- | -------------------------------------- | -| AsynchronousMetricsCalculationTimeSpent | N/A | Time spent calculating asynchronous metrics (seconds) | -| jemalloc.arenas.all.muzzy_purged | N/A | Number of purged muzzy pages | -| jemalloc.arenas.all.dirty_purged | N/A | Number of purged dirty pages | -| BlockReadBytes_ram1 | N/A | Number of bytes read from ram1 block | -| jemalloc.background_thread.run_intervals | N/A | Number of intervals jemalloc background thread ran | -| BlockQueueTime_nbd13 | N/A | Queue wait time for nbd13 block | -| jemalloc.background_thread.num_threads | N/A | Number of jemalloc background threads | -| jemalloc.resident | N/A | Physical memory size allocated by jemalloc (bytes) | -| InterserverThreads | N/A | Number of Interserver threads | -| BlockWriteMerges_nbd7 | N/A | Number of block write merges for nbd7 block | -| MarkCacheBytes | N/A | Size of marks cache in StorageMergeTree | -| MarkCacheFiles | N/A | Number of files in marks cache for StorageMergeTree | -| MaxPartCountForPartition | N/A | Maximum active data blocks in partitions | \ No newline at end of file +| Metric Name | Metric Unit | Metric Description | 
+|------------------------------------------|-------------|-------------------------------------------------------| +| AsynchronousMetricsCalculationTimeSpent | N/A | Time spent calculating asynchronous metrics (seconds) | +| jemalloc.arenas.all.muzzy_purged | N/A | Number of purged muzzy pages | +| jemalloc.arenas.all.dirty_purged | N/A | Number of purged dirty pages | +| BlockReadBytes_ram1 | N/A | Number of bytes read from ram1 block | +| jemalloc.background_thread.run_intervals | N/A | Number of intervals jemalloc background thread ran | +| BlockQueueTime_nbd13 | N/A | Queue wait time for nbd13 block | +| jemalloc.background_thread.num_threads | N/A | Number of jemalloc background threads | +| jemalloc.resident | N/A | Physical memory size allocated by jemalloc (bytes) | +| InterserverThreads | N/A | Number of Interserver threads | +| BlockWriteMerges_nbd7 | N/A | Number of block write merges for nbd7 block | +| MarkCacheBytes | N/A | Size of marks cache in StorageMergeTree | +| MarkCacheFiles | N/A | Number of files in marks cache for StorageMergeTree | +| MaxPartCountForPartition | N/A | Maximum active data blocks in partitions | + diff --git a/home/versioned_docs/version-v1.5.x/help/debian.md b/home/versioned_docs/version-v1.5.x/help/debian.md index 65940c34a39..14cee060aaf 100644 --- a/home/versioned_docs/version-v1.5.x/help/debian.md +++ b/home/versioned_docs/version-v1.5.x/help/debian.md @@ -4,13 +4,13 @@ title: Monitoring Debian System Monitoring sidebar_label: Debian keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Monitoring] --- + > Collect and monitor general performance metrics of the Debian system. 
## Configuration Parameters - -| Parameter Name | Metric help description | -| ----------------------- | ----------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Metric help description | +|-------------------------|-------------------------------------------------------------------------------------------------------------------| | Target Host | The monitored destination IPV4, IPV6, or domain name. Note: no protocol header (e.g., https://, http://). | | Task Name | A unique name to identify this monitoring task. | | Port | SSH port of the Debian system, default: 22 | @@ -28,18 +28,16 @@ keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Mo #### Metric Set: Basic System Information - -| Metric Name | Metric Unit | Metric help description | -| -------------- | ----------- | ------------------------ | +| Metric Name | Metric Unit | Metric help description | +|----------------|-------------|--------------------------| | Host Name | N/A | Host name | | System Version | N/A | Operating system version | | Uptime | N/A | Boot time | #### Metric Set: CPU Information - -| Metric Name | Metric Unit | Metric help description | -| -------------- | ----------- | ----------------------- | +| Metric Name | Metric Unit | Metric help description | +|----------------|-------------|-------------------------| | Info | N/A | Model | | Cores | N/A | Number of cores | | Interrupt | N/A | Number of interrupts | @@ -49,9 +47,8 @@ keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Mo #### Metric Set: Memory Information - -| Metric Name | Metric Unit | Metric help description | -| ------------------- | ----------- | ---------------------------- | +| Metric Name | Metric Unit | Metric help description | +|---------------------|-------------|------------------------------| | Total Memory | Mb | Total memory capacity | | User Program Memory | Mb | Memory used by 
user programs | | Free Memory | Mb | Free memory capacity | @@ -61,9 +58,8 @@ keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Mo #### Metric Set: Disk Information - -| Metric Name | Metric Unit | Metric help description | -| ------------- | ----------- | ----------------------------- | +| Metric Name | Metric Unit | Metric help description | +|---------------|-------------|-------------------------------| | Disk Num | N/A | Total number of disks | | Partition Num | N/A | Total number of partitions | | Block Write | N/A | Number of disk blocks written | @@ -99,3 +95,4 @@ Metric Unit: - Memory Usage Rate: % - CPU Usage Rate: % + diff --git a/home/versioned_docs/version-v1.5.x/help/dm.md b/home/versioned_docs/version-v1.5.x/help/dm.md index 91b032fdf54..82159bf2408 100644 --- a/home/versioned_docs/version-v1.5.x/help/dm.md +++ b/home/versioned_docs/version-v1.5.x/help/dm.md @@ -9,41 +9,41 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameters -| Parameter name | Parameter help description | -| ------- | ---------- | -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 5236. | -| Query Timeout | Set the timeout when the SQL query does not respond to data, in ms milliseconds, the default is 3000 milliseconds. | -| database name | database instance name, optional. 
| -| username | database connection username, optional | -| password | database connection password, optional | -| URL | Database connection URL, optional | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | +| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | +| Port | The port provided by the database externally, the default is 5236. | +| Query Timeout | Set the timeout when the SQL query does not respond to data, in ms milliseconds, the default is 3000 milliseconds. | +| database name | database instance name, optional. 
| +| username | database connection username, optional | +| password | database connection password, optional | +| URL | Database connection URL, optional | +| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | +| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: basic -| Metric Name | Metric Unit | Metric Help Description | -| ------------ | -------- | ------------------ | -| PORT_NUM | None | Database exposed service port | -| CTL_PATH | None | Control File Path | -| MAX_SESSIONS | None | Maximum database connections | +| Metric Name | Metric Unit | Metric Help Description | +|--------------|-------------|-------------------------------| +| PORT_NUM | None | Database exposed service port | +| CTL_PATH | None | Control File Path | +| MAX_SESSIONS | None | Maximum database connections | #### Metric collection: status -| Metric Name | Metric Unit | Metric Help Description | -| -------- | -------- | ------------------ | -| status$ | None | Open/Close status of DM database | - +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------| +| status$ | None | Open/Close status of DM database | #### Metric collection: thread -| Metric Name | Metric Unit | Metric Help Description | -| ------------- | -------- | ------------------------- | -| dm_sql_thd | None | Thread for writing dmsql dmserver | -| dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | -| dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | +| Metric Name | Metric Unit | Metric Help Description | 
+|-------------|-------------|-------------------------------------------------------------------------| +| dm_sql_thd | None | Thread for writing dmsql dmserver | +| dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | +| dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | + diff --git a/home/versioned_docs/version-v1.5.x/help/dns.md b/home/versioned_docs/version-v1.5.x/help/dns.md index 7587452c1a7..d8dbd8d0921 100644 --- a/home/versioned_docs/version-v1.5.x/help/dns.md +++ b/home/versioned_docs/version-v1.5.x/help/dns.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6. Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. | @@ -28,7 +28,7 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito #### Metrics Set:Header -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|---------------------------------------------------| | Response Time | ms | Time taken for DNS server to respond to requests. | | Opcode | none | Type of the current message. 
| @@ -41,13 +41,13 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Metrics Set: Question -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------| | Section | none | Question record information, including the queried domain name, resource type, resource record class, and additional information. | ### Metrics Set: Answer -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|----------------------------------------------------------------------------------------------------------------------------| | Section0 | none | Answer record information, including the queried domain name, TTL, resource record class, resource type, and query result. | @@ -55,7 +55,7 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Metrics Set: Authority -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| | Section0 | none | SOA (Start of Authority) record for the domain name, including queried domain name, TTL, resource type, resource record class, and other information. | @@ -63,8 +63,9 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito ### Metrics Set: Additional -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-----------------------------------------| | Section0 | none | Additional information for DNS queries. 
| > The metric set collects up to 10 records, with metric names from Section0 to Section9. + diff --git a/home/versioned_docs/version-v1.5.x/help/docker.md b/home/versioned_docs/version-v1.5.x/help/docker.md index fdd3098ce55..0e3a1f0b428 100644 --- a/home/versioned_docs/version-v1.5.x/help/docker.md +++ b/home/versioned_docs/version-v1.5.x/help/docker.md @@ -7,7 +7,6 @@ keywords: [open source monitoring tool, open source docker monitoring tool, moni > Collect and monitor general performance Metrics of Docker containers. - ## Pre-monitoring operations If you want to monitor the container information in `Docker`, you need to open the port according to the following steps, so that the collection request can obtain the corresponding information. @@ -31,7 +30,7 @@ This is equivalent to the **2375** port that is open to the outside world. Of co ```shell systemctl daemon-reload systemctl restart docker -```` +``` **Note: Remember to open the `2375` port number in the server console. ** @@ -42,65 +41,62 @@ Open the `2375` port number inside the server. ```shell firewall-cmd --zone=public --add-port=2375/tcp --permanent firewall-cmd --reload -```` - - - - +``` ### Configuration parameters -| Parameter name | Parameter help description | -| ------------ | ------------------------------- | -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 2375. | -| Query Timeout | Set the timeout when getting the Docker server API interface, in ms, the default is 3000 ms. | -| Container Name | Generally monitors all running container information. 
| -| username | connection username, optional | -| password | connection password, optional | -| URL | Database connection URL, optional, if configured, the parameters such as database name, username and password in the URL will override the parameters configured above | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | +| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | +| Port | The port provided by the database externally, the default is 2375. | +| Query Timeout | Set the timeout when getting the Docker server API interface, in ms, the default is 3000 ms. | +| Container Name | Generally monitors all running container information. 
| +| username | connection username, optional | +| password | connection password, optional | +| URL | Database connection URL, optional, if configured, the parameters such as database name, username and password in the URL will override the parameters configured above | +| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | +| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: system -| Metric Name | Metric Unit | Metric Help Description | -| ------------------ | -------- | ----------------------- | -| Name | None | Server Name | -| version | none | docker version number | -| os | none | server version eg: linux x86_64 | -| root_dir | none | docker folder directory eg: /var/lib/docker | -| containers | None | Total number of containers (running + not running) | -| containers_running | None | Number of running containers | -| containers_paused | none | number of containers in pause | -| images | None | The total number of container images. 
| -| ncpu | none | ncpu | -| mem_total | MB | Total size of memory used | -| system_time | none | system time | +| Metric Name | Metric Unit | Metric Help Description | +|--------------------|-------------|----------------------------------------------------| +| Name | None | Server Name | +| version | none | docker version number | +| os | none | server version eg: linux x86_64 | +| root_dir | none | docker folder directory eg: /var/lib/docker | +| containers | None | Total number of containers (running + not running) | +| containers_running | None | Number of running containers | +| containers_paused | none | number of containers in pause | +| images | None | The total number of container images. | +| ncpu | none | ncpu | +| mem_total | MB | Total size of memory used | +| system_time | none | system time | #### Metric collection: containers -| Metric Name | Metric Unit | Metric Help Description | -| -------- | -------- | ------------ | -| id | None | The ID of the container in Docker | -| name | None | The container name in the Docker container | -| image | None | Image used by the Docker container | -| command | None | Default startup command in Docker | -| state | None | The running state of the container in Docker | -| status | None | Update time in Docker container | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------------------| +| id | None | The ID of the container in Docker | +| name | None | The container name in the Docker container | +| image | None | Image used by the Docker container | +| command | None | Default startup command in Docker | +| state | None | The running state of the container in Docker | +| status | None | Update time in Docker container | #### Metrics collection: stats -| Metric Name | Metric Unit | Metric Help Description | -| ---------------- | -------- | ------------------ | -| name | None | The name in the Docker container | -| available_memory | MB | The 
amount of memory that the Docker container can utilize | -| used_memory | MB | The amount of memory already used by the Docker container | -| memory_usage | None | Memory usage of the Docker container | -| cpu_delta | None | The number of CPUs already used by the Docker container | -| number_cpus | None | The number of CPUs that the Docker container can use | -| cpu_usage | None | Docker container CPU usage | +| Metric Name | Metric Unit | Metric Help Description | +|------------------|-------------|------------------------------------------------------------| +| name | None | The name in the Docker container | +| available_memory | MB | The amount of memory that the Docker container can utilize | +| used_memory | MB | The amount of memory already used by the Docker container | +| memory_usage | None | Memory usage of the Docker container | +| cpu_delta | None | The number of CPUs already used by the Docker container | +| number_cpus | None | The number of CPUs that the Docker container can use | +| cpu_usage | None | Docker container CPU usage | + diff --git a/home/versioned_docs/version-v1.5.x/help/doris_be.md b/home/versioned_docs/version-v1.5.x/help/doris_be.md index 2bc212ef3fb..8dcde7b549b 100644 --- a/home/versioned_docs/version-v1.5.x/help/doris_be.md +++ b/home/versioned_docs/version-v1.5.x/help/doris_be.md @@ -9,162 +9,163 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库BE监控] ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ------------ | ------------------------------------------------------------ | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8040 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| 数据库名称 | 数据库实例名称,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | +|--------|-----------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | +| 任务名称 | 
标识此监控的名称,名称需要保证唯一性 | +| 端口 | 数据库对外提供的端口,默认为8040 | +| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | +| 数据库名称 | 数据库实例名称,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:doris_be_load_channel_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| value | 无 | 当前打开的 load channel 个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------------| +| value | 无 | 当前打开的 load channel 个数 | #### 指标集合:doris_be_memtable_flush_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------------- | -| value | 无 | memtable写入磁盘的个数累计值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | memtable写入磁盘的个数累计值 | #### 指标集合:doris_be_plan_fragment_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------- | -| value | 无 | 当前已接收的 fragment instance 的数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------| +| value | 无 | 当前已接收的 fragment instance 的数量 | #### 指标集合:doris_be_process_thread_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------- | -| value | 无 | BE 进程线程数。通过 `/proc/pid/task` 采集 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | BE 进程线程数。通过 `/proc/pid/task` 采集 | #### 指标集合:doris_be_query_scan_rows -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------------ | -| value | 无 | 读取行数的累计值。这里只统计读取 Olap 表的数据量。并且是 RawRowsRead(部分数据行可能被索引跳过,并没有真正读取,但仍会记录到这个值中) | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------------------------------------------| +| value | 无 | 读取行数的累计值。这里只统计读取 Olap 表的数据量。并且是 RawRowsRead(部分数据行可能被索引跳过,并没有真正读取,但仍会记录到这个值中) | #### 指标集合:doris_be_result_buffer_block_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------- | -| value | 无 | 
当前查询结果缓存中的 query 个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 当前查询结果缓存中的 query 个数 | #### 指标集合:doris_be_send_batch_thread_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------------- | -| value | 无 | 导入时用于发送数据包的线程池的排队个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------| +| value | 无 | 导入时用于发送数据包的线程池的排队个数 | #### 指标集合:doris_be_tablet_base_max_compaction_score -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------------- | -| value | 无 | 当前最大的 Base Compaction Score | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------------------| +| value | 无 | 当前最大的 Base Compaction Score | #### 指标集合:doris_be_timeout_canceled_fragment_count -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------------------- | -| value | 无 | 因超时而被取消的 fragment instance 数量累计值 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------------| +| value | 无 | 因超时而被取消的 fragment instance 数量累计值 | #### 指标集合:doris_be_load_rows -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------- | -| value | 无 | 通过 tablet sink 发送的行数累计 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------| +| value | 无 | 通过 tablet sink 发送的行数累计 | #### 指标集合:doris_be_all_rowsets_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ---------------------- | -| value | 无 | 当前所有 rowset 的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-----------------| +| value | 无 | 当前所有 rowset 的个数 | #### 指标集合:doris_be_all_segments_num -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------- | -| value | 无 | 当前所有 segment 的个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------| +| value | 无 | 当前所有 segment 的个数 | #### 指标集合:doris_be_heavy_work_max_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------ | -| value | 无 | brpc heavy线程池线程个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|-------|------|-------------------| +| value | 无 | brpc heavy线程池线程个数 | #### 指标集合:doris_be_light_work_max_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------ | -| value | 无 | brpc light线程池线程个数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|-------------------| +| value | 无 | brpc light线程池线程个数 | #### 指标集合:doris_be_heavy_work_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------------- | -| value | 无 | brpc heavy线程池队列最大长度,超过则阻塞提交work | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | brpc heavy线程池队列最大长度,超过则阻塞提交work | #### 指标集合:doris_be_light_work_pool_queue_size -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ----------------------------------------------- | -| value | 无 | brpc light线程池队列最大长度,超过则阻塞提交work | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|---------------------------------| +| value | 无 | brpc light线程池队列最大长度,超过则阻塞提交work | #### 指标集合:doris_be_heavy_work_active_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------- | -| value | 无 | brpc heavy线程池活跃线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | brpc heavy线程池活跃线程数 | #### 指标集合:doris_be_light_work_active_threads -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | -------------------------- | -| value | 无 | brpc light线程池活跃线程数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------| +| value | 无 | brpc light线程池活跃线程数 | #### 指标集合:doris_be_compaction_bytes_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| ---------- | -------- | ---------------------------------- | -| base | 字节 | Base Compaction 的数据量累计 | -| cumulative | 字节 | Cumulative Compaction 的数据量累计 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------|------|------------------------------| +| base | 字节 | Base Compaction 的数据量累计 | +| cumulative | 字节 | Cumulative Compaction 的数据量累计 | #### 指标集合:doris_be_disks_avail_capacity -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | 
------------------------------------------------------- | -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的剩余空间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------| +| path | 无 | 指定数据目录 | +| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的剩余空间 | #### 指标集合:doris_be_disks_total_capacity -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | ------------------------------------------------------- | -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的全部空间 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|--------------------------------------------| +| path | 无 | 指定数据目录 | +| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的全部空间 | #### 指标集合:doris_be_local_bytes_read_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------- | -| value | 字节 | 由 `LocalFileReader` 读取的字节数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------| +| value | 字节 | 由 `LocalFileReader` 读取的字节数 | #### 指标集合:doris_be_local_bytes_written_total -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------- | -| value | 字节 | 由 `LocalFileWriter` 写入的字节数 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|----------------------------| +| value | 字节 | 由 `LocalFileWriter` 写入的字节数 | #### 指标集合:doris_be_memory_allocated_bytes -| 指标名称 | 指标单位 | 指标帮助描述 | -| -------- | -------- | --------------------------------------------------- | -| value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------|------|------------------------------------------| +| value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | + diff --git a/home/versioned_docs/version-v1.5.x/help/doris_fe.md b/home/versioned_docs/version-v1.5.x/help/doris_fe.md index bb7a6b99d53..b478b2eaadb 100644 --- a/home/versioned_docs/version-v1.5.x/help/doris_fe.md +++ b/home/versioned_docs/version-v1.5.x/help/doris_fe.md @@ -4,6 +4,7 @@ title: Monitoring DORIS Database FE Monitoring 
sidebar_label: DORIS Database FE keywords: [Open Source Monitoring System, Open Source Database Monitoring, DORIS Database FE Monitoring] --- + > Collect and monitor general performance metrics for DORIS database FE. Supports DORIS 2.0.0. **Protocol: HTTP** @@ -14,9 +15,8 @@ Check the `fe/conf/fe.conf` file to obtain the value of the `http_port` configur ### Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Parameter Description | +|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitor Host | The monitored target's IPV4, IPV6, or domain name. Note: Without the protocol header (e.g., https://, http://) | | Task Name | A unique name identifying this monitoring task | | Port | The port provided by the database to the outside, default is 8030 ,get the value of the`http_port` configuration item | @@ -30,26 +30,23 @@ Check the `fe/conf/fe.conf` file to obtain the value of the `http_port` configur #### Metric Set: doris_fe_connection_total - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | -------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------| | value | None | The current number of MySQL port connections on FE | #### Metric Set: doris_fe_edit_log_clean Should not fail; if it does, manual intervention is required. 
- -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|---------------------------------------------------------------| | success | None | The number of successful cleanups of historical metadata logs | | failed | None | The number of failed cleanups of historical metadata logs | #### Metric Set: doris_fe_edit_log - -| Metric Name | Metric Unit | Metric help description | -| ----------------- | ----------- | ------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------------|-------------|---------------------------------------------| | write | None | The count of metadata log write operations | | read | None | The count of metadata log read operations | | current | None | The current number of metadata logs | @@ -60,9 +57,8 @@ Should not fail; if it does, manual intervention is required. Should not fail; if it does, manual intervention is required. - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | -------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------------------------| | success | None | The number of successful cleanups of historical metadata image files | | failed | None | The number of failed cleanups of historical metadata image files | @@ -70,77 +66,68 @@ Should not fail; if it does, manual intervention is required. Should not fail; if it does, manual intervention is required. 
- -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------ | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|--------------------------------------------------------------| | success | None | The number of successful generations of metadata image files | | failed | None | The number of failed generations of metadata image files | #### Metric Set: doris_fe_query_err - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ----------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|-------------------------------------------| | value | None | The cumulative value of erroneous queries | #### Metric Set: doris_fe_max_journal_id - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The current maximum metadata log ID on the FE node. If it is a Master FE, it is the maximum ID currently written; if it is a non-Master FE, it represents the maximum metadata log ID currently being replayed. 
Used to observe if there is a large gap between the IDs of multiple FEs. A large gap indicates issues with metadata synchronization | #### Metric Set: doris_fe_max_tablet_compaction_score - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The largest compaction score value among all BE nodes. This value can observe the current cluster's maximum compaction score to judge if it is too high. If too high, there may be delays in queries or writes | #### Metric Set: doris_fe_qps - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|---------------------------------------------------------------------------------| | value | None | The number of queries per second on the current FE (only counts query requests) | #### Metric Set: doris_fe_query_err_rate - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------ | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|--------------------------------------------| | value | None | The number of erroneous queries per second | #### Metric Set: doris_fe_report_queue_size - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The length of the queue for various regular reporting tasks on the BE side at the FE end. This value reflects the degree of blocking of reporting tasks on the Master FE node. A larger value indicates insufficient processing capacity on the FE | #### Metric Set: doris_fe_rps - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ---------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | +|-------------|-------------|------------------------------------------------------------------------------------------------------| | value | None | The number of requests per second on the current FE (includes queries and other types of statements) | #### Metric Set: doris_fe_scheduled_tablet_num - -| Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metric Name | Metric Unit | Metric help description | 
+|-------------|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | value | None | The number of tablets currently being scheduled by the Master FE node. This includes tablets that are being repaired and tablets that are being balanced. This value can reflect the number of tablets currently migrating in the cluster. If there is a value for a long time, it indicates that the cluster is unstable | #### Metric Set: doris_fe_txn_status Can observe the number of import transactions in various states to determine if there is a backlog. - | Metric Name | Metric Unit | Metric help description | -| ----------- | ----------- | ----------------------- | +|-------------|-------------|-------------------------| | unknown | None | Unknown state | | prepare | None | In preparation | | committed | None | Committed | | visible | None | Visible | | aborted | None | Aborted / Revoked | + diff --git a/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md b/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md index 7418e17cc1b..fd36206bc6e 100644 --- a/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md +++ b/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md @@ -24,6 +24,7 @@ management: exposure: include: '*' ``` + Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: ```json @@ -60,7 +61,6 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo 3. 
Add DynamicTp monitoring under HertzBeat middleware monitoring - ### Configuration parameters | Parameter name | Parameter help description | @@ -78,24 +78,25 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo #### Metric collection: thread_pool -| Metric Name | Metric Unit | Metric Help Description | -|---------|------|------------------------| -| pool_name | None | Thread pool name | -| core_pool_size | None | Number of core threads | -| maximum_pool_size | None | Maximum number of threads | -| queue_type | None | Task queue type | -| queue_capacity | MB | task queue capacity | -| queue_size | None | The current occupied size of the task queue | -| fair | None | Queue mode, SynchronousQueue will be used | -| queue_remaining_capacity | MB | task queue remaining size | -| active_count | None | Number of active threads | -| task_count | None | Total number of tasks | -| completed_task_count | None | Number of completed tasks | -| largest_pool_size | None | The largest number of threads in history | -| pool_size | none | current number of threads | -| wait_task_count | None | Number of tasks waiting to be executed | -| reject_count | None | Number of rejected tasks | -| reject_handler_name | None | Reject policy type | -| dynamic | None | Dynamic thread pool or not | -| run_timeout_count | None | Number of running timeout tasks | -| queue_timeout_count | None | Number of tasks waiting for timeout | +| Metric Name | Metric Unit | Metric Help Description | +|--------------------------|-------------|---------------------------------------------| +| pool_name | None | Thread pool name | +| core_pool_size | None | Number of core threads | +| maximum_pool_size | None | Maximum number of threads | +| queue_type | None | Task queue type | +| queue_capacity | MB | task queue capacity | +| queue_size | None | The current occupied size of the task queue | +| fair | None | Queue mode, SynchronousQueue will be used | +| queue_remaining_capacity | 
MB | task queue remaining size | +| active_count | None | Number of active threads | +| task_count | None | Total number of tasks | +| completed_task_count | None | Number of completed tasks | +| largest_pool_size | None | The largest number of threads in history | +| pool_size | none | current number of threads | +| wait_task_count | None | Number of tasks waiting to be executed | +| reject_count | None | Number of rejected tasks | +| reject_handler_name | None | Reject policy type | +| dynamic | None | Dynamic thread pool or not | +| run_timeout_count | None | Number of running timeout tasks | +| queue_timeout_count | None | Number of tasks waiting for timeout | + diff --git a/home/versioned_docs/version-v1.5.x/help/elasticsearch.md b/home/versioned_docs/version-v1.5.x/help/elasticsearch.md index 25078850862..3ac3d62a7e1 100644 --- a/home/versioned_docs/version-v1.5.x/help/elasticsearch.md +++ b/home/versioned_docs/version-v1.5.x/help/elasticsearch.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6. Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. 
| @@ -27,7 +27,7 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] #### Metrics Set:health -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|-------------------------------------------| | cluster_name | none | Cluster Name | | status | none | status | @@ -49,7 +49,7 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] #### Metrics Set:nodes_detail -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------|-------------|-------------------------| | node_name | none | Node Name | | ip | none | IP Address | @@ -61,3 +61,4 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] | disk_free | GB | Disk Free | | disk_total | GB | Disk Total | | disk_used_percent | % | Disk Used Percent | + diff --git a/home/versioned_docs/version-v1.5.x/help/euleros.md b/home/versioned_docs/version-v1.5.x/help/euleros.md index c63c6c26643..786dab30afc 100644 --- a/home/versioned_docs/version-v1.5.x/help/euleros.md +++ b/home/versioned_docs/version-v1.5.x/help/euleros.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IP, IPV6, or domain name of the monitored endpoint. Note ⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitoring, ensuring uniqueness. 
| @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|---------------------------| | Host Name | None | Host name. | | System Version | None | Operating system version. | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|-------------------------------------------| | info | None | CPU model. | | cores | None | Number of CPU cores. | @@ -47,7 +47,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Memory Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|------------------------------------| | total | Mb | Total memory capacity. | | used | Mb | Used memory by user programs. | @@ -58,7 +58,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Disk Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------|-------------|------------------------------------| | disk_num | None | Total number of disks. | | partition_num | None | Total number of partitions. | @@ -68,7 +68,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, EulerOS Mo #### Metric Set: Interface Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------------| | interface_name | None | Name of the network interface. | | receive_bytes | Mb | Inbound data traffic. 
| @@ -105,3 +105,4 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/versioned_docs/version-v1.5.x/help/flink.md b/home/versioned_docs/version-v1.5.x/help/flink.md index 2115a1f0b0f..bd731a6dee6 100644 --- a/home/versioned_docs/version-v1.5.x/help/flink.md +++ b/home/versioned_docs/version-v1.5.x/help/flink.md @@ -7,10 +7,9 @@ keywords: [open source monitoring tool, open source flink monitoring tool] > Collect and monitor the general performance Metrics of Flink. - ### Configuration parameter -| Parameter Name | Parameter Help Description | +| Parameter Name | Parameter Help Description | |---------------------|-----------------------------------------------------------------------------------------------------------------------------| | Monitor Host | The monitored peer IPV4, IPV6, or domain name. Note: Do not include protocol headers (e.g., https://, http://). | | Task Name | Identifier for this monitoring task, name must be unique. | @@ -27,13 +26,11 @@ keywords: [open source monitoring tool, open source flink monitoring tool] #### Metrics Set:Overview -| Metric Name | Metric Unit | Metric Help Description | -|---------------|-------------|-------------------------| -| slots_total | Units | Total number of slots. | -| slots_used | Units | Number of slots used. | -| task_total | Units | Total number of tasks. | -| jobs_running | Units | Number of jobs running. | -| jobs_failed | Units | Number of jobs failed. | - - +| Metric Name | Metric Unit | Metric Help Description | +|--------------|-------------|-------------------------| +| slots_total | Units | Total number of slots. | +| slots_used | Units | Number of slots used. | +| task_total | Units | Total number of tasks. | +| jobs_running | Units | Number of jobs running. | +| jobs_failed | Units | Number of jobs failed. 
| diff --git a/home/versioned_docs/version-v1.5.x/help/freebsd.md b/home/versioned_docs/version-v1.5.x/help/freebsd.md index 96d9866743b..51d0ed9ab0b 100644 --- a/home/versioned_docs/version-v1.5.x/help/freebsd.md +++ b/home/versioned_docs/version-v1.5.x/help/freebsd.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source Operating System Monitori ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IPv4, IPv6, or domain name of the monitored peer. Note ⚠️ without the protocol header (eg: https://, http://). | | Task Name | Identifies the name of this monitor, ensuring uniqueness of the name. | @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source Operating System Monitori #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------| | Host Name | None | Host name | | System Version | None | Operating system version | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source Operating System Monitori #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|----------------------------------------------| | info | None | CPU model | | cores | Number | Number of CPU cores | @@ -85,3 +85,4 @@ Statistics of the top 10 processes using memory. 
Statistics include: Process ID, | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/versioned_docs/version-v1.5.x/help/ftp.md b/home/versioned_docs/version-v1.5.x/help/ftp.md index 8802d3ab415..50a571eb7a7 100644 --- a/home/versioned_docs/version-v1.5.x/help/ftp.md +++ b/home/versioned_docs/version-v1.5.x/help/ftp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source ftp server monitoring tool, ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: ftp://). | | Monitoring name | Identify the name of this monitoring, The name needs to be unique. | @@ -28,7 +28,8 @@ keywords: [ open source monitoring tool, open source ftp server monitoring tool, #### Metrics Set:Basic -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------|-------------|----------------------------------------------------------| | Is Active | none | Check if the directory exists and has access permission. | | Response Time | ms | Response Time | + diff --git a/home/versioned_docs/version-v1.5.x/help/fullsite.md b/home/versioned_docs/version-v1.5.x/help/fullsite.md index 3246fa31d82..6145f238bdc 100644 --- a/home/versioned_docs/version-v1.5.x/help/fullsite.md +++ b/home/versioned_docs/version-v1.5.x/help/fullsite.md @@ -7,28 +7,29 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon > Available or not to monitor all pages of the website. > A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. 
-> Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. +> Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| SiteMap | Relative path of website SiteMap address, eg:/sitemap.xml | -| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| SiteMap | Relative path of website SiteMap address, eg:/sitemap.xml | +| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary + +| Metric name | Metric unit | Metric help description | +|--------------|-------------|------------------------------------------------------| +| url | none | URL path of web page | +| statusCode | none | Response HTTP status code for requesting the website | +| responseTime | ms | Website response time | +| errorMsg | none | Error message feedback after requesting the website | -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| url | none | URL path of web page | -| statusCode | none | Response HTTP status code for requesting the website | -| responseTime | ms | Website response time | -| errorMsg | none | Error message feedback after requesting the website | diff --git a/home/versioned_docs/version-v1.5.x/help/guide.md b/home/versioned_docs/version-v1.5.x/help/guide.md index 1d9bac72dd5..ce182746ffa 100644 --- a/home/versioned_docs/version-v1.5.x/help/guide.md +++ b/home/versioned_docs/version-v1.5.x/help/guide.md @@ -5,19 +5,19 @@ sidebar_label: Help Center --- > Hertzbeat - An open source, 
real-time monitoring tool with custom-monitor and agentLess. -> Help documents and auxiliary information during use +> Help documents and auxiliary information during use ## 🔬 Monitoring services > Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. > Planned monitoring type:application service, database, operating system, cloud native, open source middleware. -### Application service monitoring +### Application service monitoring - 👉 [Website monitoring](website)
- 👉 [HTTP API](api)
- 👉 [PING Connectivity](ping)
- 👉 [Port availability](port)
+ 👉 [Website monitoring](website)
+ 👉 [HTTP API](api)
+ 👉 [PING Connectivity](ping)
+ 👉 [Port availability](port)
 👉 [Full site monitoring](fullsite)
 👉 [SSL Cert monitoring](ssl_cert)
 👉 [DNS monitoring](dns)
@@ -32,7 +32,7 @@ sidebar_label: Help Center  👉 [SpringBoot3.0](springboot3)
 👉 [DynamicTp](dynamic_tp)
-### Database monitoring +### Database monitoring  👉 [MYSQL database monitoring](mysql)
 👉 [MariaDB database monitoring](mariadb)
@@ -51,8 +51,7 @@ sidebar_label: Help Center  👉 [Redis monitoring](redis)
 👉 [Memcached monitoring](memcached)
- -### Operating system monitoring +### Operating system monitoring  👉 [Linux operating system monitoring](linux)
 👉 [Windows operating system monitoring](windows)
@@ -92,35 +91,33 @@ sidebar_label: Help Center ### Network monitoring - 👉 [Huawei-switch](huawei_switch)
+ 👉 [Huawei-switch](huawei_switch)
### Server monitoring - *** -## 💡 Alarm service +## 💡 Alarm service > More liberal threshold alarm configuration (calculation expression), supports email, SMS, WebHook, DingDing, WeChat and FeiShu for alarm notification. > The positioning of alarm service is to trigger the threshold accurately and timely, and the alarm notification can be reached in time. -### Alarm center +### Alarm center > The triggered alarm information center provides query and filtering of alarm deletion, alarm processing, mark unprocessed, alarm level status, etc. -### Alarm configuration +### Alarm configuration > The Metric threshold configuration provides the Metric threshold configuration in the form of expression, which can set the alarm level, trigger times, alarm notification template and whether it is enabled, correlation monitoring and other functions. -More details see 👉 [Threshold alarm](alert_threshold)
-   👉 [Threshold expression](alert_threshold_expr) +More details see 👉 [Threshold alarm](alert_threshold)
+   👉 [Threshold expression](alert_threshold_expr) -### Alarm notification +### Alarm notification > After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) > Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. -> After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver. - +> After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver.  👉 [Configure Email Notification](alert_email)
 👉 [Configure Discord Notification](alert_webhook)
@@ -134,4 +131,4 @@ More details see 👉 [Threshold alarm](alert_threshold)
### Plugins - 👉 [Plugin](plugin)
\ No newline at end of file + 👉 [Plugin](plugin)
diff --git a/home/versioned_docs/version-v1.5.x/help/hadoop.md b/home/versioned_docs/version-v1.5.x/help/hadoop.md index f0a458ecc9f..56f19472277 100644 --- a/home/versioned_docs/version-v1.5.x/help/hadoop.md +++ b/home/versioned_docs/version-v1.5.x/help/hadoop.md @@ -11,9 +11,10 @@ keywords: [Open Source Monitoring System, Open Source Java Monitoring, Hadoop JV ### Pre-monitoring steps ->You need to enable JMX service in the Hadoop application before monitoring. HertzBeat uses the JMX protocol to collect performance metrics from Hadoop's JVM. +> You need to enable JMX service in the Hadoop application before monitoring. HertzBeat uses the JMX protocol to collect performance metrics from Hadoop's JVM. ### Steps to enable JMX protocol in the Hadoop application + Add JVM parameters when the application starts. ⚠️Note that you can customize the exposed port and external IP. - 1.Enter the hadoop-env.sh configuration file and enter the following command in the terminal: @@ -31,12 +32,12 @@ export HADOOP_OPTS= "$HADOOP_OPTS -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false " ``` -- 3.Save and exit, and then execute "start-all.sh" in the "$HADOOP_HOME/sbin" directory to restart the service. +- 3.Save and exit, and then execute "start-all.sh" in the "$HADOOP_HOME/sbin" directory to restart the service. ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -70,16 +71,15 @@ export HADOOP_OPTS= "$HADOOP_OPTS #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -88,4 +88,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/hbase_master.md b/home/versioned_docs/version-v1.5.x/help/hbase_master.md index 188ad146a5a..1e3efe84af7 100644 --- a/home/versioned_docs/version-v1.5.x/help/hbase_master.md +++ b/home/versioned_docs/version-v1.5.x/help/hbase_master.md @@ -4,6 +4,7 @@ title: Monitoring Hbase Master sidebar_label: HbaseMaster Monitoring keywords: [Open Source Monitoring System, Open Source Database Monitoring, HbaseMaster Monitoring] --- + > Collect monitoring data for general performance metrics of Hbase Master. 
**Protocol: HTTP** @@ -14,13 +15,12 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po ## Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Target Host | The IPv4, IPv6, or domain name of the monitored peer. Note: without protocol header (e.g., https://, http://). | | Port | The port number of the Hbase master, default is 16010. That is, the value of the`hbase.master.info.port` parameter. | | Task Name | The name identifying this monitoring, which needs to be unique. | -| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | +| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | | Collection Interval | The periodic collection interval for monitoring data, in seconds, with the minimum allowable interval being 30 seconds. | | Probe | Whether to probe and check the availability of monitoring before adding new monitoring, and proceed with the addition or modification operation only if the probe is successful. | | Description | Additional notes and descriptions for this monitoring, users can add notes here. 
| @@ -29,9 +29,8 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po #### Metric Set: server - -| Metric Name | Unit | Metric Description | -| -------------------- | ---- | --------------------------------------- | +| Metric Name | Unit | Metric Description | +|----------------------|------|-----------------------------------------| | numRegionServers | none | Number of currently alive RegionServers | | numDeadRegionServers | none | Number of currently dead RegionServers | | averageLoad | none | Cluster average load | @@ -39,18 +38,16 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po #### Metric Set: Rit - -| Metric Name | Unit | Metric Description | -| -------------------- | ---- | -------------------------------- | +| Metric Name | Unit | Metric Description | +|----------------------|------|----------------------------------| | ritnone | none | Current number of RIT | | ritnoneOverThreshold | none | Number of RIT over the threshold | | ritOldestAge | ms | Duration of the oldest RIT | #### Metric Set: basic - -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | ------------------------------------------- | +| Metric Name | Unit | Metric Description | +|-------------------------|------|---------------------------------------------| | liveRegionServers | none | List of currently active RegionServers | | deadRegionServers | none | List of currently offline RegionServers | | zookeeperQuorum | none | Zookeeper list | @@ -60,3 +57,4 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po | receivedBytes | MB | Cluster received data volume | | sentBytes | MB | Cluster sent data volume (MB) | | clusterRequests | none | Total number of cluster requests | + diff --git a/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md b/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md index 4e676491022..0a77eb5441b 100644 --- 
a/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md +++ b/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md @@ -4,6 +4,7 @@ title: Monitoring HBase RegionServer Monitoring sidebar_label: HBase RegionServer Monitoring keywords: [Open-source monitoring system, Open-source database monitoring, RegionServer monitoring] --- + > Collect and monitor common performance metrics for HBase RegionServer. **Protocol:** HTTP @@ -14,13 +15,12 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. ## Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -| Target Host | The IPV4, IPV6, or domain name of the monitored entity. Note ⚠️ Do not include the protocol header (e.g., https://, http://). | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------------| +| Target Host | The IPV4, IPV6, or domain name of the monitored entity. Note ⚠️ Do not include the protocol header (e.g., https://, http://). | | Port | The port number of the HBase regionserver, default is 16030, i.e., the value of the`hbase.regionserver.info.port` parameter | | Task Name | A unique name to identify this monitoring task. | -| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | +| Query Timeout | Set the connection timeout in ms, the default is 3000 milliseconds. | | Collection Interval | The interval time for periodic data collection in seconds, with a minimum interval of 30 seconds. | | Probe Before Adding | Whether to probe and check the availability of monitoring before adding new monitoring, only proceed with the addition if the probe is successful. 
| | Description Note | Additional notes to identify and describe this monitoring, users can add notes here. | @@ -31,9 +31,8 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. #### Metric Set: server - -| Metric Name | Unit | Metric Description | -| --------------------------------- | ----- | ------------------------------------------------------------------------- | +| Metric Name | Unit | Metric Description | +|-----------------------------------|-------|---------------------------------------------------------------------------| | regionCount | None | Number of Regions | | readRequestCount | None | Number of read requests since cluster restart | | writeRequestCount | None | Number of write requests since cluster restart | @@ -74,9 +73,8 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. #### Metric Set: IPC - -| Metric Name | Unit | Metric Description | -| ------------------------- | ---- | -------------------------------------- | +| Metric Name | Unit | Metric Description | +|---------------------------|------|----------------------------------------| | numActiveHandler | None | Current number of RITs | | NotServingRegionException | None | Number of RITs exceeding the threshold | | RegionMovedException | ms | Duration of the oldest RIT | @@ -84,9 +82,8 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. #### Metric Set: JVM - -| Metric Name | Unit | Metric Description | -| -------------------- | ---- | --------------------------------- | +| Metric Name | Unit | Metric Description | +|----------------------|------|-----------------------------------| | MemNonHeapUsedM | None | Current active RegionServer list | | MemNonHeapCommittedM | None | Current offline RegionServer list | | MemHeapUsedM | None | Zookeeper list | @@ -94,3 +91,4 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. 
| MemHeapMaxM | None | Cluster balance load times | | MemMaxM | None | RPC handle count | | GcCount | MB | Cluster data reception volume | + diff --git a/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md b/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md index 2e09fb9fba1..164adc7d6df 100644 --- a/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md +++ b/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md @@ -15,42 +15,43 @@ Retrieve the HTTP monitoring port for the Apache HDFS DataNode. Value: `dfs.data ## Configuration Parameters -| Parameter Name | Parameter Description | -| ----------------- |-------------------------------------------------------| -| Target Host | IP(v4 or v6) or domain name of the target to be monitored. Exclude protocol. | -| Port | Monitoring port number for Apache HDFS DataNode, default is 50075. | -| Query Timeout | Timeout for querying Apache HDFS DataNode, in milliseconds, default is 6000 milliseconds. | +| Parameter Name | Parameter Description | +|-----------------------------|-------------------------------------------------------------------------------------------| +| Target Host | IP(v4 or v6) or domain name of the target to be monitored. Exclude protocol. | +| Port | Monitoring port number for Apache HDFS DataNode, default is 50075. | +| Query Timeout | Timeout for querying Apache HDFS DataNode, in milliseconds, default is 6000 milliseconds. | | Metrics Collection Interval | Time interval for monitoring data collection, in seconds, minimum interval is 30 seconds. | -| Probe Before Monitoring | Whether to probe and check monitoring availability before adding. | -| Description/Remarks | Additional description and remarks for this monitoring. | +| Probe Before Monitoring | Whether to probe and check monitoring availability before adding. | +| Description/Remarks | Additional description and remarks for this monitoring. 
| ### Metrics Collected #### Metric Set: FSDatasetState -| Metric Name | Metric Unit | Metric Description | -| ------------ | ----------- | ------------------------------ | -| DfsUsed | GB | DataNode HDFS usage | -| Remaining | GB | Remaining space on DataNode HDFS | -| Capacity | GB | Total capacity of DataNode HDFS | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|----------------------------------| +| DfsUsed | GB | DataNode HDFS usage | +| Remaining | GB | Remaining space on DataNode HDFS | +| Capacity | GB | Total capacity of DataNode HDFS | #### Metric Set: JvmMetrics -| Metric Name | Metric Unit | Metric Description | -| ---------------------- | ----------- | ----------------------------------------- | -| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | -| MemNonHeapCommittedM | MB | Committed size of NonHeapMemory configured in JVM | -| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | -| MemHeapCommittedM | MB | Committed size of HeapMemory by JVM | -| MemHeapMaxM | MB | Maximum size of HeapMemory configured in JVM | -| MemMaxM | MB | Maximum memory available for JVM at runtime | -| ThreadsRunnable | Count | Number of threads in RUNNABLE state | -| ThreadsBlocked | Count | Number of threads in BLOCKED state | -| ThreadsWaiting | Count | Number of threads in WAITING state | -| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | +| Metric Name | Metric Unit | Metric Description | +|----------------------|-------------|---------------------------------------------------| +| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | +| MemNonHeapCommittedM | MB | Committed size of NonHeapMemory configured in JVM | +| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | +| MemHeapCommittedM | MB | Committed size of HeapMemory by JVM | +| MemHeapMaxM | MB | Maximum size of HeapMemory configured in JVM | +| MemMaxM | MB | Maximum memory available for JVM at runtime | +| 
ThreadsRunnable | Count | Number of threads in RUNNABLE state | +| ThreadsBlocked | Count | Number of threads in BLOCKED state | +| ThreadsWaiting | Count | Number of threads in WAITING state | +| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | #### Metric Set: runtime -| Metric Name | Metric Unit | Metric Description | -| ------------ | ----------- | ------------------ | -| StartTime | | Startup time | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|--------------------| +| StartTime | | Startup time | + diff --git a/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md b/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md index 1afd6d4b1ae..978daac3613 100644 --- a/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md +++ b/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md @@ -15,78 +15,79 @@ Ensure that you have obtained the JMX monitoring port for the HDFS NameNode. ## Configuration Parameters -| Parameter Name | Parameter Description | -| ------------------ |--------------------------------------------------------| -| Target Host | The IPv4, IPv6, or domain name of the target being monitored. Exclude protocol headers. | -| Port | The monitoring port number of the HDFS NameNode, default is 50070. | -| Query Timeout | Timeout for querying the HDFS NameNode, in milliseconds, default is 6000 milliseconds. | +| Parameter Name | Parameter Description | +|-----------------------------|-------------------------------------------------------------------------------------------| +| Target Host | The IPv4, IPv6, or domain name of the target being monitored. Exclude protocol headers. | +| Port | The monitoring port number of the HDFS NameNode, default is 50070. | +| Query Timeout | Timeout for querying the HDFS NameNode, in milliseconds, default is 6000 milliseconds. | | Metrics Collection Interval | Time interval for collecting monitoring data, in seconds, minimum interval is 30 seconds. 
| -| Probe Before Monitoring | Whether to probe and check the availability of monitoring before adding it. | -| Description/Remarks | Additional description and remarks for this monitoring. | +| Probe Before Monitoring | Whether to probe and check the availability of monitoring before adding it. | +| Description/Remarks | Additional description and remarks for this monitoring. | ### Collected Metrics #### Metric Set: FSNamesystem -| Metric Name | Metric Unit | Metric Description | -| --------------------------- | ----------- | ------------------------------------- | -| CapacityTotal | | Total cluster storage capacity | -| CapacityTotalGB | GB | Total cluster storage capacity | -| CapacityUsed | | Used cluster storage capacity | -| CapacityUsedGB | GB | Used cluster storage capacity | -| CapacityRemaining | | Remaining cluster storage capacity | -| CapacityRemainingGB | GB | Remaining cluster storage capacity | -| CapacityUsedNonDFS | | Non-HDFS usage of cluster capacity | -| TotalLoad | | Total client connections in the cluster | -| FilesTotal | | Total number of files in the cluster | -| BlocksTotal | | Total number of BLOCKs | -| PendingReplicationBlocks | | Number of blocks awaiting replication | -| UnderReplicatedBlocks | | Number of blocks with insufficient replicas | -| CorruptBlocks | | Number of corrupt blocks | -| ScheduledReplicationBlocks | | Number of blocks scheduled for replication | -| PendingDeletionBlocks | | Number of blocks awaiting deletion | -| ExcessBlocks | | Number of excess blocks | -| PostponedMisreplicatedBlocks| | Number of misreplicated blocks postponed for processing | -| NumLiveDataNodes | | Number of live data nodes in the cluster | -| NumDeadDataNodes | | Number of data nodes marked as dead | -| NumDecomLiveDataNodes | | Number of decommissioned live nodes | -| NumDecomDeadDataNodes | | Number of decommissioned dead nodes | -| NumDecommissioningDataNodes | | Number of nodes currently being decommissioned | -| 
TransactionsSinceLastCheckpoint | | Number of transactions since the last checkpoint | -| LastCheckpointTime | | Time of the last checkpoint | -| PendingDataNodeMessageCount| | Number of DATANODE requests queued in the standby namenode | +| Metric Name | Metric Unit | Metric Description | +|---------------------------------|-------------|------------------------------------------------------------| +| CapacityTotal | | Total cluster storage capacity | +| CapacityTotalGB | GB | Total cluster storage capacity | +| CapacityUsed | | Used cluster storage capacity | +| CapacityUsedGB | GB | Used cluster storage capacity | +| CapacityRemaining | | Remaining cluster storage capacity | +| CapacityRemainingGB | GB | Remaining cluster storage capacity | +| CapacityUsedNonDFS | | Non-HDFS usage of cluster capacity | +| TotalLoad | | Total client connections in the cluster | +| FilesTotal | | Total number of files in the cluster | +| BlocksTotal | | Total number of BLOCKs | +| PendingReplicationBlocks | | Number of blocks awaiting replication | +| UnderReplicatedBlocks | | Number of blocks with insufficient replicas | +| CorruptBlocks | | Number of corrupt blocks | +| ScheduledReplicationBlocks | | Number of blocks scheduled for replication | +| PendingDeletionBlocks | | Number of blocks awaiting deletion | +| ExcessBlocks | | Number of excess blocks | +| PostponedMisreplicatedBlocks | | Number of misreplicated blocks postponed for processing | +| NumLiveDataNodes | | Number of live data nodes in the cluster | +| NumDeadDataNodes | | Number of data nodes marked as dead | +| NumDecomLiveDataNodes | | Number of decommissioned live nodes | +| NumDecomDeadDataNodes | | Number of decommissioned dead nodes | +| NumDecommissioningDataNodes | | Number of nodes currently being decommissioned | +| TransactionsSinceLastCheckpoint | | Number of transactions since the last checkpoint | +| LastCheckpointTime | | Time of the last checkpoint | +| PendingDataNodeMessageCount | | Number of 
DATANODE requests queued in the standby namenode | #### Metric Set: RPC -| Metric Name | Metric Unit | Metric Description | -| ------------------------- | ----------- | -------------------------- | -| ReceivedBytes | | Data receiving rate | -| SentBytes | | Data sending rate | -| RpcQueueTimeNumOps | | RPC call rate | +| Metric Name | Metric Unit | Metric Description | +|--------------------|-------------|---------------------| +| ReceivedBytes | | Data receiving rate | +| SentBytes | | Data sending rate | +| RpcQueueTimeNumOps | | RPC call rate | #### Metric Set: runtime -| Metric Name | Metric Unit | Metric Description | -| ------------------------- | ----------- | ------------------- | -| StartTime | | Start time | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|--------------------| +| StartTime | | Start time | #### Metric Set: JvmMetrics -| Metric Name | Metric Unit | Metric Description | -| ------------------------- | ----------- | ------------------- | -| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | -| MemNonHeapCommittedM | MB | Committed NonHeapMemory by JVM | -| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | -| MemHeapCommittedM | MB | Committed HeapMemory by JVM | -| MemHeapMaxM | MB | Maximum HeapMemory configured for JVM | -| MemMaxM | MB | Maximum memory that can be used by JVM | -| GcCountParNew | Count | Number of ParNew GC events | -| GcTimeMillisParNew | Milliseconds| Time spent in ParNew GC | -| GcCountConcurrentMarkSweep| Count | Number of ConcurrentMarkSweep GC events| -| GcTimeMillisConcurrentMarkSweep | Milliseconds | Time spent in ConcurrentMarkSweep GC | -| GcCount | Count | Total number of GC events | -| GcTimeMillis | Milliseconds| Total time spent in GC events | -| ThreadsRunnable | Count | Number of threads in RUNNABLE state | -| ThreadsBlocked | Count | Number of threads in BLOCKED state | -| ThreadsWaiting | Count | Number of threads in WAITING state | -| 
ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state| +| Metric Name | Metric Unit | Metric Description | +|---------------------------------|--------------|------------------------------------------| +| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | +| MemNonHeapCommittedM | MB | Committed NonHeapMemory by JVM | +| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | +| MemHeapCommittedM | MB | Committed HeapMemory by JVM | +| MemHeapMaxM | MB | Maximum HeapMemory configured for JVM | +| MemMaxM | MB | Maximum memory that can be used by JVM | +| GcCountParNew | Count | Number of ParNew GC events | +| GcTimeMillisParNew | Milliseconds | Time spent in ParNew GC | +| GcCountConcurrentMarkSweep | Count | Number of ConcurrentMarkSweep GC events | +| GcTimeMillisConcurrentMarkSweep | Milliseconds | Time spent in ConcurrentMarkSweep GC | +| GcCount | Count | Total number of GC events | +| GcTimeMillis | Milliseconds | Total time spent in GC events | +| ThreadsRunnable | Count | Number of threads in RUNNABLE state | +| ThreadsBlocked | Count | Number of threads in BLOCKED state | +| ThreadsWaiting | Count | Number of threads in WAITING state | +| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | + diff --git a/home/versioned_docs/version-v1.5.x/help/hive.md b/home/versioned_docs/version-v1.5.x/help/hive.md index ec0d7dee398..806969c2e7c 100644 --- a/home/versioned_docs/version-v1.5.x/help/hive.md +++ b/home/versioned_docs/version-v1.5.x/help/hive.md @@ -16,6 +16,7 @@ If you want to monitor information in `Apache Hive` with this monitoring type, y ```shell hive --service metastore & ``` + **2. 
Enable hive server2:** ```shell @@ -24,55 +25,53 @@ hive --service hiveserver2 & ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |-------------------------------------------------------------------------------------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 10002. | -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 10002. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | | The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: basic -| Metric Name | metric unit | Metrics help describe | -|-------------| -------- |--------------------------------| -| vm_name | None | The name of the virtual machine (VM) running HiveServer2. | -| vm_vendor | None | The vendor or provider of the virtual machine. | -| vm_version | None | The version of the virtual machine. | -| up_time | None | The duration for which HiveServer2 has been running. | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------------------| +| vm_name | None | The name of the virtual machine (VM) running HiveServer2. | +| vm_vendor | None | The vendor or provider of the virtual machine. | +| vm_version | None | The version of the virtual machine. | +| up_time | None | The duration for which HiveServer2 has been running. 
| #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|-----------------| -------- |-------------------------------------| -| https_proxyPort | None | The port number used for HTTPS proxy communication. | -| os_name | None | The name of the operating system on which HiveServer2 is running. | -| os_version | None | The version of the operating system.| -| os_arch | None | The architecture of the operating system.| -| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | -| java_runtime_version | None | The version of the Java runtime environment. | +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|-------------------------------------------------------------------| +| https_proxyPort | None | The port number used for HTTPS proxy communication. | +| os_name | None | The name of the operating system on which HiveServer2 is running. | +| os_version | None | The version of the operating system. | +| os_arch | None | The architecture of the operating system. | +| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | +| java_runtime_version | None | The version of the Java runtime environment. | #### metric Collection: thread -| Metric Name | metric unit | Metrics help describe | -| ---------------- |------|--------------------| -| thread_count | None | The current number of threads being used by HiveServer2. | -| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | -| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | -| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. 
| +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|----------------------------------------------------------------------| +| thread_count | None | The current number of threads being used by HiveServer2. | +| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | +| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | +| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. | #### metric Collection: code_cache -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------------| -| committed | MB | The amount of memory currently allocated for the memory pool. | -| init | MB | The initial amount of memory requested for the memory pool. | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-------------------------------------------------------------------------| +| committed | MB | The amount of memory currently allocated for the memory pool. | +| init | MB | The initial amount of memory requested for the memory pool. | | max | MB | The maximum amount of memory that can be allocated for the memory pool. | -| used | MB | The amount of memory currently being used by the memory pool. | - - +| used | MB | The amount of memory currently being used by the memory pool. | diff --git a/home/versioned_docs/version-v1.5.x/help/http_sd.md b/home/versioned_docs/version-v1.5.x/help/http_sd.md index 3278936d66d..6b8de487555 100644 --- a/home/versioned_docs/version-v1.5.x/help/http_sd.md +++ b/home/versioned_docs/version-v1.5.x/help/http_sd.md @@ -16,39 +16,37 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito > We currently support for `Consul` and `Nacos`. 2. Add http_sd monitor and enter necessary info about **Register center** on Hertzbeat, such as host, port and so on. - 3. 
Click **OK** # Configuration parameter -| Parameter name | Parameter help description | -| --------------------- | ------------------------------------------------------------ | -| Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Task name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Register center | -| Discovery Client Type | Select one Register center that you want to monitor | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Parameter name | Parameter help description | +|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Task name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Register center | +| Discovery Client Type | Select one Register center that you want to monitor | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | # Collection Metrics ## Metrics Set:server -| Metric name | Metric unit | Metric help description | -| ------------- | ----------- | ----------------------- | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|-------------------------| | Address | | | | Port | | | | Response Time | ms | | ## Metrics Set:service -| Metric name | Metric unit | Metric help description | -| ------------- | ----------- | -------------------------------- | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------| | Service Id | | | | Service Name | | | | Address | | | | Port | | | | Health Status | | Current health status of service | - diff --git a/home/versioned_docs/version-v1.5.x/help/huawei_switch.md b/home/versioned_docs/version-v1.5.x/help/huawei_switch.md index 2e9982c5ef9..902c0596965 100644 --- a/home/versioned_docs/version-v1.5.x/help/huawei_switch.md +++ b/home/versioned_docs/version-v1.5.x/help/huawei_switch.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, network monitoring, Huawei switch ] ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | 
|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Task Name | Identify the name of this monitoring. The name needs to be unique | @@ -25,8 +25,8 @@ keywords: [ open source monitoring tool, network monitoring, Huawei switch ] | SNMP privPassphrase | For SNMP v3, SNMP encrypted passwords | | privPassword Encryption | For SNMP v3, SNMP encrypted algorithm | | Timeout | Set the timeout time when querying unresponsive data, in milliseconds, the default is 6000 milliseconds | -| Intervals | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Description | For more information about identifying and describing this monitoring, users can note information here | +| Intervals | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Description | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric @@ -37,7 +37,7 @@ This document only introduces the monitoring indicators queried in the monitor t #### Metric set: huawei_core -| Metric Name | Metric Unit | Metric Help Description | +| Metric Name | Metric Unit | Metric Help Description | 
|---------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | ifIndex | none | Interface index. This value is greater than zero and globally unique. | | ifDescr | none | A textual string containing information about the interface. This string should include the name of the manufacturer, the product name and the version of the interface hardware/software. | @@ -47,7 +47,8 @@ This document only introduces the monitoring indicators queried in the monitor t | ifInDiscards | none | The number of inbound packets which were chosen to be discarded even though no errors had been detected to prevent their being deliverable to a higher-layer protocol. One possible reason for discarding such a packet could be to free up buffer space. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | | ifInErrors | none | For packet-oriented interfaces, the number of inbound packets that contained errors preventing them from being deliverable to a higher-layer protocol. For character-oriented or fixed-length interfaces, the number of inbound transmission units that contained errors preventing them from being deliverable to a higher-layer protocol. 
Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | | ifOutOctets | octets | The total number of octets transmitted out of the interface, including framing characters. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | -| ifOutDiscards | none | The number of outbound packets which were chosen to be discarded even though no errors had been detected to prevent their being transmitted. One possible reason for discarding such a packet could be to free up buffer space. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | +| ifOutDiscards | none | The number of outbound packets which were chosen to be discarded even though no errors had been detected to prevent their being transmitted. One possible reason for discarding such a packet could be to free up buffer space. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | | ifOutErrors | none | For packet-oriented interfaces, the number of outbound packets that could not be transmitted because of errors. For character-oriented or fixed-length interfaces, the number of outbound transmission units that could not be transmitted because of errors. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | | ifAdminStatus | none | The desired state of the interface. The testing(3) state indicates that no operational packets can be passed. 
When a managed system initializes, all interfaces start with ifAdminStatus in the down(2) state. As a result of either explicit management action or per configuration information retained by the managed system, ifAdminStatus is then changed to either the up(1) or testing(3) states (or remains in the down(2) state). | | ifOperStatus | none | The current operational state of the interface. The testing(3) state indicates that no operational packets can be passed. If ifAdminStatus is down(2) then ifOperStatus should be down(2). If ifAdminStatus is changed to up(1) then ifOperStatus should change to up(1) if the interface is ready to transmit and receive network traffic; it should change to dormant(5) if the interface is waiting for external actions (such as a serial line waiting for an incoming connection); it should remain in the down(2) state if and only if there is a fault that prevents it from going to the up(1) state; it should remain in the notPresent(6) state if the interface has missing (typically, hardware) components. | + diff --git a/home/versioned_docs/version-v1.5.x/help/hugegraph.md b/home/versioned_docs/version-v1.5.x/help/hugegraph.md index 4fca13f4e00..66b0574aab7 100644 --- a/home/versioned_docs/version-v1.5.x/help/hugegraph.md +++ b/home/versioned_docs/version-v1.5.x/help/hugegraph.md @@ -4,6 +4,7 @@ title: Monitoring HugeGraph Monitoring sidebar_label: Apache HugeGraph keywords: [Open Source Monitoring System, Open Source Database Monitoring, HugeGraph Monitoring] --- + > Collect and monitor the general performance metrics of HugeGraph **Protocol used: HTTP** @@ -14,137 +15,127 @@ Check the `rest-server.properties` file to obtain the value of the `restserver_p ## Configuration Parameters - -| Parameter Name | Parameter Description | -|------------------|--------------------------------------------------------| -| Target Host | The IPv4, IPv6, or domain name of the monitored endpoint. Note ⚠️ Do not include protocol headers (eg: https://, http://). 
| -| Port | Port number of the HugeGraph restserver, default is 8080. i.e., the value of the `restserver_port` parameter | -| Enable SSL | Enable SSL usage | -| Base Path | Base path, default is: /metrics, usually does not need to be modified | -| Task Name | Identifies the name of this monitoring, ensuring uniqueness. | -| Collection Interval | Interval for periodically collecting data for monitoring, in seconds, with a minimum interval of 30 seconds | -| Probe Enabled | Whether to probe before adding new monitoring, only continue with add/modify operations if the probe is successful | -| Description | Additional identification and description of this monitoring, users can add information here | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------| +| Target Host | The IPv4, IPv6, or domain name of the monitored endpoint. Note ⚠️ Do not include protocol headers (eg: https://, http://). | +| Port | Port number of the HugeGraph restserver, default is 8080. i.e., the value of the `restserver_port` parameter | +| Enable SSL | Enable SSL usage | +| Base Path | Base path, default is: /metrics, usually does not need to be modified | +| Task Name | Identifies the name of this monitoring, ensuring uniqueness. 
| +| Collection Interval | Interval for periodically collecting data for monitoring, in seconds, with a minimum interval of 30 seconds | +| Probe Enabled | Whether to probe before adding new monitoring, only continue with add/modify operations if the probe is successful | +| Description | Additional identification and description of this monitoring, users can add information here | ### Metrics Collection #### Metric Set: gauges -| Metric Name | Metric Unit | Metric Description | -| ------------------------------------ | -------- | --------------------------------- | -| edge-hugegraph-capacity | NONE | Indicates the capacity limit of edges in the current graph | -| edge-hugegraph-expire | NONE | Indicates the expiration time of edge data | -| edge-hugegraph-hits | NONE | Indicates the number of hits in the edge data cache | -| edge-hugegraph-miss | NONE | Indicates the number of misses in the edge data cache | -| edge-hugegraph-size | NONE | Indicates the number of edges in the current graph | -| instances | NONE | Indicates the number of currently running HugeGraph instances| -| schema-id-hugegraph-capacity | NONE | Indicates the capacity limit of schema IDs in the graph | -| schema-id-hugegraph-expire | NONE | Indicates the expiration time of schema ID data | -| schema-id-hugegraph-hits | NONE | Indicates the number of hits in the schema ID data cache| -| schema-id-hugegraph-miss | NONE | Indicates the number of misses in the schema ID data cache| -| schema-id-hugegraph-size | NONE | Indicates the number of schema IDs in the current graph | -| schema-name-hugegraph-capacity | NONE | Indicates the capacity limit of schema names in the graph | -| schema-name-hugegraph-expire | NONE | Indicates the expiration time of schema name data | -| schema-name-hugegraph-hits | NONE | Indicates the number of hits in the schema name data cache| -| schema-name-hugegraph-miss | NONE | Indicates the number of misses in the schema name data cache| -| schema-name-hugegraph-size | 
NONE | Indicates the number of schema names in the current graph | -| token-hugegraph-capacity | NONE | Indicates the capacity limit of tokens in the graph | -| token-hugegraph-expire | NONE | Indicates the expiration time of token data | -| token-hugegraph-hits | NONE | Indicates the number of hits in the token data cache | -| token-hugegraph-miss | NONE | Indicates the number of misses in the token data cache | -| token-hugegraph-size | NONE | Indicates the number of tokens in the current graph | -| users-hugegraph-capacity | NONE | Indicates the capacity limit of users in the graph | -| users-hugegraph-expire | NONE | Indicates the expiration time of user data | -| users-hugegraph-hits | NONE | Indicates the number of hits in the user data cache | -| users-hugegraph-miss | NONE | Indicates the number of misses in the user data cache | -| users-hugegraph-size | NONE | Indicates the number of users in the current graph | -| users_pwd-hugegraph-capacity | NONE | Indicates the capacity limit of user passwords | -| users_pwd-hugegraph-expire | NONE | Indicates the expiration time of user password data | -| users_pwd-hugegraph-hits | NONE | Indicates the number of hits in the user password data cache| -| users_pwd-hugegraph-miss | NONE | Indicates the number of misses in the user password data cache| -| users_pwd-hugegraph-size | NONE | Indicates the number of user passwords in the current graph | -| vertex-hugegraph-capacity | NONE | Indicates the capacity limit of vertices in the graph | -| vertex-hugegraph-expire | NONE | Indicates the expiration time of vertex data | -| vertex-hugegraph-hits | NONE | Indicates the number of hits in the vertex data cache | -| vertex-hugegraph-miss | NONE | Indicates the number of misses in the vertex data cache | -| vertex-hugegraph-size | NONE | Indicates the number of vertices in the current graph | -| batch-write-threads | NONE | Indicates the number of threads for batch write operations | -| max-write-threads | NONE | Indicates 
the maximum number of threads for write operations | -| pending-tasks | NONE | Indicates the number of pending tasks | -| workers | NONE | Indicates the current number of worker threads | -| average-load-penalty | NONE | Indicates the average load penalty | -| estimated-size | NONE | Indicates the estimated data size | -| eviction-count | NONE | Indicates the number of evicted data entries | -| eviction-weight | NONE | Indicates the weight of evicted data | -| hit-count | NONE | Indicates the total cache hits | -| hit-rate | NONE | Indicates the cache hit rate | -| load-count | NONE | Indicates the number of data loads | -| load-failure-count | NONE | Indicates the number of data load failures | -| load-failure-rate | NONE | Indicates the data load failure rate | -| load-success-count | NONE | Indicates the number of successful data loads | -| long-run-compilation-count | NONE | Indicates the number of long-running compilations | -| miss-count | NONE | Indicates the total cache misses | -| miss-rate | NONE | Indicates the cache miss rate | -| request-count | NONE | Indicates the total request count | -| total-load-time | NONE | Indicates the total data load time | -| sessions | NONE | Indicates the current number of active sessions | - - - +| Metric Name | Metric Unit | Metric Description | +|--------------------------------|-------------|----------------------------------------------------------------| +| edge-hugegraph-capacity | NONE | Indicates the capacity limit of edges in the current graph | +| edge-hugegraph-expire | NONE | Indicates the expiration time of edge data | +| edge-hugegraph-hits | NONE | Indicates the number of hits in the edge data cache | +| edge-hugegraph-miss | NONE | Indicates the number of misses in the edge data cache | +| edge-hugegraph-size | NONE | Indicates the number of edges in the current graph | +| instances | NONE | Indicates the number of currently running HugeGraph instances | +| schema-id-hugegraph-capacity | NONE | Indicates 
the capacity limit of schema IDs in the graph | +| schema-id-hugegraph-expire | NONE | Indicates the expiration time of schema ID data | +| schema-id-hugegraph-hits | NONE | Indicates the number of hits in the schema ID data cache | +| schema-id-hugegraph-miss | NONE | Indicates the number of misses in the schema ID data cache | +| schema-id-hugegraph-size | NONE | Indicates the number of schema IDs in the current graph | +| schema-name-hugegraph-capacity | NONE | Indicates the capacity limit of schema names in the graph | +| schema-name-hugegraph-expire | NONE | Indicates the expiration time of schema name data | +| schema-name-hugegraph-hits | NONE | Indicates the number of hits in the schema name data cache | +| schema-name-hugegraph-miss | NONE | Indicates the number of misses in the schema name data cache | +| schema-name-hugegraph-size | NONE | Indicates the number of schema names in the current graph | +| token-hugegraph-capacity | NONE | Indicates the capacity limit of tokens in the graph | +| token-hugegraph-expire | NONE | Indicates the expiration time of token data | +| token-hugegraph-hits | NONE | Indicates the number of hits in the token data cache | +| token-hugegraph-miss | NONE | Indicates the number of misses in the token data cache | +| token-hugegraph-size | NONE | Indicates the number of tokens in the current graph | +| users-hugegraph-capacity | NONE | Indicates the capacity limit of users in the graph | +| users-hugegraph-expire | NONE | Indicates the expiration time of user data | +| users-hugegraph-hits | NONE | Indicates the number of hits in the user data cache | +| users-hugegraph-miss | NONE | Indicates the number of misses in the user data cache | +| users-hugegraph-size | NONE | Indicates the number of users in the current graph | +| users_pwd-hugegraph-capacity | NONE | Indicates the capacity limit of user passwords | +| users_pwd-hugegraph-expire | NONE | Indicates the expiration time of user password data | +| 
users_pwd-hugegraph-hits | NONE | Indicates the number of hits in the user password data cache | +| users_pwd-hugegraph-miss | NONE | Indicates the number of misses in the user password data cache | +| users_pwd-hugegraph-size | NONE | Indicates the number of user passwords in the current graph | +| vertex-hugegraph-capacity | NONE | Indicates the capacity limit of vertices in the graph | +| vertex-hugegraph-expire | NONE | Indicates the expiration time of vertex data | +| vertex-hugegraph-hits | NONE | Indicates the number of hits in the vertex data cache | +| vertex-hugegraph-miss | NONE | Indicates the number of misses in the vertex data cache | +| vertex-hugegraph-size | NONE | Indicates the number of vertices in the current graph | +| batch-write-threads | NONE | Indicates the number of threads for batch write operations | +| max-write-threads | NONE | Indicates the maximum number of threads for write operations | +| pending-tasks | NONE | Indicates the number of pending tasks | +| workers | NONE | Indicates the current number of worker threads | +| average-load-penalty | NONE | Indicates the average load penalty | +| estimated-size | NONE | Indicates the estimated data size | +| eviction-count | NONE | Indicates the number of evicted data entries | +| eviction-weight | NONE | Indicates the weight of evicted data | +| hit-count | NONE | Indicates the total cache hits | +| hit-rate | NONE | Indicates the cache hit rate | +| load-count | NONE | Indicates the number of data loads | +| load-failure-count | NONE | Indicates the number of data load failures | +| load-failure-rate | NONE | Indicates the data load failure rate | +| load-success-count | NONE | Indicates the number of successful data loads | +| long-run-compilation-count | NONE | Indicates the number of long-running compilations | +| miss-count | NONE | Indicates the total cache misses | +| miss-rate | NONE | Indicates the cache miss rate | +| request-count | NONE | Indicates the total request count | 
+| total-load-time | NONE | Indicates the total data load time | +| sessions | NONE | Indicates the current number of active sessions | #### Metric Set: counters - - -| Metric Name | Metric Unit | Metric Description | -| --------------------------------------------- | -------- | ---------------------------------------- | -| GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests | -| GET-TOTAL_COUNTER | NONE | Records the total number of GET requests | -| favicon-ico-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve favicon.ico | -| favicon-ico-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve favicon.ico | -| graphs-HEAD-FAILED_COUNTER | NONE | Records the number of failed HEAD requests for graphs resources | -| graphs-HEAD-SUCCESS_COUNTER | NONE | Records the number of successful HEAD requests for graphs resources | -| graphs-HEAD-TOTAL_COUNTER | NONE | Records the total number of HEAD requests for graphs resources | -| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests for vertices in HugeGraph graphs | -| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests for vertices in HugeGraph graphs | -| metrics-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve metrics | -| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | -| metrics-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics | -| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | -| metrics-gauges-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics gauges | -| metrics-gauges-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics gauges | - - - +| Metric Name | Metric Unit | Metric 
Description | +|-----------------------------------------------------|-------------|--------------------------------------------------------------------------------| +| GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests | +| GET-TOTAL_COUNTER | NONE | Records the total number of GET requests | +| favicon-ico-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve favicon.ico | +| favicon-ico-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve favicon.ico | +| graphs-HEAD-FAILED_COUNTER | NONE | Records the number of failed HEAD requests for graphs resources | +| graphs-HEAD-SUCCESS_COUNTER | NONE | Records the number of successful HEAD requests for graphs resources | +| graphs-HEAD-TOTAL_COUNTER | NONE | Records the total number of HEAD requests for graphs resources | +| graphs-hugegraph-graph-vertices-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests for vertices in HugeGraph graphs | +| graphs-hugegraph-graph-vertices-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests for vertices in HugeGraph graphs | +| metrics-GET-FAILED_COUNTER | NONE | Records the number of failed GET requests to retrieve metrics | +| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | +| metrics-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics | +| metrics-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics | +| metrics-gauges-GET-SUCCESS_COUNTER | NONE | Records the number of successful GET requests to retrieve metrics gauges | +| metrics-gauges-GET-TOTAL_COUNTER | NONE | Records the total number of GET requests to retrieve metrics gauges | #### Metric Set: system +| Metric Name | Metric Unit | Metric Description | 
+|---------------------------------------------|-------------|---------------------------------------------------------------------------------------------------------| +| mem | NONE | Indicates the total memory of the system | +| mem_total | NONE | Indicates the total memory of the system (same as mem) | +| mem_used | NONE | Indicates the currently used memory of the system | +| mem_free | NONE | Indicates the free memory of the system | +| mem_unit | NONE | Indicates the unit of memory (such as bytes, kilobytes, megabytes, etc.) | +| processors | NONE | Indicates the number of processors in the system | +| uptime | NONE | Indicates the system uptime, i.e., the time since booting | +| systemload_average | NONE | Indicates the average system load, reflecting the system's busyness | +| heap_committed | NONE | Indicates the committed size of JVM heap memory, i.e., the guaranteed heap memory size available to JVM | +| heap_init | NONE | Indicates the initial size of JVM heap memory | +| heap_used | NONE | Indicates the currently used JVM heap memory size | +| heap_max | NONE | Indicates the maximum available size of JVM heap memory | +| nonheap_committed | NONE | Indicates the committed size of JVM non-heap memory | +| nonheap_init | NONE | Indicates the initial size of JVM non-heap memory | +| nonheap_used | NONE | Indicates the currently used JVM non-heap memory size | +| nonheap_max | NONE | Indicates the maximum available size of JVM non-heap memory | +| thread_peak | NONE | Indicates the peak number of threads since JVM startup | +| thread_daemon | NONE | Indicates the current number of active daemon threads | +| thread_total_started | NONE | Indicates the total number of threads started since JVM startup | +| thread_count | NONE | Indicates the current number of active threads | +| garbage_collector_g1_young_generation_count | NONE | Indicates the number of young generation garbage collections by G1 garbage collector | +| 
garbage_collector_g1_young_generation_time | NONE | Indicates the total time spent in young generation garbage collections by G1 garbage collector | +| garbage_collector_g1_old_generation_count | NONE | Indicates the number of old generation garbage collections by G1 garbage collector | +| garbage_collector_g1_old_generation_time | NONE | Indicates the total time spent in old generation garbage collections by G1 garbage collector | +| garbage_collector_time_unit | NONE | Indicates the unit of garbage collection time (such as milliseconds, seconds, etc.) | - -| Metric Name | Metric Unit | Metric Description | -| --------------------------------------------- | -------- | -------------------------------------------------- | -| mem | NONE | Indicates the total memory of the system | -| mem_total | NONE | Indicates the total memory of the system (same as mem) | -| mem_used | NONE | Indicates the currently used memory of the system | -| mem_free | NONE | Indicates the free memory of the system | -| mem_unit | NONE | Indicates the unit of memory (such as bytes, kilobytes, megabytes, etc.) 
| -| processors | NONE | Indicates the number of processors in the system | -| uptime | NONE | Indicates the system uptime, i.e., the time since booting | -| systemload_average | NONE | Indicates the average system load, reflecting the system's busyness | -| heap_committed | NONE | Indicates the committed size of JVM heap memory, i.e., the guaranteed heap memory size available to JVM | -| heap_init | NONE | Indicates the initial size of JVM heap memory | -| heap_used | NONE | Indicates the currently used JVM heap memory size | -| heap_max | NONE | Indicates the maximum available size of JVM heap memory | -| nonheap_committed | NONE | Indicates the committed size of JVM non-heap memory | -| nonheap_init | NONE | Indicates the initial size of JVM non-heap memory | -| nonheap_used | NONE | Indicates the currently used JVM non-heap memory size | -| nonheap_max | NONE | Indicates the maximum available size of JVM non-heap memory | -| thread_peak | NONE | Indicates the peak number of threads since JVM startup | -| thread_daemon | NONE | Indicates the current number of active daemon threads | -| thread_total_started | NONE | Indicates the total number of threads started since JVM startup | -| thread_count | NONE | Indicates the current number of active threads | -| garbage_collector_g1_young_generation_count | NONE | Indicates the number of young generation garbage collections by G1 garbage collector | -| garbage_collector_g1_young_generation_time | NONE | Indicates the total time spent in young generation garbage collections by G1 garbage collector | -| garbage_collector_g1_old_generation_count | NONE | Indicates the number of old generation garbage collections by G1 garbage collector | -| garbage_collector_g1_old_generation_time | NONE | Indicates the total time spent in old generation garbage collections by G1 garbage collector | -| garbage_collector_time_unit | NONE | Indicates the unit of garbage collection time (such as milliseconds, seconds, etc.) 
| \ No newline at end of file diff --git a/home/versioned_docs/version-v1.5.x/help/influxdb.md b/home/versioned_docs/version-v1.5.x/help/influxdb.md index cf3d838e796..92c5da380ef 100644 --- a/home/versioned_docs/version-v1.5.x/help/influxdb.md +++ b/home/versioned_docs/version-v1.5.x/help/influxdb.md @@ -7,61 +7,60 @@ keywords: [open source monitoring system, open source database monitoring, Influ ### Configuration Parameters -| Parameter Name | Parameter Description | -| -------------- | -------------------------------------------------------- | -| Monitor Host | The IPv4, IPv6, or domain name of the target being monitored. Note⚠️: Do not include the protocol header (e.g., https://, http://). | -| Task Name | A unique identifier for this monitoring task. | -| Port | The port on which the database is exposed. Default is 8086. | -| URL | The database connection URL, usually constructed from the host. No need to add it separately. | -| Collection Interval | The interval at which data is collected during monitoring, in seconds. The minimum interval that can be set is 30 seconds. | -| Probe Enabled | Whether to perform a probe check for monitoring availability before adding or modifying the monitoring task. | -| Description | Additional notes and remarks about this monitoring task. Users can provide information and descriptions here. | +| Parameter Name | Parameter Description | +|---------------------|-------------------------------------------------------------------------------------------------------------------------------------| +| Monitor Host | The IPv4, IPv6, or domain name of the target being monitored. Note⚠️: Do not include the protocol header (e.g., https://, http://). | +| Task Name | A unique identifier for this monitoring task. | +| Port | The port on which the database is exposed. Default is 8086. | +| URL | The database connection URL, usually constructed from the host. No need to add it separately. 
| +| Collection Interval | The interval at which data is collected during monitoring, in seconds. The minimum interval that can be set is 30 seconds. | +| Probe Enabled | Whether to perform a probe check for monitoring availability before adding or modifying the monitoring task. | +| Description | Additional notes and remarks about this monitoring task. Users can provide information and descriptions here. | ### Collected Metrics #### Metric Set: influxdb_info | Metric Name | Metric Unit | Metric Description | -|------------| ----------- |--------| -| build_date | N/A | Creation date | -| os | N/A | Operating system | -| cpus | N/A | CPUs | -| version | N/A | Version number | +|-------------|-------------|--------------------| +| build_date | N/A | Creation date | +| os | N/A | Operating system | +| cpus | N/A | CPUs | +| version | N/A | Version number | #### Metric Set: http_api_request_duration_seconds -| Metric Name | Metric Unit | Metric Description | -|---------------|------|---------| -| handler | N/A | Handler | -| path | N/A | Path | -| response_code | N/A | Response code | -| method | N/A | Request method | -| user_agent | N/A | User agent | -| status | N/A | Status | +| Metric Name | Metric Unit | Metric Description | +|---------------|-------------|--------------------| +| handler | N/A | Handler | +| path | N/A | Path | +| response_code | N/A | Response code | +| method | N/A | Request method | +| user_agent | N/A | User agent | +| status | N/A | Status | #### Metric Set: storage_compactions_queued -| Metric Name | Metric Unit | Metric Description | -|---------------------------------|------|------------| -| bucket | N/A | Storage bucket | -| engine | N/A | Engine type | -| id | N/A | Identifier | -| level | N/A | Level | -| path | N/A | Data file path | - +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|--------------------| +| bucket | N/A | Storage bucket | +| engine | N/A | Engine type | +| id | N/A | Identifier | +| 
level | N/A | Level | +| path | N/A | Data file path | #### Metric Set: http_write_request_bytes -| Metric Name | Metric Unit | Metric Description | -| ----------- |------|--------| -| endpoint | N/A | Endpoint | -| org_id | N/A | Organization identifier | -| status | N/A | Status | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|-------------------------| +| endpoint | N/A | Endpoint | +| org_id | N/A | Organization identifier | +| status | N/A | Status | #### Metric Set: qc_requests_total -| Metric Name | Metric Unit | Metric Description | -| ----------- |------|--------| -| result | N/A | Result | -| org | N/A | Organization identifier | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|-------------------------| +| result | N/A | Result | +| org | N/A | Organization identifier | diff --git a/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md b/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md index fcef4b4acff..afed14cad7a 100644 --- a/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md +++ b/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|----------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | IP, IPv6, or domain name of the target being monitored. Note ⚠️: Do not include protocol header (e.g., https://, http://). | | Monitoring name | Name to identify this monitoring, ensuring uniqueness of names. 
| @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: basic_influxdb_memstats_alloc -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: influxdb_database_numMeasurements -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | job | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -46,7 +46,7 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: influxdb_query_rate_seconds -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | @@ -54,8 +54,9 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL #### Metric Set: influxdb_queryExecutor_queriesFinished_10s -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | | value | None | Metric value | + diff --git a/home/versioned_docs/version-v1.5.x/help/iotdb.md b/home/versioned_docs/version-v1.5.x/help/iotdb.md index 0e4dcad9912..bec827feb73 100644 --- a/home/versioned_docs/version-v1.5.x/help/iotdb.md +++ 
b/home/versioned_docs/version-v1.5.x/help/iotdb.md @@ -61,33 +61,33 @@ predefinedMetrics: #### Metric collection: cluster_node_status -| Metric Name | Metric Unit | Metric Help Description | -| --------- |------|-------------------------| -| name | None | Node name IP | -| status | None | Node status, 1=online 2=offline | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------| +| name | None | Node name IP | +| status | None | Node status, 1=online 2=offline | #### Metric collection: jvm_memory_committed_bytes -| Metric Name | Metric Unit | Metric Help Description | -|-------|------|------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | The memory size currently requested by the JVM | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------------| +| area | none | heap memory or nonheap memory | +| id | none | memory block | +| value | MB | The memory size currently requested by the JVM | #### Metric collection: jvm_memory_used_bytes -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | JVM used memory size | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------| +| area | none | heap memory or nonheap memory | +| id | none | memory block | +| value | MB | JVM used memory size | #### Metric collection: jvm_threads_states_threads -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------| -| state | none | thread state | -| count | None | The number of threads corresponding to the thread state | +| Metric Name | Metric Unit | Metric Help Description | 
+|-------------|-------------|---------------------------------------------------------| +| state | none | thread state | +| count | None | The number of threads corresponding to the thread state | #### Index collection: quantity business data @@ -114,7 +114,8 @@ predefinedMetrics: #### Metric collection: thrift_connections -| Metric Name | Metric Unit | Metric Help Description | -|-------|------|-------------| -| name | None | name | -| connection | none | thrift current connection number | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------| +| name | None | name | +| connection | none | thrift current connection number | + diff --git a/home/versioned_docs/version-v1.5.x/help/issue.md b/home/versioned_docs/version-v1.5.x/help/issue.md index c3dffd147be..a48e84cfd4e 100644 --- a/home/versioned_docs/version-v1.5.x/help/issue.md +++ b/home/versioned_docs/version-v1.5.x/help/issue.md @@ -1,49 +1,58 @@ --- id: issue title: Common issues -sidebar_label: Common issues +sidebar_label: Common issues --- -### Monitoring common issues +### Monitoring common issues -1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http +1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** -> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. 
This problem does not exist) +> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http + +2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** + +> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) 3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. -The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. > When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - -### Docker Deployment common issues +### Docker Deployment common issues 1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** -The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. 
+ The problem is that the Docker container fails to visit and connect to the localhost port, because the Docker default network mode is Bridge mode, which can't access the local machine through localhost. + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` +> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. **According to the process deploy,visit http://ip:1157/ no interface** -Please refer to the following points to troubleshoot issues: + Please refer to the following points to troubleshoot issues: + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> > three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. +> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. +> +>> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. + +3. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. 
-> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. +> two:If you install TDengine 2.3+ or a later version, you must execute `systemctl start taosadapter` to start the adapter in addition to starting the server. ### Package Deployment common issues 1. **According to the process deploy,visit http://ip:1157/ no interface** Please refer to the following points to troubleshoot issues: + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. > two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** + > one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. +> two:If you install TDengine 2.3+ or a later version, you must execute `systemctl start taosadapter` to start the adapter in addition to starting the server. 
+ diff --git a/home/versioned_docs/version-v1.5.x/help/jetty.md b/home/versioned_docs/version-v1.5.x/help/jetty.md index 3e5230aa9d0..6e069553dba 100644 --- a/home/versioned_docs/version-v1.5.x/help/jetty.md +++ b/home/versioned_docs/version-v1.5.x/help/jetty.md @@ -23,6 +23,7 @@ keywords: [open source monitoring tool, open source jetty web server monitoring java -jar $JETTY_HOME/start.jar --add-module=jmx java -jar $JETTY_HOME/start.jar --add-module=jmx-remote ``` + Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file 2. Edit the `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file to modify the JMX IP port and other parameters. @@ -50,7 +51,7 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -73,19 +74,17 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` | max | kb | max size | | used | kb | used size | - #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | diff --git a/home/versioned_docs/version-v1.5.x/help/jvm.md b/home/versioned_docs/version-v1.5.x/help/jvm.md index 95b1545fffc..3b47e0e7a8a 100644 --- a/home/versioned_docs/version-v1.5.x/help/jvm.md +++ b/home/versioned_docs/version-v1.5.x/help/jvm.md @@ -24,7 +24,7 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -58,16 +58,15 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -76,4 +75,3 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/kafka.md b/home/versioned_docs/version-v1.5.x/help/kafka.md index 067cabef0e9..f86913733b1 100644 --- a/home/versioned_docs/version-v1.5.x/help/kafka.md +++ b/home/versioned_docs/version-v1.5.x/help/kafka.md @@ -27,70 +27,64 @@ exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by JMX | +| Username | JMX connection user name, optional | +| Password | JMX connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metrics #### Metrics Set:server_info -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Version | | Kafka Version | -| StartTimeMs | ms | Start Time | -| CommitId | | Version Commit ID | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Version | | Kafka Version | +| StartTimeMs | ms | Start Time | +| CommitId | | Version Commit ID | #### Metrics Set:memory_pool -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:active_controller_count -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Value | | server active controller count | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------| +| Value | | server active controller count | #### Metrics Set:broker_partition_count -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| Value | | broker partition count | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Value | | broker partition count | #### Metrics Set:broker_leader_count -| Metric name | Metric unit | Metric help description | 
-| ----------- | ----------- | ----------- | -| Value | | broker leader count | - - -#### Metrics Set:broker_handler_avg_percent - -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| EventType | | event type | -| RateUnit | | rate unit | -| Count | | percent count | -| OneMinuteRate | % | One Minute Rate | -| FiveMinuteRate | % | Five Minute Rate | -| MeanRate | % | Mean Rate | -| FifteenMinuteRate | % | Fifteen Minute Rate | - - - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| Value | | broker leader count | + +#### Metrics Set:broker_handler_avg_percent + +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|-------------------------| +| EventType | | event type | +| RateUnit | | rate unit | +| Count | | percent count | +| OneMinuteRate | % | One Minute Rate | +| FiveMinuteRate | % | Five Minute Rate | +| MeanRate | % | Mean Rate | +| FifteenMinuteRate | % | Fifteen Minute Rate | diff --git a/home/versioned_docs/version-v1.5.x/help/kafka_promql.md b/home/versioned_docs/version-v1.5.x/help/kafka_promql.md index e88f6eb0342..ea358d0de8d 100644 --- a/home/versioned_docs/version-v1.5.x/help/kafka_promql.md +++ b/home/versioned_docs/version-v1.5.x/help/kafka_promql.md @@ -16,7 +16,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka ### Configuration Parameters -| Parameter Name | Parameter Description | +| Parameter Name | Parameter Description | |---------------------|----------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | IP, IPv6, or domain name of the target being monitored. Note ⚠️: Do not include protocol header (e.g., https://, http://). | | Monitoring name | Name to identify this monitoring, ensuring uniqueness of names. 
| @@ -35,7 +35,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka #### Metric Set: kafka_brokers -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | \_\_name__ | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -44,7 +44,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka #### Metric Set: kafka_topic_partitions -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | \_\_name__ | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -53,7 +53,7 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka #### Metric Set: kafka_server_brokertopicmetrics_bytesinpersec -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|--------------------------------------| | \_\_name__ | None | Metric name | | instance | None | Instance to which the metric belongs | @@ -64,3 +64,4 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka 1. If Kafka is enabled with JMX monitoring, you can use [Kafka](kafka) Monitoring. 2. If Kafka cluster deploys kafka_exporter to expose monitoring metrics, you can refer to [Prometheus task](prometheus) to configure the Prometheus collection task to monitor kafka. 
+ diff --git a/home/versioned_docs/version-v1.5.x/help/kubernetes.md b/home/versioned_docs/version-v1.5.x/help/kubernetes.md index 8e10896c6d1..45adda576fc 100644 --- a/home/versioned_docs/version-v1.5.x/help/kubernetes.md +++ b/home/versioned_docs/version-v1.5.x/help/kubernetes.md @@ -28,6 +28,7 @@ kubectl describe secret {secret} -n kube-system ``` #### method two: + ```shell kubectl create serviceaccount cluster-admin kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-admin --serviceaccount=default:cluster-admin @@ -36,59 +37,60 @@ kubectl create token --duration=1000h cluster-admin ### Configure parameters -| Parameter name | Parameter Help describes the | -|-------------|------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| APiServer port | K8s APiServer port, default 6443 | -| token | Authorize the Access Token | -| URL | The database connection URL is optional, if configured, the database name, user name and password parameters in the URL will override the parameter | configured above -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | 
+|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| APiServer port | K8s APiServer port, default 6443 | +| token | Authorize the Access Token | +| URL | The database connection URL is optional, if configured, the database name, user name and password parameters in the URL will override the parameter | configured above | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric collection: nodes -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------| -| node_name | None | Node name | -| is_ready | None | Node Status | -| capacity_cpu | None | CPU capacity | -| allocatable_cpu | None | CPU | allotted -| capacity_memory | None | Memory capacity | -| allocatable_memory | None | Memory | allocated -| creation_time | None | Node creation time | +| Metric Name | metric unit | Metrics help describe | +|--------------------|-------------|-----------------------|-----------| +| node_name | None | Node name | +| is_ready | None | Node Status | +| capacity_cpu | None | CPU capacity | +| allocatable_cpu | None | CPU | allotted | +| capacity_memory | None | Memory capacity | 
+| allocatable_memory | None | Memory | allocated | +| creation_time | None | Node creation time | #### metric Collection: namespaces -| Metric Name | metric unit | Metrics help describe | -| -------- | -------- |-------------| -| namespace | None | namespace name | -| status | None | Status | -| creation_time | None | Created | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-----------------------| +| namespace | None | namespace name | +| status | None | Status | +| creation_time | None | Created | #### metric collection: pods -| Metric Name | metric unit | Metrics help describe | -| ---------------- | -------- |----------------| -| pod | None | Pod name | -| namespace | None | The namespace | to which the pod belongs -| status | None | Pod status | -| restart | None | Number of restarts | -| host_ip | None | The IP address of the host is | -| pod_ip | None | pod ip | -| creation_time | None | Pod creation time | -| start_time | None | Pod startup time | +| Metric Name | metric unit | Metrics help describe | +|---------------|-------------|-------------------------------|--------------------------| +| pod | None | Pod name | +| namespace | None | The namespace | to which the pod belongs | +| status | None | Pod status | +| restart | None | Number of restarts | +| host_ip | None | The IP address of the host is | +| pod_ip | None | pod ip | +| creation_time | None | Pod creation time | +| start_time | None | Pod startup time | #### metric Collection: services -| Metric Name | metric unit | Metrics help describe | -| ---------------- |------|--------------------------------------------------------| -| service | None | Service Name | -| namespace | None | The namespace | to which the service belongs -| type | None | Service Type ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | None | cluster ip | -| selector | None | tag selector matches | -| creation_time | None | Created | +| Metric Name | metric unit | Metrics 
help describe | +|---------------|-------------|-----------------------------------------------------------|------------------------------| +| service | None | Service Name | +| namespace | None | The namespace | to which the service belongs | +| type | None | Service Type ClusterIP NodePort LoadBalancer ExternalName | +| cluster_ip | None | cluster ip | +| selector | None | tag selector matches | +| creation_time | None | Created | + diff --git a/home/versioned_docs/version-v1.5.x/help/linux.md b/home/versioned_docs/version-v1.5.x/help/linux.md index 05e3405ff6e..6c22028114c 100644 --- a/home/versioned_docs/version-v1.5.x/help/linux.md +++ b/home/versioned_docs/version-v1.5.x/help/linux.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source linux monitoring tool, monit ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | 
+|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % 
| usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/versioned_docs/version-v1.5.x/help/mariadb.md b/home/versioned_docs/version-v1.5.x/help/mariadb.md index e72668fe791..374e6e6a081 100644 --- a/home/versioned_docs/version-v1.5.x/help/mariadb.md +++ b/home/versioned_docs/version-v1.5.x/help/mariadb.md @@ -9,49 +9,46 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 3306 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. The default is 3306 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------------| +| version | none | Database version | +| port | none | Database exposure service port | +| datadir | none | Database storage data disk address | +| max_connections | none | Database maximum connections | #### Metric set:status -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| threads_created | none | MariaDB created total connections | -| threads_connected | none | MariaDB connected connections | -| threads_cached | none | MariaDB current cached connections | -| threads_running | none | MariaDB current active connections | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|------------------------------------| +| threads_created | none | MariaDB created total connections | +| threads_connected | none | MariaDB connected connections | +| threads_cached | none | MariaDB current cached connections | +| threads_running | none | MariaDB current active connections | #### Metric set:innodb -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| innodb_data_reads | none | innodb average number of reads from files per second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb 
average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | - - +| Metric name | Metric unit | Metric help description | +|---------------------|-------------|------------------------------------------------------| +| innodb_data_reads | none | innodb average number of reads from files per second | +| innodb_data_writes | none | innodb average number of writes from file per second | +| innodb_data_read | KB | innodb average amount of data read per second | +| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/versioned_docs/version-v1.5.x/help/memcached.md b/home/versioned_docs/version-v1.5.x/help/memcached.md index 5d89ce0977b..920da021e6b 100644 --- a/home/versioned_docs/version-v1.5.x/help/memcached.md +++ b/home/versioned_docs/version-v1.5.x/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance with 1.4 You need to use the stats command to view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. @@ -36,7 +36,7 @@ STAT version 1.4.15 ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -49,7 +49,7 @@ STAT version 1.4.15 #### Metrics Set:server_info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|---------------------------------------------------| | pid | | Memcache server process ID | | uptime | s | The number of seconds the server has been running | @@ -66,4 +66,5 @@ STAT version 1.4.15 | cmd_set | | Set command request count | | cmd_flush | | Flush command request count | | get_misses | | Get command misses | -| delete_misses | | Delete command misses | \ No newline at end of file +| delete_misses | | Delete command misses | + diff --git a/home/versioned_docs/version-v1.5.x/help/mongodb.md b/home/versioned_docs/version-v1.5.x/help/mongodb.md index 4a2951ec23c..9c536e73d7a 100644 --- a/home/versioned_docs/version-v1.5.x/help/mongodb.md +++ b/home/versioned_docs/version-v1.5.x/help/mongodb.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |------------------------|-------------------------------------------------------------------------------------------------------------------------| | Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. | @@ -27,7 +27,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Build Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|-----------------------------------------------------------------------------------------| | version | none | The version number of the MongoDB server. 
| | gitVersion | none | The Git version of the MongoDB codebase. | @@ -39,7 +39,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Server Document -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|-----------------------------------| | deleted | none | The number of documents deleted. | | inserted | none | The number of documents inserted. | @@ -48,21 +48,21 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Server Operation -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |----------------|-------------|------------------------------------------------------------------| | scanAndOrder | none | The number of times a query required both scanning and ordering. | | writeConflicts | none | The number of write conflicts that occurred. | #### Metric set: Max Connections -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |------------------|-------------|--------------------------------------------| | deletedDocuments | none | Number of deleted documents. | | passes | none | Total number of passes for TTL operations. | #### Metric set:System Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|------------------------------------------------------| | currentTime | none | Current system time. | | hostname | none | Hostname of the server. 
| @@ -75,7 +75,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:OS Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------|-------------|----------------------------------| | type | none | Type of the operating system. | | name | none | Name of the operating system. | @@ -83,7 +83,7 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m #### Metric set:Extra Info -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------|-------------|--------------------------------------------------------| | versionString | none | String describing the version of the operating system. | | libcVersion | none | Version of the C standard library (libc). | diff --git a/home/versioned_docs/version-v1.5.x/help/mysql.md b/home/versioned_docs/version-v1.5.x/help/mysql.md index 3f07be99380..dca64b3f9f0 100644 --- a/home/versioned_docs/version-v1.5.x/help/mysql.md +++ b/home/versioned_docs/version-v1.5.x/help/mysql.md @@ -7,9 +7,9 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo > Collect and monitor the general performance Metrics of MySQL database. Support MYSQL5+. -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -27,31 +27,28 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------------| +| version | none | Database version | +| port | none | Database exposure service port | +| datadir | none | Database storage data disk address | +| max_connections | none | Database maximum connections | #### Metric set:status -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| threads_created | none | MySql created total connections | -| threads_connected | none | MySql connected connections | -| threads_cached | none | MySql current cached connections | -| threads_running | none | MySql current active connections | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|----------------------------------| +| threads_created | none | MySql created total connections | +| threads_connected | none | MySql connected connections | +| threads_cached | none | MySql current cached connections | +| threads_running | none | MySql current active connections | #### Metric set:innodb -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| innodb_data_reads | none | innodb average number of reads from files per second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | - - 
+| Metric name | Metric unit | Metric help description | +|---------------------|-------------|------------------------------------------------------| +| innodb_data_reads | none | innodb average number of reads from files per second | +| innodb_data_writes | none | innodb average number of writes from file per second | +| innodb_data_read | KB | innodb average amount of data read per second | +| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/versioned_docs/version-v1.5.x/help/nacos.md b/home/versioned_docs/version-v1.5.x/help/nacos.md index 721f1776c2a..4de3661c17b 100644 --- a/home/versioned_docs/version-v1.5.x/help/nacos.md +++ b/home/versioned_docs/version-v1.5.x/help/nacos.md @@ -13,81 +13,83 @@ keywords: [open source monitoring tool, open source middleware monitoring tool, 1. Deploy the Nacos cluster according to [deployment document](https://nacos.io/en-us/docs/deployment.html). 2. Configure the application. properties file to expose metrics data. + ``` management.endpoints.web.exposure.include=* ``` + 3. Access ```{ip}:8848/nacos/actuator/prometheus``` to see if metrics data can be accessed. More information see [Nacos monitor guide](https://nacos.io/en-us/docs/monitor-guide.html). -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Target name | Identify the name of this monitoring. The name needs to be unique | -| Nacos Port | Port provided by the Nacos Server. 
The default is 8848 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|-------------------------------------------------------------------------------------------------------------------------| +| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Target name | Identify the name of this monitoring. The name needs to be unique | +| Nacos Port | Port provided by the Nacos Server. The default is 8848 | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:jvm -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| system_cpu_usage | none | cpu usage | -| system_load_average_1m | none | load | -| jvm_memory_used_bytes | byte | jvm memory used | -| jvm_memory_max_bytes | byte | jvm max memory | -| jvm_gc_pause_seconds_count | none | gc count | -| jvm_gc_pause_seconds_sum | second | gc time | -| jvm_threads_daemon | none | jvm threads count | +| Metric name | Metric unit | Metric help description | +|----------------------------|-------------|-------------------------| +| system_cpu_usage | none | cpu usage | +| system_load_average_1m | none | load | +| jvm_memory_used_bytes | byte | jvm memory used | +| jvm_memory_max_bytes | byte | jvm max memory | +| jvm_gc_pause_seconds_count | none | gc count | +| jvm_gc_pause_seconds_sum | second | gc time | +| jvm_threads_daemon | none | jvm threads count | #### Metric 
set:Nacos -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| http_server_requests_seconds_count | second | http requests count | -| http_server_requests_seconds_sum | second | http requests time | -| nacos_timer_seconds_sum | second | Nacos config notify time | -| nacos_timer_seconds_count | none | Nacos config notify count | -| nacos_monitor{name='longPolling'} | none | Nacos config connection count | -| nacos_monitor{name='configCount'} | none | Nacos configuration file count | -| nacos_monitor{name='dumpTask'} | none | Nacos config dump task count | -| nacos_monitor{name='notifyTask'} | none | Nacos config notify task count | -| nacos_monitor{name='getConfig'} | none | Nacos config read configuration count | -| nacos_monitor{name='publish'} | none | Nacos config update configuration count | -| nacos_monitor{name='ipCount'} | none | Nacos naming ip count | -| nacos_monitor{name='domCount'} | none | Nacos naming domain count(1.x version) | -| nacos_monitor{name='serviceCount'} | none | Nacos naming domain count(2.x version) | -| nacos_monitor{name='failedPush'} | none | Nacos naming push fail count | -| nacos_monitor{name='avgPushCost'} | second | Nacos naming push cost time(average) | -| nacos_monitor{name='leaderStatus'} | none | Nacos naming if node is leader | -| nacos_monitor{name='maxPushCost'} | second | Nacos naming push cost time(max) | -| nacos_monitor{name='mysqlhealthCheck'} | none | Nacos naming mysql health check count | -| nacos_monitor{name='httpHealthCheck'} | none | Nacos naming http health check count | -| nacos_monitor{name='tcpHealthCheck'} | none | Nacos naming tcp health check count | +| Metric name | Metric unit | Metric help description | +|----------------------------------------|-------------|-----------------------------------------| +| http_server_requests_seconds_count | second | http requests count | +| http_server_requests_seconds_sum | second | http requests time | +| 
nacos_timer_seconds_sum | second | Nacos config notify time | +| nacos_timer_seconds_count | none | Nacos config notify count | +| nacos_monitor{name='longPolling'} | none | Nacos config connection count | +| nacos_monitor{name='configCount'} | none | Nacos configuration file count | +| nacos_monitor{name='dumpTask'} | none | Nacos config dump task count | +| nacos_monitor{name='notifyTask'} | none | Nacos config notify task count | +| nacos_monitor{name='getConfig'} | none | Nacos config read configuration count | +| nacos_monitor{name='publish'} | none | Nacos config update configuration count | +| nacos_monitor{name='ipCount'} | none | Nacos naming ip count | +| nacos_monitor{name='domCount'} | none | Nacos naming domain count(1.x version) | +| nacos_monitor{name='serviceCount'} | none | Nacos naming domain count(2.x version) | +| nacos_monitor{name='failedPush'} | none | Nacos naming push fail count | +| nacos_monitor{name='avgPushCost'} | second | Nacos naming push cost time(average) | +| nacos_monitor{name='leaderStatus'} | none | Nacos naming if node is leader | +| nacos_monitor{name='maxPushCost'} | second | Nacos naming push cost time(max) | +| nacos_monitor{name='mysqlhealthCheck'} | none | Nacos naming mysql health check count | +| nacos_monitor{name='httpHealthCheck'} | none | Nacos naming http health check count | +| nacos_monitor{name='tcpHealthCheck'} | none | Nacos naming tcp health check count | #### Metric set:Nacos exception -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| nacos_exception_total{name='db'} | none | database exception | -| nacos_exception_total{name='configNotify'} | none | Nacos config notify exception | -| nacos_exception_total{name='unhealth'} | none | Nacos config server health check exception | -| nacos_exception_total{name='disk'} | none | Nacos naming write disk exception | -| nacos_exception_total{name='leaderSendBeatFailed'} | none | Nacos naming leader send heart 
beat fail count | -| nacos_exception_total{name='illegalArgument'} | none | request argument illegal count | -| nacos_exception_total{name='nacos'} | none | Nacos inner exception | +| Metric name | Metric unit | Metric help description | +|----------------------------------------------------|-------------|------------------------------------------------| +| nacos_exception_total{name='db'} | none | database exception | +| nacos_exception_total{name='configNotify'} | none | Nacos config notify exception | +| nacos_exception_total{name='unhealth'} | none | Nacos config server health check exception | +| nacos_exception_total{name='disk'} | none | Nacos naming write disk exception | +| nacos_exception_total{name='leaderSendBeatFailed'} | none | Nacos naming leader send heart beat fail count | +| nacos_exception_total{name='illegalArgument'} | none | request argument illegal count | +| nacos_exception_total{name='nacos'} | none | Nacos inner exception | #### Metric set:client -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| nacos_monitor{name='subServiceCount'} | none | subscribed services count | -| nacos_monitor{name='pubServiceCount'} | none | published services count | -| nacos_monitor{name='configListenSize'} | none | listened configuration file count | -| nacos_client_request_seconds_count | none | request count | -| nacos_client_request_seconds_sum | second | request time | - \ No newline at end of file +| Metric name | Metric unit | Metric help description | +|----------------------------------------|-------------|-----------------------------------| +| nacos_monitor{name='subServiceCount'} | none | subscribed services count | +| nacos_monitor{name='pubServiceCount'} | none | published services count | +| nacos_monitor{name='configListenSize'} | none | listened configuration file count | +| nacos_client_request_seconds_count | none | request count | +| nacos_client_request_seconds_sum | second | request 
time | + diff --git a/home/versioned_docs/version-v1.5.x/help/nebulagraph.md b/home/versioned_docs/version-v1.5.x/help/nebulagraph.md index ae2cfb4683f..c23e39c14fe 100644 --- a/home/versioned_docs/version-v1.5.x/help/nebulagraph.md +++ b/home/versioned_docs/version-v1.5.x/help/nebulagraph.md @@ -14,7 +14,7 @@ The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. ``` -### +### **1、Obtain available parameters through the stats and rocksdb stats interfaces.** @@ -36,7 +36,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -55,7 +55,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat Too many indicators, related links are as follows **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------------------------------|-------------|--------------------------------------------------------------| | num_queries_hit_memory_watermark_rate | | The rate of statements that reached the memory watermark. | | num_queries_hit_memory_watermark_sum | | The sum of statements that reached the memory watermark. 
| @@ -67,8 +67,9 @@ Too many indicators, related links are as follows Too many indicators, related links are as follows **https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |----------------------------|-------------|-------------------------------------------------------------| | rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. | | rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | | ... | | ... | + diff --git a/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md b/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md index d0da21a7adb..c39195f427e 100644 --- a/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md +++ b/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md @@ -11,7 +11,7 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open ### Configuration parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------| | Target Host | The IPv4, IPv6, or domain name of the monitored peer. Note ⚠️ without the protocol header (eg: https://, http://). | | Task Name | Identifies the name of this monitor, ensuring uniqueness of the name. 
| @@ -35,21 +35,21 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open #### Metric Set: Session -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------------|-------------|----------------------------------| | session | None | Number of sessions | | running_query_count | None | Number of queries being executed | #### Metric Set: Jobs -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |--------------|-------------|-------------------------------------------| | queue_jobs | None | Number of pending background tasks | | running_jobs | None | Number of background tasks being executed | #### Metric Set: Cluster node info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------------|-------------|---------------------------------| | total_storage_node | None | Number of storage nodes | | offline_storage_node | None | Number of offline storage nodes | @@ -60,7 +60,7 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open #### Metric Set: Storage Nodes -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-----------------------|-------------|-------------------------------------------------------| | host | None | Node address | | port | None | Port | @@ -89,3 +89,4 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open | version | None | Version | > If you need to customize monitoring templates to collect data from NebulaGraph clusters, please refer to: [NGQL Custom Monitoring](../advanced/extend-ngql.md) + diff --git a/home/versioned_docs/version-v1.5.x/help/nginx.md b/home/versioned_docs/version-v1.5.x/help/nginx.md index 99bb389000c..f630e4d4d24 100644 --- a/home/versioned_docs/version-v1.5.x/help/nginx.md +++ 
b/home/versioned_docs/version-v1.5.x/help/nginx.md @@ -20,6 +20,7 @@ If you want to monitor information in 'Nginx' with this monitoring type, you nee ```shell nginx -V ``` + View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. 2. Compile and install Nginx, add `ngx_http_stub_status_module` module @@ -50,6 +51,7 @@ server { } } ``` + 4. Reload Nginx ```shell @@ -107,14 +109,13 @@ nginx -s reload 4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. - **Refer Doc: https://github.com/zls0424/ngx_req_status** **⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -128,28 +129,27 @@ nginx -s reload #### Metrics Set:nginx_status -| Metric name | Metric unit | Metric help description | -|-------------|-------------|------------------------------------------| -| accepts | | Accepted connections | -| handled | | Successfully processed connections | -| active | | Currently active connections | -| dropped | | Discarded connections | -| requests | | Client requests | -| reading | | Connections performing read operations | -| writing | | Connections performing write operations | -| waiting | | Waiting connections | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-----------------------------------------| +| accepts | | Accepted connections | +| handled | | Successfully processed connections | +| active | | Currently active connections | +| dropped | | Discarded connections | +| requests | | Client requests | +| reading | | Connections performing read operations | +| writing | | Connections performing write operations | +| waiting | | Waiting connections | #### Metrics Set:req_status -| Metric name | Metric unit | Metric help description | -|-------------|-------------|---------------------------------| -| zone_name | | Group category | -| key | | Group name | -| max_active | | Maximum concurrent connections | -| max_bw | kb | Maximum bandwidth | -| traffic | kb | Total traffic | -| requests | | Total requests | -| active | | Current concurrent connections | -| bandwidth | kb | Current bandwidth | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------| +| zone_name | | Group category | +| key | | Group name | +| max_active | | Maximum concurrent connections | +| max_bw | kb | Maximum bandwidth | +| traffic | kb | Total traffic | +| requests | | Total requests | +| active | | Current concurrent connections | +| bandwidth | kb | Current bandwidth | diff --git 
a/home/versioned_docs/version-v1.5.x/help/ntp.md b/home/versioned_docs/version-v1.5.x/help/ntp.md index 5eca6c58e80..666f2a6b39a 100644 --- a/home/versioned_docs/version-v1.5.x/help/ntp.md +++ b/home/versioned_docs/version-v1.5.x/help/ntp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -23,7 +23,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito #### Metrics Set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |--------------|-------------|------------------------------------------------------------------------------------------| | responseTime | ms | The time it takes for the NTP server to respond to a request). | | time | ms | The current time reported by the NTP server). | diff --git a/home/versioned_docs/version-v1.5.x/help/openai.md b/home/versioned_docs/version-v1.5.x/help/openai.md index 7fc70548645..7165925372f 100644 --- a/home/versioned_docs/version-v1.5.x/help/openai.md +++ b/home/versioned_docs/version-v1.5.x/help/openai.md @@ -8,6 +8,7 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI ### Preparation #### Obtain Session Key + > 1. 
Open Chrome browser's network request interface > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` @@ -22,7 +23,7 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI ### Configuration Parameters -| Parameter Name | Parameter Description | +| Parameter Name | Parameter Description | |:------------------|------------------------------------------------------------------------------------------------| | Monitoring Host | Fill in api.openai.com here. | | Task Name | Identify the name of this monitoring, ensuring uniqueness. | @@ -36,7 +37,7 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI #### Metric Set: Credit Grants -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |----------------------|-------------|--------------------------------------| | Total Granted | USD ($) | Total granted credit limit | | Total Used | USD ($) | Total used credit limit | @@ -45,14 +46,14 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI #### Metric Set: Model Cost -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |-------------|-------------|------------------------| | Model Name | None | Name of the model | | Cost | USD ($) | Expenses for the model | #### Metric Set: Billing Subscription -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------------|-------------|-----------------------------------------| | Has Payment Method | None | Whether payment method is available | | Canceled | None | Whether subscription is cancelled | @@ -80,3 +81,4 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI | Tax IDs | None | Tax IDs | | Billing Address | None | Billing address | | Business Address | None | Business address | + diff --git a/home/versioned_docs/version-v1.5.x/help/opengauss.md 
b/home/versioned_docs/version-v1.5.x/help/opengauss.md index 650882861e8..28171658951 100644 --- a/home/versioned_docs/version-v1.5.x/help/opengauss.md +++ b/home/versioned_docs/version-v1.5.x/help/opengauss.md @@ -5,54 +5,52 @@ sidebar_label: OpenGauss Database keywords: [open source monitoring tool, open source database monitoring tool, monitoring opengauss database metrics] --- -> Collect and monitor the general performance Metrics of OpenGauss database. +> Collect and monitor the general performance Metrics of OpenGauss database. ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | #### Metric set:state -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| 
stats_reset | none | The last time these statistics were reset | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | #### Metric set:activity -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| running | connections | Number of current client connections | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | diff --git a/home/versioned_docs/version-v1.5.x/help/opensuse.md b/home/versioned_docs/version-v1.5.x/help/opensuse.md index e4220262e67..acaf86632d4 100644 --- a/home/versioned_docs/version-v1.5.x/help/opensuse.md +++ b/home/versioned_docs/version-v1.5.x/help/opensuse.md @@ -4,103 +4,105 @@ title: Monitoring OpenSUSE Operating System Monitoring sidebar_label: OpenSUSE OS keywords: [open source monitoring system, open source operating system monitoring, OpenSUSE OS 
monitoring] --- + > Collect and monitor general performance metrics of the OpenSUSE operating system. ### Configuration Parameters -| Parameter Name | Parameter Help Description | -| -------------- | ---------------------------------------------------------------------------- | -| Monitored Host | The IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ No protocol header (e.g., https://, http://). | -| Task Name | The name that identifies this monitoring, which must be unique. | -| Port | The port provided by Linux SSH, default is 22. | -| Timeout | Sets the connection timeout in milliseconds, default is 6000 ms. | -| Connection Reuse | Sets whether SSH connections are reused, default is :false. If false, a new connection is created each time information is retrieved. | -| Username | SSH connection username, optional. | -| Password | SSH connection password, optional. | -| Collector | Configures which collector is used to schedule data collection for this monitoring. | -| Monitoring Period | The interval time for periodic data collection in seconds, with a minimum interval of 30 seconds. | -| Binding Tags | Used for categorized management of monitoring resources. | -| Description | Additional notes and descriptions for this monitoring, where users can make notes. | -| Key | The key required to connect to the server. | +| Parameter Name | Parameter Help Description | +|-------------------|---------------------------------------------------------------------------------------------------------------------------------------| +| Monitored Host | The IPV4, IPV6, or domain name of the host being monitored. Note ⚠️ No protocol header (e.g., https://, http://). | +| Task Name | The name that identifies this monitoring, which must be unique. | +| Port | The port provided by Linux SSH, default is 22. | +| Timeout | Sets the connection timeout in milliseconds, default is 6000 ms. | +| Connection Reuse | Sets whether SSH connections are reused, default is :false. 
If false, a new connection is created each time information is retrieved. | +| Username | SSH connection username, optional. | +| Password | SSH connection password, optional. | +| Collector | Configures which collector is used to schedule data collection for this monitoring. | +| Monitoring Period | The interval time for periodic data collection in seconds, with a minimum interval of 30 seconds. | +| Binding Tags | Used for categorized management of monitoring resources. | +| Description | Additional notes and descriptions for this monitoring, where users can make notes. | +| Key | The key required to connect to the server. | ### Collection Metrics #### Metric Set: System Basic Information -| Metric Name | Unit | Metric Help Description | -| --------------- | ------- | ------------------------ | -| Host Name | None | Host name | -| System Version | None | Operating system version| -| Uptime | None | Uptime | +| Metric Name | Unit | Metric Help Description | +|----------------|------|--------------------------| +| Host Name | None | Host name | +| System Version | None | Operating system version | +| Uptime | None | Uptime | #### Metric Set: CPU Information -| Metric Name | Unit | Metric Help Description | -| --------------- | ----- | ---------------------------------- | -| info | None | CPU model | -| cores | Cores | Number of CPU cores | -| interrupt | Count | Number of CPU interrupts | -| load | None | Average CPU load over the last 1/5/15 minutes | -| context_switch | Count | Number of context switches | -| usage | % | CPU usage rate | +| Metric Name | Unit | Metric Help Description | +|----------------|-------|-----------------------------------------------| +| info | None | CPU model | +| cores | Cores | Number of CPU cores | +| interrupt | Count | Number of CPU interrupts | +| load | None | Average CPU load over the last 1/5/15 minutes | +| context_switch | Count | Number of context switches | +| usage | % | CPU usage rate | #### Metric Set: Memory 
Information -| Metric Name | Unit | Metric Help Description | -| ----------- | ---- | ------------------------ | -| total | Mb | Total memory capacity | -| used | Mb | Memory used by user programs | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory used for cache | +| Metric Name | Unit | Metric Help Description | +|-------------|------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | Memory used by user programs | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory used for cache | | available | Mb | Remaining available memory capacity | -| usage | % | Memory usage rate | +| usage | % | Memory usage rate | #### Metric Set: Disk Information -| Metric Name | Unit | Metric Help Description | -| --------------- | ----- | ----------------------------- | -| disk_num | Count | Total number of disks | -| partition_num | Count | Total number of partitions | -| block_write | Count | Total number of blocks written to disk | -| block_read | Count | Total number of blocks read from disk | -| write_rate | iops | Disk block write rate per second | +| Metric Name | Unit | Metric Help Description | +|---------------|-------|----------------------------------------| +| disk_num | Count | Total number of disks | +| partition_num | Count | Total number of partitions | +| block_write | Count | Total number of blocks written to disk | +| block_read | Count | Total number of blocks read from disk | +| write_rate | iops | Disk block write rate per second | #### Metric Set: Network Card Information -| Metric Name | Unit | Metric Help Description | -| ------------------- | ---- | -------------------------- | -| interface_name | None | Network card name | -| receive_bytes | Mb | Inbound data traffic | -| transmit_bytes | Mb | Outbound data traffic | +| Metric Name | Unit | Metric Help Description | +|----------------|------|-------------------------| +| interface_name | None | Network card name | +| receive_bytes | 
Mb | Inbound data traffic | +| transmit_bytes | Mb | Outbound data traffic | #### Metric Set: File System | Metric Name | Unit | Metric Help Description | -| ---------- | ---- | ------------------------ | -| filesystem | None | Name of the file system | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % | Usage rate | -| mounted | None | Mount point directory | +|-------------|------|-------------------------| +| filesystem | None | Name of the file system | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | Usage rate | +| mounted | None | Mount point directory | #### Metric Set: Top 10 CPU Processes Statistics for the top 10 processes using the CPU. Statistics include: Process ID, CPU usage, memory usage, executed command. -| Metric Name | Unit | Metric Help Description | -| ------------ | ---- | ------------------------ | -| pid | None | Process ID | -| cpu_usage | % | CPU usage rate | -| mem_usage | % | Memory usage rate | -| command | None | Executed command | +| Metric Name | Unit | Metric Help Description | +|-------------|------|-------------------------| +| pid | None | Process ID | +| cpu_usage | % | CPU usage rate | +| mem_usage | % | Memory usage rate | +| command | None | Executed command | #### Metric Set: Top 10 Memory Processes Statistics for the top 10 processes using memory. Statistics include: Process ID, memory usage, CPU usage, executed command. 
-| Metric Name | Unit | Metric Help Description | -| ------------ | ---- | ------------------------ | -| pid | None | Process ID | -| mem_usage | % | Memory usage rate | -| cpu_usage | % | CPU usage rate | -| command | None | Executed command | \ No newline at end of file +| Metric Name | Unit | Metric Help Description | +|-------------|------|-------------------------| +| pid | None | Process ID | +| mem_usage | % | Memory usage rate | +| cpu_usage | % | CPU usage rate | +| command | None | Executed command | + diff --git a/home/versioned_docs/version-v1.5.x/help/oracle.md b/home/versioned_docs/version-v1.5.x/help/oracle.md index 5410e53decb..50d2f6422bc 100644 --- a/home/versioned_docs/version-v1.5.x/help/oracle.md +++ b/home/versioned_docs/version-v1.5.x/help/oracle.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -27,37 +27,38 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| database_version | none | Database version | -| database_type | none | Database type | -| hostname | none | Host name | -| instance_name | none | Database instance name | -| startup_time | none | Database start time | -| status | none | Database status | +| Metric name | Metric unit | Metric help description | +|------------------|-------------|-------------------------| +| database_version | none | Database version | +| database_type | none | Database type | +| hostname | none | Host name | +| instance_name | none | Database instance name | +| startup_time | none | Database start time | +| status | none | Database status | #### Metric set:tablespace -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| file_id | none | File ID | -| file_name | none | File name | -| tablespace_name | none | Table space name | -| status | none | Status | -| bytes | MB | Size | -| blocks | none | Number of blocks | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------| +| file_id | none | File ID | +| file_name | none | File name | +| tablespace_name | none | Table space name | +| status | none | Status | +| bytes | MB | Size | +| blocks | none | Number of blocks | #### Metric set:user_connect -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| username | none | Username | -| counts | number | Current connection counts | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|---------------------------| +| username | none | Username | +| counts | number | Current connection counts | #### Metric set:performance -| Metric name | Metric 
unit | Metric help description | -| ----------- | ----------- | ----------- | -| qps | QPS | I/O Requests per second | -| tps | TPS | User transaction per second | -| mbps | MBPS | I/O Megabytes per second | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-----------------------------| +| qps | QPS | I/O Requests per second | +| tps | TPS | User transaction per second | +| mbps | MBPS | I/O Megabytes per second | + diff --git a/home/versioned_docs/version-v1.5.x/help/ping.md b/home/versioned_docs/version-v1.5.x/help/ping.md index c5603fdfbce..7c894f488ff 100644 --- a/home/versioned_docs/version-v1.5.x/help/ping.md +++ b/home/versioned_docs/version-v1.5.x/help/ping.md @@ -5,32 +5,33 @@ sidebar_label: PING connectivity keywords: [open source monitoring tool, open source network monitoring tool, monitoring ping metrics] --- -> Ping the opposite end HOST address and judge its connectivity. +> Ping the opposite end HOST address and judge its connectivity. ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Ping timeout | Set the timeout when Ping does not respond to data, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Ping timeout | Set the timeout when Ping does not respond to data, unit:ms, default: 3000ms | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | ### Common Problem 1. Ping connectivity monitoring exception when installing hertzbeat for package deployment. 
The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. > When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address + diff --git a/home/versioned_docs/version-v1.5.x/help/plugin.md b/home/versioned_docs/version-v1.5.x/help/plugin.md index b8104a3594a..6c0f938c7a4 100644 --- a/home/versioned_docs/version-v1.5.x/help/plugin.md +++ b/home/versioned_docs/version-v1.5.x/help/plugin.md @@ -1,10 +1,11 @@ --- id: plugin title: Custom plugin -sidebar_label: Custom plugin +sidebar_label: Custom plugin --- ## Custom plugins + ### Introduction Currently, `Hertzbeat` relies on the `alert` module to notify the user, and then the user can take actions such as sending requests, executing `sql`, executing `shell` scripts, etc. However, this can only be automated manually or by `webhook` to receive the alert message. @@ -13,6 +14,7 @@ After adding the customized code, you only need to package the `plugin` module, Currently, `HertzBeat` only set up the trigger `alert` method after alarm, if you need to set up the trigger method at the time of acquisition, startup program, etc., please mention `Task` in `https://github.com/apache/hertzbeat/issues/new/choose`. ### Specific uses + 1. Pull the master branch code `git clone https://github.com/apache/hertzbeat.git` and locate the `plugin` module's `Plugin` interface. ![plugin-1.png](/img/docs/help/plugin-1.png) @@ -23,3 +25,4 @@ Currently, `HertzBeat` only set up the trigger `alert` method after alarm, if yo 4. 
Copy the packaged `jar` package to the `ext-lib` directory under the installation directory (for `docker` installations, mount the `ext-lib` directory first, then copy it there). ![plugin-4.png](/img/docs/help/plugin-4.png) 5. Then restart `HertzBeat` to enable the customized post-alert handling policy. + diff --git a/home/versioned_docs/version-v1.5.x/help/pop3.md b/home/versioned_docs/version-v1.5.x/help/pop3.md index 822192ad66d..fffff2a494f 100644 --- a/home/versioned_docs/version-v1.5.x/help/pop3.md +++ b/home/versioned_docs/version-v1.5.x/help/pop3.md @@ -24,10 +24,9 @@ If you want to monitor information in 'POP3' with this monitoring type, you just 5. 通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 ``` - ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -42,9 +41,8 @@ If you want to monitor information in 'POP3' with this monitoring type, you just #### Metrics Set:email_status -| Metric name | Metric unit | Metric help description | -|--------------|-------------|------------------------------------------| -| email_count | | Number of emails | -| mailbox_size | kb | The total size of emails in the mailbox | - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-----------------------------------------| +| email_count | | Number of emails | +| mailbox_size | kb | The total size of emails in the mailbox | diff --git a/home/versioned_docs/version-v1.5.x/help/port.md b/home/versioned_docs/version-v1.5.x/help/port.md index e3350a8776f..7f420fd1375 100644 --- a/home/versioned_docs/version-v1.5.x/help/port.md +++ b/home/versioned_docs/version-v1.5.x/help/port.md @@ -7,25 +7,23 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito > Judge whether the exposed port of the opposite end service is available, then judge whether the opposite end service is available, and collect Metrics such as response time for monitoring. -### Configuration parameter - -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Connection timeout | Waiting timeout for port connection, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Connection timeout | Waiting timeout for port connection, unit:ms, default: 3000ms | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:summary -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | - - +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.5.x/help/postgresql.md b/home/versioned_docs/version-v1.5.x/help/postgresql.md index de14f9d62eb..57834a713bd 100644 --- a/home/versioned_docs/version-v1.5.x/help/postgresql.md +++ b/home/versioned_docs/version-v1.5.x/help/postgresql.md @@ -9,50 +9,48 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | #### Metric set:state -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | 
----------- | -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| stats_reset | none | The last time these statistics were reset | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | #### Metric set:activity -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- 
| -| running | connections | Number of current client connections | - +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | diff --git a/home/versioned_docs/version-v1.5.x/help/process.md b/home/versioned_docs/version-v1.5.x/help/process.md index 599c4f1ea7b..825a20ac43b 100644 --- a/home/versioned_docs/version-v1.5.x/help/process.md +++ b/home/versioned_docs/version-v1.5.x/help/process.md @@ -4,34 +4,33 @@ title: Monitoring Linux Process Monitoring sidebar_label: Process keywords: [Open Source Monitoring System, Operating System Process Monitoring, Process Monitoring] --- + > Collect and monitor basic information of processes on Linux systems, including CPU usage, memory usage, physical memory, IO, etc. ## Configuration Parameters - -| Parameter Name | Parameter Description | -| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Parameter Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Target Host | The IPv4, IPv6, or domain name of the monitored endpoint. Note ⚠️: Do not include the protocol header (e.g., https://, http://). | -| Task Name | Identifies the name of this monitoring, ensuring uniqueness. | -| Port | SSH port of the Linux system, default: 22 | -| Timeout | Sets the timeout for the connection in milliseconds, default is 6000 milliseconds. | -| Reuse Connection | Sets whether SSH connection is reused, default is false. When false, a new connection is created for each information retrieval. | -| Username | Username for the server. | -| Password | Password for the server. | -| Process Name | Name or part of the name of the process to be monitored. 
| -| Collector | Specifies which collector to use for scheduling this monitoring. | -| Monitoring Interval | Interval for periodic data collection, in seconds. Minimum interval that can be set is 30 seconds. | -| Tags | Used for categorizing and managing monitoring resources. | -| Description | Additional notes and descriptions for identifying this monitoring. Users can add remarks here. | -| Private Key | Private key required for connecting to the server. | +| Task Name | Identifies the name of this monitoring, ensuring uniqueness. | +| Port | SSH port of the Linux system, default: 22 | +| Timeout | Sets the timeout for the connection in milliseconds, default is 6000 milliseconds. | +| Reuse Connection | Sets whether SSH connection is reused, default is false. When false, a new connection is created for each information retrieval. | +| Username | Username for the server. | +| Password | Password for the server. | +| Process Name | Name or part of the name of the process to be monitored. | +| Collector | Specifies which collector to use for scheduling this monitoring. | +| Monitoring Interval | Interval for periodic data collection, in seconds. Minimum interval that can be set is 30 seconds. | +| Tags | Used for categorizing and managing monitoring resources. | +| Description | Additional notes and descriptions for identifying this monitoring. Users can add remarks here. | +| Private Key | Private key required for connecting to the server. 
| ### Metrics Collected #### Metric Set: Process Basic Information - | Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | ------------------ | +|-------------|-------------|--------------------| | PID | NONE | Process ID | | User | NONE | User | | CPU | NONE | CPU Usage | @@ -41,9 +40,8 @@ keywords: [Open Source Monitoring System, Operating System Process Monitoring, P #### Metric Set: Memory Usage Information - | Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | ------------------ | +|-------------|-------------|--------------------| | PID | NONE | Process ID | | detail | NONE | Detailed metrics | @@ -63,9 +61,8 @@ Includes metrics for: #### Metric Set: Other Monitoring Information - -| Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | --------------------------------- | +| Metric Name | Metric Unit | Metric Description | +|-------------|-------------|-----------------------------------| | PID | NONE | Process ID | | path | NONE | Execution Path | | date | NONE | Start Time | @@ -73,9 +70,8 @@ Includes metrics for: #### Metric Set: IO - | Metric Name | Metric Unit | Metric Description | -| ----------- | ----------- | ------------------ | +|-------------|-------------|--------------------| | PID | NONE | Process ID | | metric | NONE | Metric Name | | value | NONE | Metric Value | @@ -89,3 +85,4 @@ Includes metrics for: - read_bytes (Actual number of bytes read by the process from disk) - write_bytes (Actual number of bytes written by the process to disk) - cancelled_write_bytes (Actual number of bytes cancelled by the process while writing to disk) + diff --git a/home/versioned_docs/version-v1.5.x/help/prometheus.md b/home/versioned_docs/version-v1.5.x/help/prometheus.md index 4de9f80f67d..571a2e9b51b 100755 --- a/home/versioned_docs/version-v1.5.x/help/prometheus.md +++ b/home/versioned_docs/version-v1.5.x/help/prometheus.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring 
tool, Prometheus protocol monitoring ] ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Task Name | Identify the name of this monitoring. The name needs to be unique. | @@ -40,5 +40,3 @@ You can use the following configuration: Keep the rest of the settings default. - - diff --git a/home/versioned_docs/version-v1.5.x/help/pulsar.md b/home/versioned_docs/version-v1.5.x/help/pulsar.md index 2cc520d6189..1424bd3f58b 100644 --- a/home/versioned_docs/version-v1.5.x/help/pulsar.md +++ b/home/versioned_docs/version-v1.5.x/help/pulsar.md @@ -4,52 +4,48 @@ title: Monitoring Pulsar Monitoring sidebar_label: Apache Pulsar keywords: [open-source monitoring system, open-source database monitoring, HbaseMaster monitoring] --- + > Collecting and monitoring general performance metrics of Pulsar **Protocol Used: HTTP** ## Configuration Parameters - -| Parameter Name | Description | -| ------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| Parameter Name | Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------| | Target Host | The monitored endpoint's IPV4, IPV6, or domain name. Note⚠️: Do not include the protocol header (e.g., https://, http://). | -| Port | The webServicePort value of Pulsar, default is 8080. | -| Task Name | The name identifying this monitoring task, must be unique. | -| Query Timeout | Set the connection timeout in milliseconds, default is 3000 milliseconds. 
| -| Monitoring Interval | Interval time for periodic data collection, in seconds, minimum interval is 30 seconds. | -| Binding Tags | Used for categorizing monitoring resources. | -| Description/Remarks | Additional notes and descriptions for this monitoring task. Users can add more information here. | +| Port | The webServicePort value of Pulsar, default is 8080. | +| Task Name | The name identifying this monitoring task, must be unique. | +| Query Timeout | Set the connection timeout in milliseconds, default is 3000 milliseconds. | +| Monitoring Interval | Interval time for periodic data collection, in seconds, minimum interval is 30 seconds. | +| Binding Tags | Used for categorizing monitoring resources. | +| Description/Remarks | Additional notes and descriptions for this monitoring task. Users can add more information here. | ### Collected Metrics #### Metric Set: Version Information - -| Metric Name | Unit | Description | -| ------------ | ---- | ------------------- | -| Version Info | NONE | Version Information | +| Metric Name | Unit | Description | +|--------------|------|---------------------| +| Version Info | NONE | Version Information | #### Metric Set: process_start_time_seconds - -| Metric Name | Unit | Description | -| ------------------ | ---- | ------------------ | -| Process Start Time | NONE | Process Start Time | +| Metric Name | Unit | Description | +|--------------------|------|--------------------| +| Process Start Time | NONE | Process Start Time | #### Metric Set: process_open_fds - -| Metric Name | Unit | Description | -| --------------------- | ---- | ------------------------------- | -| Open File Descriptors | NONE | Number of Open File Descriptors | +| Metric Name | Unit | Description | +|-----------------------|------|---------------------------------| +| Open File Descriptors | NONE | Number of Open File Descriptors | #### Metric Set: process_max_fds - -| Metric Name | Unit | Description | -| -------------------- | ---- | 
---------------------------------- | -| Max File Descriptors | NONE | Maximum Number of File Descriptors | +| Metric Name | Unit | Description | +|----------------------|------|------------------------------------| +| Max File Descriptors | NONE | Maximum Number of File Descriptors | #### Metric Set: jvm_memory_pool_allocated_bytes diff --git a/home/versioned_docs/version-v1.5.x/help/rabbitmq.md b/home/versioned_docs/version-v1.5.x/help/rabbitmq.md index 1bcd3ea5851..917ca63c3d3 100644 --- a/home/versioned_docs/version-v1.5.x/help/rabbitmq.md +++ b/home/versioned_docs/version-v1.5.x/help/rabbitmq.md @@ -7,7 +7,7 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo > Monitoring the running status of RabbitMQ message middleware, nodes, topics and other related metrics. -### Pre-monitoring Operations +### Pre-monitoring Operations > HertzBeat uses RabbitMQ Management's Rest Api to collect RabbitMQ metric data. > Therefore, you need to enable the Management plug-in in your RabbitMQ environment @@ -24,7 +24,7 @@ rabbitmq-plugins enable rabbitmq_management ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | | Monitoring name | The name that identifies this monitoring, and the name needs to be unique. 
| @@ -40,7 +40,7 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: overview -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------|-------------|------------------------------------| | product_version | None | Product Version | | product_name | None | Product name | @@ -52,7 +52,7 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: object_totals -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |-------------|-------------|-----------------------------| | channels | none | total number of channels | | connections | none | total number of connections | @@ -62,24 +62,24 @@ rabbitmq-plugins enable rabbitmq_management #### metrics: nodes -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |--------------------|-------------|-----------------------------------------------------------| | name | None | The node name | | type | None | The node type | | running | None | Running state | | os_pid | None | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | +| mem_limit | MB | Memory usage high watermark | +| mem_used | MB | Total amount of memory used | | fd_total | None | File descriptors available | -| fd_used | None | File descriptors used | -| sockets_total | None | Sockets available | -| sockets_used | None | Sockets used | -| proc_total | None | Erlang process limit | -| proc_used | None | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | +| fd_used | None | File descriptors used | +| sockets_total | None | Sockets available | +| sockets_used | None | Sockets used | +| proc_total | None | Erlang process limit | +| proc_used | None | Erlang processes used | +| disk_free_limit | GB | Free disk space low watermark | | disk_free | GB | Free disk space | -| gc_num | None | GC runs | -| 
gc_bytes_reclaimed | MB | Bytes reclaimed by GC | +| gc_num | None | GC runs | +| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | | context_switches | None | Context_switches num | | io_read_count | None | Total number of read operations | | io_read_bytes | KB | Total data size read into disk | @@ -100,27 +100,27 @@ rabbitmq-plugins enable rabbitmq_management | queue_deleted | None | queue deleted num | | connection_closed | None | connection closed num | - #### metrics: queues -| Metric Name | Metric Unit | Metric Description | +| Metric Name | Metric Unit | Metric Description | |------------------------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | None | The name of the queue with non-ASCII characters escaped as in C. | +| name | None | The name of the queue with non-ASCII characters escaped as in C. | | node | None | The queue on the node name | -| state | None | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | +| state | None | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | | type | None | Queue type, one of: quorum, stream, classic. | -| vhost | None | vhost path | +| vhost | None | vhost path | | auto_delete | None | Whether the queue will be deleted automatically when no longer used | -| policy | None | Effective policy name for the queue. | +| policy | None | Effective policy name for the queue. | | consumers | None | Number of consumers. | | memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. 
| | messages_ready | None | Number of messages ready to be delivered to clients | -| messages_unacknowledged | None | Number of messages delivered to clients but not yet acknowledged | +| messages_unacknowledged | None | Number of messages delivered to clients but not yet acknowledged | | messages | None | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | None | Number of messages from messages_ready which are resident in ram | +| messages_ready_ram | None | Number of messages from messages_ready which are resident in ram | | messages_persistent | None | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | +| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | | message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | +| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | + diff --git a/home/versioned_docs/version-v1.5.x/help/redhat.md b/home/versioned_docs/version-v1.5.x/help/redhat.md index d877c46df36..2a8472e00d6 100644 --- a/home/versioned_docs/version-v1.5.x/help/redhat.md +++ b/home/versioned_docs/version-v1.5.x/help/redhat.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS ### Configuration 
Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IP, IPV6, or domain name of the monitored endpoint. Note ⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitoring, ensuring uniqueness. | @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|---------------------------| | Host Name | None | Host name. | | System Version | None | Operating system version. | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|-------------------------------------------| | info | None | CPU model. | | cores | None | Number of CPU cores. | @@ -47,7 +47,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Memory Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|------------------------------------| | total | Mb | Total memory capacity. | | used | Mb | Used memory by user programs. 
| @@ -58,7 +58,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Disk Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------|-------------|------------------------------------| | disk_num | None | Total number of disks. | | partition_num | None | Total number of partitions. | @@ -68,7 +68,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, RedHat OS #### Metric Set: Interface Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------------| | interface_name | None | Name of the network interface. | | receive_bytes | Mb | Inbound data traffic. | @@ -105,3 +105,4 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/versioned_docs/version-v1.5.x/help/redis.md b/home/versioned_docs/version-v1.5.x/help/redis.md index de0df0d52ca..bdb78ce3584 100644 --- a/home/versioned_docs/version-v1.5.x/help/redis.md +++ b/home/versioned_docs/version-v1.5.x/help/redis.md @@ -2,244 +2,239 @@ id: redis title: 监控:REDIS数据库监控 sidebar_label: REDIS -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] +keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] --- > 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 ### 配置参数 -| 参数名称 | 参数帮助描述 | -| ----------- | ----------- | -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 参数名称 | 参数帮助描述 | 
+|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | +| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | ### 采集指标 #### 指标集合:server -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制| -| atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本| -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间| -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理| -| executable | 无 | 服务器可执行文件的路径 | -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志| -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------|------|-----------------------------------------------| +| redis_version | 无 | Redis 服务器版本 | +| redis_git_sha1 | 无 | Git SHA1 | +| redis_git_dirty | 无 | Git dirty flag | +| redis_build_id | 无 | redis 构建的id | +| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | +| os | 无 | Redis 服务器的宿主操作系统 | +| arch_bits | 无 | 架构(32 或 64 位) | +| 
multiplexing_api | 无 | Redis使用的事件循环机制 | +| atomicvar_api | 无 | Redis使用的原子 API | +| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本 | +| process_id | 无 | 服务器进程的PID | +| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | +| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | +| tcp_port | 无 | TCP/IP侦听端口 | +| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | +| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | +| uptime_in_days | 无 | 自Redis服务器启动后的天数 | +| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | +| configured_hz | 无 | 服务器配置的频率设置 | +| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | +| executable | 无 | 服务器可执行文件的路径 | +| config_file | 无 | 配置文件的路径 | +| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | +| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | #### 指标集合:clients -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值| -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。| -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING)| -| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------------|------|--------------------------------------------------------------------------------| +| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | +| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | +| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | +| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | +| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | +| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | +| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | +| 
clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | #### 指标集合:memory -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字| -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值| -| used_memory_peak | byte | Redis消耗的峰值内存(字节)| -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | -| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和| -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节)| -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte| 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同| -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片| -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| number_of_cached_scripts | 无 |缓存的lua脚本数量 | -| maxmemory | byte | maxmemory配置指令的值| -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | -| rss_overhead_bytes | byte | 
used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。| -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | -| mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL)| -| lazyfreed_objects | 无 | 已延迟释放的对象数。| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|----------|-----------------------------------------------------------------------------------------------| +| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | +| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | +| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | +| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak | byte | Redis消耗的峰值内存(字节) | +| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | +| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和 | +| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | +| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | +| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | +| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | +| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | +| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | +| total_system_memory | byte | Redis主机的内存总量 | +| 
total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | +| used_memory_lua | byte | Lua引擎使用的字节数 | +| used_memory_lua_human | KB | 上一个值的人类可读值 | +| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | +| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | +| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | +| maxmemory | byte | maxmemory配置指令的值 | +| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | +| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | +| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | +| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | +| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | +| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | +| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | +| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | +| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | +| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | +| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | +| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | +| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | +| mem_clients_normal | 无 | 普通客户端使用的内存 | +| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | +| mem_allocator | 无 | 内存分配器,在编译时选择。 | +| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | +| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | +| lazyfreed_objects | 无 | 已延迟释放的对象数。 | #### 指标集合:persistence -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是| -| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 
当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比| -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功| -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite| -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------------------|--------|-----------------------------------------------------------------------------------------------------| +| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | +| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | +| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | +| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | +| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | +| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | +| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | +| rdb_bgsave_in_progress | 无 | 
服务器是否正在创建rdb文件 0 - 否 1 - 是 | +| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | +| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | +| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | +| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | +| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | +| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | +| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite | +| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | +| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | +| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | +| aof_last_write_status | 无 | 上次aof写入状态 | +| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | +| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | +| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | #### 指标集合:stats -| 指标名称 |指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数| -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| 
pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数| -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数| -| tracking_total_keys | 无 | key 查询的总数| -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数| -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|----------------------------------------------------| +| total_connections_received | 无 | 服务器接受的连接总数 | +| total_commands_processed | 无 | 服务器处理的命令总数 | +| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | +| total_net_input_bytes | byte | 从网络读取的字节总数 | +| total_net_output_bytes | byte | 写入网络的总字节数 | +| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | +| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | +| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | +| sync_full | 无 | 具有副本的完整重新同步数 | +| sync_partial_ok | 无 | 接受的部分重新同步请求数 | +| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | +| expired_keys | 无 | 过期的key总数 | +| expired_stale_perc | 无 | 可能过期key的百分比 | +| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | +| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | +| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | +| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | +| keyspace_misses | 无 | 在主dict 中未查到key的次数 | +| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | +| pubsub_patterns | 无 | 客户端使用 pub/sub 
模式的全局数量 | +| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | +| total_forks | 无 | 自服务器启动以来的fork操作总数 | +| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | +| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | +| active_defrag_hits | 无 | 主动碎片整理命中次数 | +| active_defrag_misses | 无 | 主动碎片整理未命中次数 | +| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | +| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | +| tracking_total_keys | 无 | key 查询的总数 | +| tracking_total_items | 无 | item查询的总数 | +| tracking_total_prefixes | 无 | 前缀查询的总数 | +| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | +| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | +| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | +| total_reads_processed | 无 | 正在读取的请求数 | +| total_writes_processed | 无 | 正在写入的请求数 | +| io_threaded_reads_processed | 无 | 正在读取的线程数 | +| io_threaded_writes_processed | 无 | 正在写入的线程数 | #### 指标集合:replication -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串| -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID| -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量| -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|--------------------------------|------|-------------------------------------------------------------------------------------| +| role | 无 | 节点角色 master 主节点 slave 从节点 | +| connected_slaves | 无 | 连接的从节点数 | +| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | +| master_replid | 无 | 实例启动的随机字符串 | +| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | +| master_repl_offset | 无 | 主从同步偏移量 | +| second_repl_offset | 无 
| 接受从服务ID的最大偏移量 | +| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | +| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | +| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | +| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | #### 指标集合:cpu -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和| -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和| -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和| -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU| -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------------------|------|------------------------| +| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | +| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | +| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | +| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | +| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | #### 指标集合:errorstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------------|------|-----------| +| errorstat_ERR | 无 | 错误累计出现的次数 | +| errorstat_MISCONF | 无 | | #### 指标集合:cluster -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是| - +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|--------------------| +| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | #### 指标集合:commandstats -| 指标名称 | 指标单位 | 指标帮助描述 | -| ----------- | ----------- | ----------- | -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数| -| cmdstat_get | 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| 
cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------------|------|---------------------------------------------------------------------------------------------------------------------------| +| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | +| cmdstat_get | 无 | get命令的统计信息 | +| cmdstat_setnx | 无 | setnx命令的统计信息 | +| cmdstat_hset | 无 | hset命令的统计信息 | +| cmdstat_hget | 无 | hget命令的统计信息 | +| cmdstat_lpush | 无 | lpush命令的统计信息 | +| cmdstat_rpush | 无 | rpush命令的统计信息 | +| cmdstat_lpop | 无 | lpop命令的统计信息 | +| cmdstat_rpop | 无 | rpop命令的统计信息 | +| cmdstat_llen | 无 | llen命令的统计信息 | + diff --git a/home/versioned_docs/version-v1.5.x/help/rocketmq.md b/home/versioned_docs/version-v1.5.x/help/rocketmq.md index f56bdfc2f14..f31dea47d9b 100644 --- a/home/versioned_docs/version-v1.5.x/help/rocketmq.md +++ b/home/versioned_docs/version-v1.5.x/help/rocketmq.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] ### Configuration parameters -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Namesrv Host | IPV4,IPV6 of RocketMQ name server(eg: https://, http://)。 | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. 
| @@ -24,7 +24,7 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] #### Metric set:cluster -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|---------------------------------------| | BrokerId | none | Broker id | | Address | none | Broker address | @@ -38,7 +38,7 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] #### Metric set:Consumer -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------|-------------|-------------------------| | Consumer_group | none | Consumer group | | Client_quantity | none | Number of clients | @@ -46,3 +46,4 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] | Consume_type | none | Consume type | | Consume_tps | none | Consume tps | | Delay | none | Delay | + diff --git a/home/versioned_docs/version-v1.5.x/help/rockylinux.md b/home/versioned_docs/version-v1.5.x/help/rockylinux.md index f83eb606b0a..b1e093bc210 100644 --- a/home/versioned_docs/version-v1.5.x/help/rockylinux.md +++ b/home/versioned_docs/version-v1.5.x/help/rockylinux.md @@ -9,7 +9,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu ### Configuration Parameters -| Parameter Name | Parameter help description | +| Parameter Name | Parameter help description | |---------------------|----------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | The IP, IPV6, or domain name of the monitored endpoint. Note ⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitoring, ensuring uniqueness. 
| @@ -28,7 +28,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Basic Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|---------------------------| | Host Name | None | Host name. | | System Version | None | Operating system version. | @@ -36,7 +36,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: CPU Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|-------------------------------------------| | info | None | CPU model. | | cores | None | Number of CPU cores. | @@ -47,7 +47,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Memory Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |-------------|-------------|------------------------------------| | total | Mb | Total memory capacity. | | used | Mb | Used memory by user programs. | @@ -58,7 +58,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Disk Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |---------------|-------------|------------------------------------| | disk_num | None | Total number of disks. | | partition_num | None | Total number of partitions. | @@ -68,7 +68,7 @@ keywords: [ Open Source Monitoring System, Open Source OS Monitoring, Rocky Linu #### Metric Set: Interface Info -| Metric Name | Metric Unit | Metric help description | +| Metric Name | Metric Unit | Metric help description | |----------------|-------------|--------------------------------| | interface_name | None | Name of the network interface. | | receive_bytes | Mb | Inbound data traffic. 
| @@ -105,3 +105,4 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | + diff --git a/home/versioned_docs/version-v1.5.x/help/shenyu.md b/home/versioned_docs/version-v1.5.x/help/shenyu.md index 01523769c78..c7f12bbfaf0 100644 --- a/home/versioned_docs/version-v1.5.x/help/shenyu.md +++ b/home/versioned_docs/version-v1.5.x/help/shenyu.md @@ -5,27 +5,27 @@ sidebar_label: Apache ShenYu keywords: [open source monitoring tool, open source apache shenyu monitoring tool, monitoring apache shenyu metrics] --- -> monitor ShenYu running status(JVM-related), include request response and other related metrics. +> monitor ShenYu running status(JVM-related), include request response and other related metrics. -## Pre-monitoring operations +## Pre-monitoring operations -Enable `metrics` plugin in ShenYu, expose it's prometheus metrics endpoint。 +Enable `metrics` plugin in ShenYu, expose it's prometheus metrics endpoint。 -Refer [ShenYu Document](https://shenyu.apache.org/docs/plugin-center/observability/metrics-plugin) +Refer [ShenYu Document](https://shenyu.apache.org/docs/plugin-center/observability/metrics-plugin) -Two Steps Mainly: +Two Steps Mainly: -1. add metrics plugin dependency in gateway's pom.xml. +1. add metrics plugin dependency in gateway's pom.xml. ```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + ``` -2. modify this config in shenyu gateway yaml. +2. modify this config in shenyu gateway yaml. 
```yaml shenyu: @@ -57,75 +57,74 @@ Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond #### Index collection: shenyu_request_total -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------------| -| value | None | Collect all requests from ShenYu gateway | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------| +| value | None | Collect all requests from ShenYu gateway | #### Metric collection: shenyu_request_throw_created -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------------| -| value | None | Collect the number of abnormal requests from ShenYu Gateway | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------------------| +| value | None | Collect the number of abnormal requests from ShenYu Gateway | #### Metric collection: process_cpu_seconds_total -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| value | none | total user and system CPU elapsed seconds | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------| +| value | none | total user and system CPU elapsed seconds | #### Metric collection: process_open_fds -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------| -| value | none | number of open file descriptors | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------| +| value | none | number of open file descriptors | #### Metric collection: process_max_fds -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|----------------| -| value | none | maximum number of open file descriptors | +| Metric Name | Metric Unit | Metric Help Description 
| +|-------------|-------------|-----------------------------------------| +| value | none | maximum number of open file descriptors | #### Metric collection: jvm_info | Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-----------| -| runtime | none | JVM version information | -| vendor | none | JVM version information | -| version | None | JVM version information | +|-------------|-------------|-------------------------| +| runtime | none | JVM version information | +| vendor | none | JVM version information | +| version | None | JVM version information | #### Metric collection: jvm_memory_bytes_used -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| area | None | JVM memory area | -| value | MB | used size of the given JVM memory region | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|------------------------------------------| +| area | None | JVM memory area | +| value | MB | used size of the given JVM memory region | #### Metric collection: jvm_memory_pool_bytes_used -| Metric Name | Metric Unit | Metric Help Description | -|--------|------|-----------------| -| pool | None | JVM memory pool | -| value | MB | used size of the given JVM memory pool | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|----------------------------------------| +| pool | None | JVM memory pool | +| value | MB | used size of the given JVM memory pool | #### Metric collection: jvm_memory_pool_bytes_committed -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------| -| pool | None | JVM memory pool | -| value | MB | The committed size of the given JVM memory pool | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------| +| pool | None | JVM memory pool | +| value | MB | The committed size of the given JVM 
memory pool | #### Metric collection: jvm_memory_pool_bytes_max -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------| ----------- | -| pool | None | JVM memory pool | -| value | MB | The maximum size of the memory pool for the given JVM | +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|-------------------------------------------------------| +| pool | None | JVM memory pool | +| value | MB | The maximum size of the memory pool for the given JVM | #### Metric collection: jvm_threads_state -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|-------------| -| state | none | thread state | -| value | None | The number of threads corresponding to the thread state | - +| Metric Name | Metric Unit | Metric Help Description | +|-------------|-------------|---------------------------------------------------------| +| state | none | thread state | +| value | None | The number of threads corresponding to the thread state | diff --git a/home/versioned_docs/version-v1.5.x/help/smtp.md b/home/versioned_docs/version-v1.5.x/help/smtp.md index 971de82c3e0..fedb17e0040 100644 --- a/home/versioned_docs/version-v1.5.x/help/smtp.md +++ b/home/versioned_docs/version-v1.5.x/help/smtp.md @@ -13,12 +13,11 @@ Determine whether the server is available through the hello command in SMTP > see https://datatracker.ietf.org/doc/html/rfc821#page-13 - **Protocol Use:SMTP** ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | @@ -33,7 +32,7 @@ Determine whether the server is available through the hello command in SMTP #### Metrics Set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |--------------|-------------|----------------------------------------------------------------| | responseTime | ms | The time it takes for the SMTP server to respond to a request. | | response | | Response Status. | diff --git a/home/versioned_docs/version-v1.5.x/help/spark.md b/home/versioned_docs/version-v1.5.x/help/spark.md index 3d4b44828ea..41865300024 100644 --- a/home/versioned_docs/version-v1.5.x/help/spark.md +++ b/home/versioned_docs/version-v1.5.x/help/spark.md @@ -15,12 +15,9 @@ keywords: [open source monitoring tool, open source java spark monitoring tool, Refer: https://spark.apache.org/docs/latest/spark-standalone.html - **监控配置spark的监控主要分为Master、Worker、driver、executor监控。Master和Worker的监控在spark集群运行时即可监控,Driver和Excutor的监控需要针对某一个app来进行监控。** **如果都要监控,需要根据以下步骤来配置** - - ## 第一步 **修改$SPARK_HOME/conf/spark-env.sh,添加以下语句:** @@ -36,8 +33,6 @@ export SPARK_DAEMON_JAVA_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.manageme 语句中有$JMX_PORT,这个的值可以自定义,也可以获取一个随机数作为端口号。 如果端口自定义为一个具体的值,而 spark 的 Master 和其中之一的 Worker 在同一台机器上,会出现端口冲突的情况。 - - ## 第二步 **vim $SPARK_HOME/conf/metrics.properties 添加如下内容** @@ -50,10 +45,6 @@ driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource ``` - - - - ## 第三步 **vim $SPARK_HOME/conf/spark-defaults.conf,添加以下项为driver和executor设置监控端口,在有程序运行的情况下,此端口会被打开。** @@ -69,11 +60,9 @@ gement.jmxremote.port=8711 在spark的Master和Worker正常运行以及spark-submit提交了一个程序的情况下,可以从linux中查询出端口号码。 - - ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | 
|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | | Monitoring name | Identify the name of this monitoring. The name needs to be unique | @@ -107,16 +96,15 @@ gement.jmxremote.port=8711 #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-----------------------|-------------|--------------------------| | LoadedClassCount | | Loaded Class Count | | TotalLoadedClassCount | | Total Loaded Class Count | | UnloadedClassCount | | Unloaded Class Count | - #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |-------------------------|-------------|----------------------------| | TotalStartedThreadCount | | Total Started Thread Count | | ThreadCount | | Thread Count | @@ -125,4 +113,3 @@ gement.jmxremote.port=8711 | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/spring_gateway.md b/home/versioned_docs/version-v1.5.x/help/spring_gateway.md index ae24228c222..66c5f0b4f29 100644 --- a/home/versioned_docs/version-v1.5.x/help/spring_gateway.md +++ b/home/versioned_docs/version-v1.5.x/help/spring_gateway.md @@ -19,6 +19,7 @@ If you want to monitor information in 'Spring Gateway' with this monitoring type spring-boot-starter-actuator ``` + **2. 
Modify the YML configuration exposure metric interface:** ```yaml @@ -35,56 +36,55 @@ management: ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. | -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 8080. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: Health -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------------------------------| -| status | None | Service health: UP, Down | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------| +| status | None | Service health: UP, Down | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|---------| -------- |----------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------| +| profile | None | The application runs profile: prod, dev, test | +| port | None | Apply the exposed port | +| os | None | Run the operating system | +| os_arch | None | Run the operating system architecture | +| jdk_vendor | None | jdk vendor | +| jvm_version | None | jvm version | #### metric Collection: threads -| Metric Name | metric unit | Metrics help describe | -| ---------------- 
|------|--------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|----------------------------------|-------------------| +| state | None | Thread status | +| number | None | This thread state corresponds to | number of threads | #### metric Collection: memory_used -| Metric Name | metric unit | Metrics help describe | -|---------|------|------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------------------| +| space | None | Memory space name | +| mem_used | MB | This space occupies a memory size of | #### metric Collection: route_info -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------| -| route_id | None | Route id | -| predicate | None | This is a routing matching rule | -| uri | None | This is a service resource identifier| -| order | None | The priority of this route | - +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|---------------------------------------| +| route_id | None | Route id | +| predicate | None | This is a routing matching rule | +| uri | None | This is a service resource identifier | +| order | None | The priority of this route | diff --git a/home/versioned_docs/version-v1.5.x/help/springboot2.md b/home/versioned_docs/version-v1.5.x/help/springboot2.md index ca46530f77b..6452aff270e 100644 --- a/home/versioned_docs/version-v1.5.x/help/springboot2.md +++ b/home/versioned_docs/version-v1.5.x/help/springboot2.md @@ -19,6 +19,7 @@ If you want to monitor information in 'SpringBoot' with this monitoring type, yo spring-boot-starter-actuator ``` + **2. 
Modify the YML configuration exposure metric interface:** ```yaml @@ -29,7 +30,9 @@ management: include: '*' enabled-by-default: on ``` + *Note: If your project also introduces authentication related dependencies, such as springboot security, the interfaces exposed by SpringBoot Actor may be intercepted. In this case, you need to manually release these interfaces. Taking springboot security as an example, you should add the following code to the Security Configuration class:* + ```java public class SecurityConfig extends WebSecurityConfigurerAdapter{ @Override @@ -45,47 +48,49 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ } } ``` + ### Configure parameters -| Parameter name | Parameter Help describes the | -| ------------ |------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 8080. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | ### Collect metrics #### metric Collection: Health -| Metric Name | metric unit | Metrics help describe | -| ------------------ | -------- |--------------------------------| -| status | None | Service health: UP, Down | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------| +| status | None | Service health: UP, Down | #### metric Collection: enviroment -| Metric Name | metric unit | Metrics help describe | -|---------| -------- |----------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------| +| profile | None | The application runs profile: prod, dev, test | +| port | None | Apply the exposed port | +| os | None | Run the operating system | +| os_arch | None | Run the operating system architecture | +| jdk_vendor | None | jdk vendor | +| jvm_version | None | jvm version | #### metric Collection: threads -| Metric Name | metric unit | Metrics help describe | -| ---------------- 
|------|--------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|----------------------------------|-------------------| +| state | None | Thread status | +| number | None | This thread state corresponds to | number of threads | #### metric Collection: memory_used -| Metric Name | metric unit | Metrics help describe | -|---------|------|------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|--------------------------------------| +| space | None | Memory space name | +| mem_used | MB | This space occupies a memory size of | + diff --git a/home/versioned_docs/version-v1.5.x/help/springboot3.md b/home/versioned_docs/version-v1.5.x/help/springboot3.md index 0dbc32fc834..47b3db10b5c 100644 --- a/home/versioned_docs/version-v1.5.x/help/springboot3.md +++ b/home/versioned_docs/version-v1.5.x/help/springboot3.md @@ -51,7 +51,7 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### Configuration Parameters -| Parameter Name | Parameter Description | +| Parameter Name | Parameter Description | |-------------------|----------------------------------------------------------------------------------------------------------------------| | Monitor Host | The monitored peer's IPV4, IPV6, or domain name. Note⚠️: Do not include protocol headers (eg: https://, http://). | | Task Name | Identifies the name of this monitor, ensuring uniqueness is necessary. 
| @@ -65,23 +65,28 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ ### Collection Metrics #### Metric Set: Availability + | Metric Name | Unit | Metric Description | |--------------|------|--------------------| | responseTime | ms | Response time | #### Metric Set: Threads -| Metric Name | Unit | Metric Description | + +| Metric Name | Unit | Metric Description | |-------------|------|----------------------------------| | state | None | Thread state | | size | None | Number of threads for this state | #### Metric Set: Memory Usage -| Metric Name | Unit | Metric Description | + +| Metric Name | Unit | Metric Description | |-------------|------|-----------------------------| | space | None | Memory space name | | mem_used | MB | Memory usage for this space | #### Metric Set: Health Status -| Metric Name | Unit | Metric Description | + +| Metric Name | Unit | Metric Description | |-------------|------|---------------------------------| | status | None | Service health status: UP, Down | + diff --git a/home/versioned_docs/version-v1.5.x/help/sqlserver.md b/home/versioned_docs/version-v1.5.x/help/sqlserver.md index cc12abf0d7e..71bd8ebdc83 100644 --- a/home/versioned_docs/version-v1.5.x/help/sqlserver.md +++ b/home/versioned_docs/version-v1.5.x/help/sqlserver.md @@ -9,51 +9,49 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 1433 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 1433 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| machine_name | none | Windows computer name running the server instance | -| server_name | none | Server and instance information SQL Server associated with Windows instance | -| version | none | Version of the instance,SQL Server,format is "major.minor.build.revision" | -| edition | none | The product SQL server version of the installed instance | -| start_time | none | Database start time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-----------------------------------------------------------------------------| +| machine_name | none | Windows computer name running the server instance | +| server_name | none | Server and instance information SQL Server associated with Windows instance | +| version | none | Version of the instance,SQL Server,format is "major.minor.build.revision" | +| edition | none | The product SQL server version of the installed 
instance | +| start_time | none | Database start time | #### Metric set:performance_counters -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| database_pages | none | Database pages, Number of pages obtained (buffer pool) | -| target_pages | none | Target pages, The desired number of pages that the buffer pool must have | -| page_life_expectancy | s | Page life expectancy. The time that data pages stay in the buffer pool. This time is generally greater than 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, Database buffer pool cache hit rate. The probability that the requested data is found in the buffer pool is generally greater than 80%, otherwise the buffer pool capacity may be too small | -| checkpoint_pages_sec | none | Checkpoint pages/sec, The number of dirty pages written to the disk by the checkpoint per second. If the data is too high, it indicates that there is a lack of memory capacity | -| page_reads_sec | none | Page reads/sec, Number of pages read per second in the cache pool | -| page_writes_sec | none | Page writes/sec, Number of pages written per second in the cache pool | - +| Metric name | Metric unit | Metric help description | +|------------------------|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| database_pages | none | Database pages, Number of pages obtained (buffer pool) | +| target_pages | none | Target pages, The desired number of pages that the buffer pool must have | +| page_life_expectancy | s | Page life expectancy. The time that data pages stay in the buffer pool. This time is generally greater than 300 | +| buffer_cache_hit_ratio | % | Buffer cache hit ratio, Database buffer pool cache hit rate. 
The probability that the requested data is found in the buffer pool is generally greater than 80%, otherwise the buffer pool capacity may be too small | +| checkpoint_pages_sec | none | Checkpoint pages/sec, The number of dirty pages written to the disk by the checkpoint per second. If the data is too high, it indicates that there is a lack of memory capacity | +| page_reads_sec | none | Page reads/sec, Number of pages read per second in the cache pool | +| page_writes_sec | none | Page writes/sec, Number of pages written per second in the cache pool | #### Metric set:connection -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| user_connection | none | Number of connected sessions | - +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|------------------------------| +| user_connection | none | Number of connected sessions | ### Common Problem @@ -61,10 +59,12 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo jdk version: jdk11 Description of the problem: SQL Server 2019 uses the SA user connection to report an error -Error message: +Error message: + ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". 
ClientConnectionId:xxxxxxxxxxxxxxxxxxxx ``` + Screenshot of the problem: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) diff --git a/home/versioned_docs/version-v1.5.x/help/ssl_cert.md b/home/versioned_docs/version-v1.5.x/help/ssl_cert.md index 7265bd12a59..e7b60fc8a89 100644 --- a/home/versioned_docs/version-v1.5.x/help/ssl_cert.md +++ b/home/versioned_docs/version-v1.5.x/help/ssl_cert.md @@ -9,25 +9,26 @@ keywords: [open source monitoring tool, open source ssl cert monitoring tool, mo ### Configuration parameters -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| Port | The port provided by the website, https generally defaults to 443. | -| Relative path | The suffix path of the website address except the IP port, for example, `www.tancloud.io/console` The relative path of the website is `/console`. | -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | +| Parameter name | Parameter help description | +|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). 
| +| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | +| Port | The port provided by the website, https generally defaults to 443. | +| Relative path | The suffix path of the website address except the IP port, for example, `www.tancloud.io/console` The relative path of the website is `/console`. | +| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | +| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | ### Collect metrics #### Metric collection: certificate -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|----------| -| subject | none | certificate name | -| expired | no | expired or not | -| start_time | None | Validity start time | -| start_timestamp | ms millisecond | Validity start timestamp | -| end_time | None | Expiration time | -| end_timestamp | ms milliseconds | expiration timestamp | +| Metric Name | Metric Unit | Metric Help Description | +|-----------------|-----------------|--------------------------| +| subject | none | certificate name | +| expired | no | expired or not | +| start_time | None | Validity start time | +| start_timestamp | ms millisecond | Validity start timestamp | +| end_time | None | Expiration time | +| end_timestamp | ms milliseconds | expiration timestamp | + diff --git a/home/versioned_docs/version-v1.5.x/help/tidb.md b/home/versioned_docs/version-v1.5.x/help/tidb.md index 541b84d6876..83128c527c8 100644 --- a/home/versioned_docs/version-v1.5.x/help/tidb.md +++ b/home/versioned_docs/version-v1.5.x/help/tidb.md @@ -17,22 +17,21 @@ keywords: [open source monitoring tool, open source database 
monitoring tool, mo ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Task name | Identify the name of this monitoring. The name needs to be unique | -| Service Port | The port that the TiDB database provides externally for status reporting is 10080 by default | -| PD Port | The PD port for the TiDB database, which defaults to 2379 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 6000ms | -| JDBC Port | The TiDB database externally provides the port used for client requests, which defaults to 4000 | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| JDBC URL | Database using [JDBC](https://docs.pingcap.com/tidb/stable/dev-guide-connect-to-tidb#jdbc) connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Task name | Identify the name of this monitoring. The name needs to be unique | +| Service Port | The port that the TiDB database provides externally for status reporting is 10080 by default | +| PD Port | The PD port for the TiDB database, which defaults to 2379 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 6000ms | +| JDBC Port | The TiDB database externally provides the port used for client requests, which defaults to 4000 | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| JDBC URL | Database using [JDBC](https://docs.pingcap.com/tidb/stable/dev-guide-connect-to-tidb#jdbc) connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric @@ -44,12 +43,13 @@ Due to the large number of metrics that can be monitored, only the metrics queri #### Metric set: global variables -| Metric Name | Metric Unit | Metric Help Description | -|---------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| version | none | The MySQL version, followed by the TiDB version. For example '8.0.11-TiDB-v7.5.1'. | -| version_comment | none | The TiDB version. For example, 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible'. | -| version_compile_machine | none | The name of the CPU architecture on which TiDB is running. | -| version_compile_os | none | The name of the OS on which TiDB is running. | -| max_connections | none | The maximum number of concurrent connections permitted for a single TiDB instance. This variable can be used for resources control. The default value 0 means no limit. When the value of this variable is larger than 0, and the number of connections reaches the value, the TiDB server rejects new connections from clients. | -| datadir | none | The location where data is stored. 
This location can be a local path /tmp/tidb, or point to a PD server if the data is stored on TiKV. A value in the format of ${pd-ip}:${pd-port} indicates the PD server that TiDB connects to on startup. | -| port | none | The port that the tidb-server is listening on when speaking the MySQL protocol. | +| Metric Name | Metric Unit | Metric Help Description | +|-------------------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| version | none | The MySQL version, followed by the TiDB version. For example '8.0.11-TiDB-v7.5.1'. | +| version_comment | none | The TiDB version. For example, 'TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible'. | +| version_compile_machine | none | The name of the CPU architecture on which TiDB is running. | +| version_compile_os | none | The name of the OS on which TiDB is running. | +| max_connections | none | The maximum number of concurrent connections permitted for a single TiDB instance. This variable can be used for resources control. The default value 0 means no limit. When the value of this variable is larger than 0, and the number of connections reaches the value, the TiDB server rejects new connections from clients. | +| datadir | none | The location where data is stored. This location can be a local path /tmp/tidb, or point to a PD server if the data is stored on TiKV. A value in the format of ${pd-ip}:${pd-port} indicates the PD server that TiDB connects to on startup. | +| port | none | The port that the tidb-server is listening on when speaking the MySQL protocol. 
| + diff --git a/home/versioned_docs/version-v1.5.x/help/time_expression.md b/home/versioned_docs/version-v1.5.x/help/time_expression.md index e2cb0928b05..482fc04cc05 100644 --- a/home/versioned_docs/version-v1.5.x/help/time_expression.md +++ b/home/versioned_docs/version-v1.5.x/help/time_expression.md @@ -23,28 +23,28 @@ ${FORMATTER [{ + | - } ]} > Example outputs are based on the current time being `2022-04-24 02:40:00.123` -| Name | Description | Example | -|---------------|------------------------------------|------------------------| -| @now | Formats as `yyyy-MM-dd HH:mm:ss` | 2022-04-24 02:40:00 | -| @date | Formats as `yyyy-MM-dd` | 2022-04-24 | -| @timestamp10 | Returns 10-digit timestamp | 1650768000 | -| @timestamp | Returns 13-digit timestamp | 1650768000000 | -| @time | Formats as `HH:mm:ss` | 02:40:00 | -| @year | Formats as `yyyy` | 2022 | -| @month | Formats as `MM` | 04 | -| @day | Formats as `dd` | 24 | -| @hour | Formats as `HH` | 02 | -| @minute | Formats as `mm` | 40 | -| @millisecond | Formats as `SSS` | 123 | -| @second | Formats as `ss` | 00 | +| Name | Description | Example | +|--------------|----------------------------------|---------------------| +| @now | Formats as `yyyy-MM-dd HH:mm:ss` | 2022-04-24 02:40:00 | +| @date | Formats as `yyyy-MM-dd` | 2022-04-24 | +| @timestamp10 | Returns 10-digit timestamp | 1650768000 | +| @timestamp | Returns 13-digit timestamp | 1650768000000 | +| @time | Formats as `HH:mm:ss` | 02:40:00 | +| @year | Formats as `yyyy` | 2022 | +| @month | Formats as `MM` | 04 | +| @day | Formats as `dd` | 24 | +| @hour | Formats as `HH` | 02 | +| @minute | Formats as `mm` | 40 | +| @millisecond | Formats as `SSS` | 123 | +| @second | Formats as `ss` | 00 | ### Supported Time Units | Name | Description | |------|-------------| -| y | Year | +| y | Year | | M | Month | -| d | Day | +| d | Day | | H | Hour | | m | Minute | | s | Second | @@ -57,8 +57,9 @@ ${FORMATTER [{ + | - } ]} #### Usage Examples 1. 
Simple expression - - `${now}` gets the current time and formats it as `yyyy-MM-dd HH:mm:ss` - - `${time+1h}` calculates the time one hour from now and formats it as `HH:mm:ss` - - `${time+1h+15s+30s}` calculates the time one hour, 15 minutes, and 30 seconds from now and formats it as `HH:mm:ss` + - `${now}` gets the current time and formats it as `yyyy-MM-dd HH:mm:ss` + - `${time+1h}` calculates the time one hour from now and formats it as `HH:mm:ss` + - `${time+1h+15m+30s}` calculates the time one hour, 15 minutes, and 30 seconds from now and formats it as `HH:mm:ss` 2. Complex expression template (if the built-in formatter does not meet your needs, you can combine multiple expressions) - - `${@year}年${@month}月${@day}日` returns the current date formatted as yyyy年MM月dd日 + - `${@year}年${@month}月${@day}日` returns the current date formatted as yyyy年MM月dd日 + diff --git a/home/versioned_docs/version-v1.5.x/help/tomcat.md b/home/versioned_docs/version-v1.5.x/help/tomcat.md index 8b35808ffc8..60591f85579 100644 --- a/home/versioned_docs/version-v1.5.x/help/tomcat.md +++ b/home/versioned_docs/version-v1.5.x/help/tomcat.md @@ -11,61 +11,60 @@ keywords: [open source monitoring tool, open source tomcat monitoring tool, moni ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by JMX | +| Username | JMX connection user name, optional | +| Password | JMX connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metrics #### Metrics Set:memory_pool -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| name | | metrics name | +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:code_cache -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| committed | kb | total size | +| init | kb | init size | +| max | kb | max size | +| used | kb | used size | #### Metrics Set:class_loading -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - +| Metric name | Metric unit | Metric help description | +|-----------------------|-------------|--------------------------| +| LoadedClassCount | | Loaded Class Count | +| TotalLoadedClassCount | | Total Loaded Class Count | +| UnloadedClassCount | | Unloaded Class Count | #### Metrics Set:thread -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | 
Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | +| Metric name | Metric unit | Metric help description | +|-------------------------|-------------|----------------------------| +| TotalStartedThreadCount | | Total Started Thread Count | +| ThreadCount | | Thread Count | +| PeakThreadCount | | Peak Thread Count | +| DaemonThreadCount | | Daemon Thread Count | +| CurrentThreadUserTime | ms | Current Thread User Time | +| CurrentThreadCpuTime | ms | Current Thread Cpu Time | ### Tomcat Enable JMX Protocol -1. After building tomcat, enter the bin directory under tomcat and modify the catalina.sh file +1. After building tomcat, enter the bin directory under tomcat and modify the catalina.sh file 2. vim catalina.sh Attention⚠️ Replace Hostname And Port diff --git a/home/versioned_docs/version-v1.5.x/help/ubuntu.md b/home/versioned_docs/version-v1.5.x/help/ubuntu.md index e7d368c9ea3..8d3b65ce195 100644 --- a/home/versioned_docs/version-v1.5.x/help/ubuntu.md +++ b/home/versioned_docs/version-v1.5.x/help/ubuntu.md @@ -9,74 +9,74 @@ keywords: [open source monitoring tool, open source linux ubuntu monitoring tool ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. 
The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Linux SSH. The default is 22 | +| Username | SSH connection user name, optional | +| Password | SSH connection password, optional | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:basic -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------| +| hostname | none | Host name | +| version | none | Operating system version | +| uptime | none | System running time | #### Metric set:cpu -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------| +| info | none | CPU model | +| cores | cores | Number of CPU cores | +| interrupt | number | Number of CPU interrupts | +| load | none | Average load of CPU in the last 1/5/15 minutes | +| context_switch | number | Number of current context switches | +| usage | % | CPU usage | #### Metric set:memory -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | +| Metric name | Metric unit | Metric help description | 
+|-------------|-------------|-------------------------------------| +| total | Mb | Total memory capacity | +| used | Mb | User program memory | +| free | Mb | Free memory capacity | +| buff_cache | Mb | Memory occupied by cache | +| available | Mb | Remaining available memory capacity | +| usage | % | Memory usage | #### Metric set:disk -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | +| Metric name | Metric unit | Metric help description | +|---------------|-------------|----------------------------------------| +| disk_num | blocks | Total number of disks | +| partition_num | partitions | Total number of partitions | +| block_write | blocks | Total number of blocks written to disk | +| block_read | blocks | Number of blocks read from disk | +| write_rate | iops | Rate of writing disk blocks per second | #### Metric set:interface -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------| +| interface_name | none | Network card name | +| receive_bytes | byte | Inbound data traffic(bytes) | +| transmit_bytes | byte | Outbound data traffic(bytes) | #### Metric set:disk_free -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % 
| usage | -| mounted | none | Mount point directory | +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-------------------------| +| filesystem | none | File system name | +| used | Mb | Used disk size | +| available | Mb | Available disk size | +| usage | % | usage | +| mounted | none | Mount point directory | + diff --git a/home/versioned_docs/version-v1.5.x/help/udp_port.md b/home/versioned_docs/version-v1.5.x/help/udp_port.md index 7fdcce3cf77..51c3098dc9a 100644 --- a/home/versioned_docs/version-v1.5.x/help/udp_port.md +++ b/home/versioned_docs/version-v1.5.x/help/udp_port.md @@ -10,7 +10,7 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| | Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️ Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. 
| @@ -26,9 +26,7 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito #### Metric set:summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------|-------------------|-------------------------| | Response Time | Milliseconds (ms) | Website response time | - - diff --git a/home/versioned_docs/version-v1.5.x/help/website.md b/home/versioned_docs/version-v1.5.x/help/website.md index e227de0dd1f..afe86397c9e 100644 --- a/home/versioned_docs/version-v1.5.x/help/website.md +++ b/home/versioned_docs/version-v1.5.x/help/website.md @@ -5,25 +5,26 @@ sidebar_label: Website Monitor keywords: [open source monitoring tool, open source website monitoring tool, monitoring website metrics] --- -> Monitor whether the website is available, response time and other Metrics. +> Monitor whether the website is available, response time and other Metrics. -### Configuration parameter +### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | -| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | +| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | +| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | -### Collection Metric +### Collection Metric -#### Metric set:summary +#### Metric set:summary + +| Metric name | Metric unit | Metric help description | +|--------------|-------------|-------------------------| +| responseTime | ms | Website response time | -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.5.x/help/websocket.md b/home/versioned_docs/version-v1.5.x/help/websocket.md index 8e3d29a204a..13d1f6eed31 100644 --- a/home/versioned_docs/version-v1.5.x/help/websocket.md +++ b/home/versioned_docs/version-v1.5.x/help/websocket.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, Websocket监控 ] ### Configuration parameter -| Parameter name | Parameter help description | +| Parameter name | Parameter help description | |---------------------------|--------------------------------------------------------------------------------------------------------------------------| | Host of WebSocket service | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://). | | Monitoring name | Identify the name of this monitoring. The name needs to be unique. 
| @@ -23,7 +23,7 @@ keywords: [ open source monitoring tool, Websocket监控 ] #### Metric set:Summary -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |---------------|-------------|-------------------------| | responseTime | ms | Response time | | httpVersion | none | HTTP version | @@ -31,3 +31,4 @@ keywords: [ open source monitoring tool, Websocket监控 ] | statusMessage | none | Status messages | | connection | none | Connect type | | upgrade | none | Upgraded protocols | + diff --git a/home/versioned_docs/version-v1.5.x/help/windows.md b/home/versioned_docs/version-v1.5.x/help/windows.md index 82e36d23470..e4be2bd6d96 100644 --- a/home/versioned_docs/version-v1.5.x/help/windows.md +++ b/home/versioned_docs/version-v1.5.x/help/windows.md @@ -6,38 +6,39 @@ keywords: [open source monitoring tool, open source windows monitoring tool, mon --- > Collect and monitor the general performance Metrics of Windows operating system through SNMP protocol. -> Note⚠️ You need to start SNMP service for Windows server. +> Note⚠️ You need to start SNMP service for Windows server. References: [What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) [What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) [Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) +[Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | -| Port | Port provided by Windows SNMP service. The default is 161 | -| SNMP version | SNMP protocol version V1 V2c V3 | +| Parameter name | Parameter help description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Windows SNMP service. The default is 161 | +| SNMP version | SNMP protocol version V1 V2c V3 | | SNMP community Word | SNMP agreement community name(Community Name). It is used to realize the authentication of SNMP network administrator when accessing SNMP management agent. Similar to password, the default value is public | -| Timeout | Protocol connection timeout | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Timeout | Protocol connection timeout | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:system -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| name | none | Host name | -| descr | none | Operating system description | -| uptime | none | System running time | -| numUsers | number | Current number of users | -| services | number | Current number of services | -| processes | number | Current number of processes | -| responseTime | ms | Collection response time | +| Metric name | Metric unit | Metric help description | +|--------------|-------------|------------------------------| +| name | none | Host name | +| descr | none | Operating system description | +| uptime | none | System running time | +| numUsers | number | Current number of users | +| services | number | Current number of services | +| processes | number | Current number of processes | +| responseTime | ms | Collection response time | + diff --git a/home/versioned_docs/version-v1.5.x/help/yarn.md b/home/versioned_docs/version-v1.5.x/help/yarn.md index 176a3209fee..d7f304ff910 100644 --- a/home/versioned_docs/version-v1.5.x/help/yarn.md +++ b/home/versioned_docs/version-v1.5.x/help/yarn.md @@ -15,69 +15,70 @@ Retrieve the HTTP monitoring port of Apache Yarn. Value: `yarn.resourcemanager.w ## Configuration Parameters -| Parameter Name | Parameter Description | -| ---------------- |----------------------------------------------------| -| Target Host | IP address, IPV6, or domain name of the monitored endpoint. Without protocol header. | -| Port | Monitoring port number of Apache Yarn, default is 8088. | -| Query Timeout | Timeout for querying Apache Yarn, in milliseconds, default is 6000 milliseconds. 
| +| Parameter Name | Parameter Description | +|------------------|-------------------------------------------------------------------------------------------| +| Target Host | IP address, IPV6, or domain name of the monitored endpoint. Without protocol header. | +| Port | Monitoring port number of Apache Yarn, default is 8088. | +| Query Timeout | Timeout for querying Apache Yarn, in milliseconds, default is 6000 milliseconds. | | Metrics Interval | Time interval for monitoring data collection, in seconds, minimum interval is 30 seconds. | ### Collected Metrics #### Metric Set: ClusterMetrics -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | -----------------------------------------| -| NumActiveNMs | | Number of currently active NodeManagers | -| NumDecommissionedNMs | | Number of currently decommissioned NodeManagers | -| NumDecommissioningNMs | | Number of nodes currently decommissioning | -| NumLostNMs | | Number of lost nodes in the cluster | -| NumUnhealthyNMs | | Number of unhealthy nodes in the cluster | +| Metric Name | Unit | Metric Description | +|-----------------------|------|-------------------------------------------------| +| NumActiveNMs | | Number of currently active NodeManagers | +| NumDecommissionedNMs | | Number of currently decommissioned NodeManagers | +| NumDecommissioningNMs | | Number of nodes currently decommissioning | +| NumLostNMs | | Number of lost nodes in the cluster | +| NumUnhealthyNMs | | Number of unhealthy nodes in the cluster | #### Metric Set: JvmMetrics -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | -------------------------------------------- | -| MemNonHeapCommittedM | MB | Current committed size of non-heap memory in JVM | -| MemNonHeapMaxM | MB | Maximum available non-heap memory in JVM | -| MemNonHeapUsedM | MB | Current used size of non-heap memory in JVM | -| MemHeapCommittedM | MB | Current committed size of heap memory in JVM | -| MemHeapMaxM | MB | 
Maximum available heap memory in JVM | -| MemHeapUsedM | MB | Current used size of heap memory in JVM | -| GcTimeMillis | | JVM GC time | -| GcCount | | Number of JVM GC occurrences | +| Metric Name | Unit | Metric Description | +|----------------------|------|--------------------------------------------------| +| MemNonHeapCommittedM | MB | Current committed size of non-heap memory in JVM | +| MemNonHeapMaxM | MB | Maximum available non-heap memory in JVM | +| MemNonHeapUsedM | MB | Current used size of non-heap memory in JVM | +| MemHeapCommittedM | MB | Current committed size of heap memory in JVM | +| MemHeapMaxM | MB | Maximum available heap memory in JVM | +| MemHeapUsedM | MB | Current used size of heap memory in JVM | +| GcTimeMillis | | JVM GC time | +| GcCount | | Number of JVM GC occurrences | #### Metric Set: QueueMetrics -| Metric Name | Unit | Metric Description | -| --------------------------- | ---- | -------------------------------------------- | -| queue | | Queue name | -| AllocatedVCores | | Allocated virtual cores (allocated) | -| ReservedVCores | | Reserved cores | -| AvailableVCores | | Available cores (unallocated) | -| PendingVCores | | Blocked scheduling cores | -| AllocatedMB | MB | Allocated (used) memory size | -| AvailableMB | MB | Available memory (unallocated) | -| PendingMB | MB | Blocked scheduling memory | -| ReservedMB | MB | Reserved memory | -| AllocatedContainers | | Number of allocated (used) containers | -| PendingContainers | | Number of blocked scheduling containers | -| ReservedContainers | | Number of reserved containers | -| AggregateContainersAllocated| | Total aggregated containers allocated | -| AggregateContainersReleased| | Total aggregated containers released | -| AppsCompleted | | Number of completed applications | -| AppsKilled | | Number of killed applications | -| AppsFailed | | Number of failed applications | -| AppsPending | | Number of pending applications | -| AppsRunning | | Number of currently running 
applications | -| AppsSubmitted | | Number of submitted applications | -| running_0 | | Number of jobs running for less than 60 minutes | -| running_60 | | Number of jobs running between 60 and 300 minutes | -| running_300 | | Number of jobs running between 300 and 1440 minutes | -| running_1440 | | Number of jobs running for more than 1440 minutes | +| Metric Name | Unit | Metric Description | +|------------------------------|------|-----------------------------------------------------| +| queue | | Queue name | +| AllocatedVCores | | Allocated virtual cores (allocated) | +| ReservedVCores | | Reserved cores | +| AvailableVCores | | Available cores (unallocated) | +| PendingVCores | | Blocked scheduling cores | +| AllocatedMB | MB | Allocated (used) memory size | +| AvailableMB | MB | Available memory (unallocated) | +| PendingMB | MB | Blocked scheduling memory | +| ReservedMB | MB | Reserved memory | +| AllocatedContainers | | Number of allocated (used) containers | +| PendingContainers | | Number of blocked scheduling containers | +| ReservedContainers | | Number of reserved containers | +| AggregateContainersAllocated | | Total aggregated containers allocated | +| AggregateContainersReleased | | Total aggregated containers released | +| AppsCompleted | | Number of completed applications | +| AppsKilled | | Number of killed applications | +| AppsFailed | | Number of failed applications | +| AppsPending | | Number of pending applications | +| AppsRunning | | Number of currently running applications | +| AppsSubmitted | | Number of submitted applications | +| running_0 | | Number of jobs running for less than 60 minutes | +| running_60 | | Number of jobs running between 60 and 300 minutes | +| running_300 | | Number of jobs running between 300 and 1440 minutes | +| running_1440 | | Number of jobs running for more than 1440 minutes | #### Metric Set: runtime -| Metric Name | Unit | Metric Description | -| ----------------------- | ---- | 
--------------------------| -| StartTime | | Startup timestamp | \ No newline at end of file +| Metric Name | Unit | Metric Description | +|-------------|------|--------------------| +| StartTime | | Startup timestamp | + diff --git a/home/versioned_docs/version-v1.5.x/help/zookeeper.md b/home/versioned_docs/version-v1.5.x/help/zookeeper.md index b7a34f49eda..f14b0bb8273 100644 --- a/home/versioned_docs/version-v1.5.x/help/zookeeper.md +++ b/home/versioned_docs/version-v1.5.x/help/zookeeper.md @@ -10,10 +10,12 @@ keywords: [open source monitoring tool, open source zookeeper monitoring tool, m ### PreRequisites #### Zookeeper four word command ->The current implementation scheme uses the four word command provided by zookeeper to collect Metrics. -Users need to add the four word command of zookeeper to the white list by themselves. + +> The current implementation scheme uses the four word command provided by zookeeper to collect Metrics. +> Users need to add the four word command of zookeeper to the white list by themselves. Steps + > 1.Find our zookeeper configuration file, which is usually zoo.cfg. > > 2.Add the following commands to the configuration file @@ -28,73 +30,74 @@ Steps > 3.Restart service -```shell +```shell zkServer.sh restart ``` #### netcat protocol + The current implementation scheme requires us to deploy the Linux server of zookeeper Command environment for installing netcat > netcat installation steps -```shell -yum install -y nc -``` +> +> ```shell +> yum install -y nc +> ``` If the terminal displays the following information, the installation is successful + ```shell Complete! ``` ### Configuration parameter -| Parameter name | Parameter help description | -| ----------- | ----------- | -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Zookeeper. 
The default is 2181 | -| Query timeout | Set the timeout of Zookeeper connection, unit: ms, default: 3000ms | -| Username | User name of the Linux connection where Zookeeper is located | -| Password | Password of the Linux connection where Zookeeper is located | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by Zookeeper. The default is 2181 | +| Query timeout | Set the timeout of Zookeeper connection, unit: ms, default: 3000ms | +| Username | User name of the Linux connection where Zookeeper is located | +| Password | Password of the Linux connection where Zookeeper is located | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | ### Collection Metric #### Metric set:conf -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| clientPort | none | Port | -| dataDir | none | Data snapshot file directory. By default, 100000 operations generate a snapshot | -| dataDirSize | kb | Data snapshot file size | -| dataLogDir | none | Transaction log file directory, production environment on a separate disk | -| dataLogSize | kb | Transaction log file size | -| tickTime | ms | Time interval between servers or between clients and servers to maintain heartbeat | -| minSessionTimeout | ms | Minimum session timeout. Heartbeat timex2. The specified time is less than this time, which is used by default | -| maxSessionTimeout | ms | Maximum session timeout. Heartbeat timex20. The specified time is greater than this time, which is used by default | -| serverId | none | Server id | - +| Metric name | Metric unit | Metric help description | +|-------------------|-------------|--------------------------------------------------------------------------------------------------------------------| +| clientPort | none | Port | +| dataDir | none | Data snapshot file directory. By default, 100000 operations generate a snapshot | +| dataDirSize | kb | Data snapshot file size | +| dataLogDir | none | Transaction log file directory, production environment on a separate disk | +| dataLogSize | kb | Transaction log file size | +| tickTime | ms | Time interval between servers or between clients and servers to maintain heartbeat | +| minSessionTimeout | ms | Minimum session timeout. Heartbeat timex2. The specified time is less than this time, which is used by default | +| maxSessionTimeout | ms | Maximum session timeout. Heartbeat timex20. 
The specified time is greater than this time, which is used by default | +| serverId | none | Server id | #### Metric set:stats -| Metric name | Metric unit | Metric help description | -| ----------- | ----------- | ----------- | -| zk_version | none | Server version | -| zk_server_state | none | Server role | -| zk_num_alive_connections | number | Number of connections | -| zk_avg_latency | ms | Average latency | -| zk_outstanding_requests | number | Number of outstanding requests | -| zk_znode_count | number | Number of znode | -| zk_packets_sent | number | Number of packets sent | -| zk_packets_received | number | Number of packets received | -| zk_watch_count | number | Number of watch | -| zk_max_file_descriptor_count | number | Maximum number of file descriptors | -| zk_approximate_data_size | kb | data size | -| zk_open_file_descriptor_count | number | Number of open file descriptors | -| zk_max_latency | ms | Max latency | -| zk_ephemerals_count | number | Number of ephemeral nodes | -| zk_min_latency | ms | Min latency | - +| Metric name | Metric unit | Metric help description | +|-------------------------------|-------------|------------------------------------| +| zk_version | none | Server version | +| zk_server_state | none | Server role | +| zk_num_alive_connections | number | Number of connections | +| zk_avg_latency | ms | Average latency | +| zk_outstanding_requests | number | Number of outstanding requests | +| zk_znode_count | number | Number of znode | +| zk_packets_sent | number | Number of packets sent | +| zk_packets_received | number | Number of packets received | +| zk_watch_count | number | Number of watch | +| zk_max_file_descriptor_count | number | Maximum number of file descriptors | +| zk_approximate_data_size | kb | data size | +| zk_open_file_descriptor_count | number | Number of open file descriptors | +| zk_max_latency | ms | Max latency | +| zk_ephemerals_count | number | Number of ephemeral nodes | +| zk_min_latency | ms | Min 
latency | diff --git a/home/versioned_docs/version-v1.5.x/introduce.md b/home/versioned_docs/version-v1.5.x/introduce.md index 98305d95ed5..b1dd5bc6771 100644 --- a/home/versioned_docs/version-v1.5.x/introduce.md +++ b/home/versioned_docs/version-v1.5.x/introduce.md @@ -5,7 +5,7 @@ sidebar_label: Introduce slug: / --- -> A real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. +> A real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. [![Discord](https://img.shields.io/badge/Chat-Discord-7289DA?logo=discord)](https://discord.gg/Fb6M73htGr) [![Reddit](https://img.shields.io/badge/Reddit-Community-7289DA?logo=reddit)](https://www.reddit.com/r/hertzbeat/) @@ -32,11 +32,9 @@ slug: / * Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. * Provides powerful status page building capabilities, easily communicate the real-time status of your service to users. +> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. - ----- - +--- ### Powerful Monitoring Templates > Before we discuss the customizable monitoring capabilities of HertzBeat, which we mentioned at the beginning, let's introduce the different monitoring templates of HertzBeat. And it is because of this monitoring template design that the advanced features come later. 
@@ -49,7 +47,6 @@ Do you believe that users can just write a monitoring template on the UI page, c ![hertzbeat](/img/home/9.png) - ### Built-in Monitoring Types **There are a lot of built-in monitoring templates for users to add directly on the page, one monitoring type corresponds to one YML monitoring template**. @@ -97,7 +94,7 @@ Do you believe that users can just write a monitoring template on the UI page, c > From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. > Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. -> The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. +> The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. ![hertzbeat](/img/docs/custom-arch.png) @@ -105,12 +102,12 @@ Do you believe that users can just write a monitoring template on the UI page, c > For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. > You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. -> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. 
+> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. -The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. +The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. - For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. -- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. +- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. **Password and other sensitive information is encrypted on all links**. @@ -129,7 +126,7 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > Two locations, three centers, multi-cloud environments, multi-isolated networks, you may have heard of these scenarios. When there is a need for a unified monitoring system to monitor the IT resources of different isolated networks, this is where our Cloud Edge Collaboration comes in. In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. 
-`HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. +`HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. ![hertzbeat](/img/docs/cluster-arch.png) @@ -148,12 +145,11 @@ In an isolated network where multiple networks are not connected, we need to dep - Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . - Open source is not the same as free, dev based on HertzBeat must retain page footnotes, copyright, etc. - **HertzBeat has been included in the [CNCF Observability And Analysis - Monitoring Landscape](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category)** ![cncf](/img/home/cncf-landscape-left-logo.svg) ------ +--- **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** @@ -271,7 +267,6 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/13.png) - ### Alarm Silence - When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. @@ -303,7 +298,6 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/9.png) - ----- **There's so much more to discover. 
Have Fun!** diff --git a/home/versioned_docs/version-v1.5.x/others/design.md b/home/versioned_docs/version-v1.5.x/others/design.md index e7f80d164d7..da5ec8fffee 100644 --- a/home/versioned_docs/version-v1.5.x/others/design.md +++ b/home/versioned_docs/version-v1.5.x/others/design.md @@ -1,9 +1,9 @@ --- id: design title: 设计文档 -sidebar_label: 设计文档 +sidebar_label: 设计文档 --- -### HertzBeat Arch +### HertzBeat Arch -![architecture](/img/docs/hertzbeat-arch.svg) +![architecture](/img/docs/hertzbeat-arch.svg) diff --git a/home/versioned_docs/version-v1.5.x/others/resource.md b/home/versioned_docs/version-v1.5.x/others/resource.md index 79c16b4ab56..83188bd1c8e 100644 --- a/home/versioned_docs/version-v1.5.x/others/resource.md +++ b/home/versioned_docs/version-v1.5.x/others/resource.md @@ -1,19 +1,19 @@ --- id: resource title: Related resources -sidebar_label: Related resources +sidebar_label: Related resources --- -## Icon Resources +## Icon Resources -### HertzBeat Logo +### HertzBeat Logo -![logo](/img/hertzbeat-logo.svg) +![logo](/img/hertzbeat-logo.svg) -Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) +Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) -### HertzBeat Brand Logo +### HertzBeat Brand Logo -![logo](/img/hertzbeat-brand.svg) +![logo](/img/hertzbeat-brand.svg) -Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) +Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) diff --git a/home/versioned_docs/version-v1.5.x/start/account-modify.md b/home/versioned_docs/version-v1.5.x/start/account-modify.md index 64bba7d72df..8dc8b03a565 100644 --- a/home/versioned_docs/version-v1.5.x/start/account-modify.md +++ b/home/versioned_docs/version-v1.5.x/start/account-modify.md @@ -1,7 +1,7 @@ --- id: account-modify title: Modify Account Username Password And Secret -sidebar_label: Update Account Secret +sidebar_label: Update Account Secret --- ## Update Account @@ -112,12 +112,11 @@ 
account: role: [guest] ``` -## Update Security Secret +## Update Security Secret -> This secret is the key for account security encryption management and needs to be updated to your custom key string of the same length. +> This secret is the key for account security encryption management and needs to be updated to your custom key string of the same length. - -Update the `application.yml` file in the `config` directory, modify the `sureness.jwt.secret` parameter to your custom key string of the same length. +Update the `application.yml` file in the `config` directory, modify the `sureness.jwt.secret` parameter to your custom key string of the same length. ```yaml sureness: @@ -128,4 +127,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**Restart HertzBeat, access http://ip:1157/ to explore** +**Restart HertzBeat, access http://ip:1157/ to explore** diff --git a/home/versioned_docs/version-v1.5.x/start/custom-config.md b/home/versioned_docs/version-v1.5.x/start/custom-config.md index 5a60d6b02b9..7554498bc6e 100644 --- a/home/versioned_docs/version-v1.5.x/start/custom-config.md +++ b/home/versioned_docs/version-v1.5.x/start/custom-config.md @@ -1,7 +1,7 @@ --- id: custom-config title: Advanced Params Config -sidebar_label: Advanced Params Config +sidebar_label: Advanced Params Config --- This describes how to configure the SMS server, the number of built-in availability alarm triggers, etc. @@ -12,13 +12,14 @@ This describes how to configure the SMS server, the number of built-in availabil Modify the configuration file located at `hertzbeat/config/application.yml` Note ⚠️The docker container method needs to mount the application.yml file to the local host -The installation package can be decompressed and modified in `hertzbeat/config/application.yml` +The installation package can be decompressed and modified in `hertzbeat/config/application.yml` 1. 
Configure the SMS sending server > Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. -Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) +Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) + ```yaml common: sms: @@ -32,7 +33,6 @@ common: 2. Configure alarm custom parameters - ```yaml alerter: # Custom console address @@ -44,6 +44,7 @@ alerter: > By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. Note ⚠️ `memory.enabled: false, redis.enabled: true` + ```yaml warehouse: store: @@ -56,3 +57,4 @@ warehouse: port: 6379 password: 123456 ``` + diff --git a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md index 7671366e9ad..10ecb09b001 100644 --- a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md +++ b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md @@ -1,52 +1,51 @@ --- id: docker-deploy title: Install HertzBeat via Docker -sidebar_label: Install via Docker +sidebar_label: Install via Docker --- > Recommend to use docker deploy Apache HertzBeat (incubating) - 1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` - 2. 
pull HertzBeat Docker mirror you can look up the mirror version TAG in [dockerhub mirror repository](https://hub.docker.com/r/apache/hertzbeat/tags) - or in [quay.io mirror repository](https://quay.io/repository/apache/hertzbeat) + or in [quay.io mirror repository](https://quay.io/repository/apache/hertzbeat) + ```shell $ docker pull apache/hertzbeat $ docker pull apache/hertzbeat-collector ``` - or + + or + ```shell $ docker pull quay.io/tancloud/hertzbeat $ docker pull quay.io/tancloud/hertzbeat-collector ``` - 3. Mounted HertzBeat configuration file (optional) Download and config `application.yml` in the host directory, eg:`$(pwd)/application.yml` Download from [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - You can modify the configuration yml file according to your needs. - - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) - + You can modify the configuration yml file according to your needs. 
+ - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` + - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) + - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) + - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) 4. Mounted the account file(optional) HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) - For detail steps, please refer to [Configure Account Password](account-modify) - -5. Start the HertzBeat Docker container + For detail steps, please refer to [Configure Account Password](account-modify) +5. Start the HertzBeat Docker container -```shell +```shell $ docker run -d -p 1157:1157 -p 1158:1158 \ -e LANG=en_US.UTF-8 \ -e TZ=Asia/Shanghai \ @@ -57,25 +56,25 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ --name hertzbeat apache/hertzbeat ``` - This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. 
- - `docker run -d` : Run a container in the background via Docker - - `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. - - `-e LANG=en_US.UTF-8` : Set the system language - - `-e TZ=Asia/Shanghai` : Set the system timezone - - `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. - - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. - - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. - - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. - - `--name hertzbeat` : Naming container name hertzbeat - - `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** +This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. +- `docker run -d` : Run a container in the background via Docker +- `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. 
+- `-e LANG=en_US.UTF-8` : Set the system language +- `-e TZ=Asia/Shanghai` : Set the system timezone +- `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. +- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. +- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. +- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. +- `--name hertzbeat` : Naming container name hertzbeat +- `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** -6. Begin to explore HertzBeat +6. Begin to explore HertzBeat - Access `http://ip:1157/` using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access `http://ip:1157/` using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! 7. Deploy collector cluster (Optional) -```shell +```shell $ docker run -d \ -e IDENTITY=custom-collector-name \ -e MODE=public \ @@ -84,53 +83,61 @@ $ docker run -d \ --name hertzbeat-collector apache/hertzbeat-collector ``` - This command starts a running HertzBeat-Collector container. 
- - `docker run -d` : Run a container in the background via Docker - - `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. - - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. - - `-e MANAGER_HOST=127.0.0.1` : Important⚠️ Set the main hertzbeat server ip. - - `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. - - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. - - `--name hertzbeat-collector` : Naming container name hertzbeat-collector - - `apache/hertzbeat-collector` : Use the pulled latest HertzBeat-Collector official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat-collector` instead if you pull `quay.io` docker image.** +This command starts a running HertzBeat-Collector container. +- `docker run -d` : Run a container in the background via Docker +- `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. +- `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. +- `-e MANAGER_HOST=127.0.0.1` : Important⚠️ Set the main hertzbeat server ip. +- `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. +- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. +- `--name hertzbeat-collector` : Naming container name hertzbeat-collector +- `apache/hertzbeat-collector` : Use the pulled latest HertzBeat-Collector official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat-collector` instead if you pull `quay.io` docker image.** -8. 
Access `http://localhost:1157` and you will see the registered new collector in dashboard. +8. Access `http://localhost:1157` and you will see the registered new collector in dashboard. -**HAVE FUN** +**HAVE FUN** -### FAQ +### FAQ **The most common problem is network problems, please check in advance** 1. **MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** -The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. + The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` +> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. **According to the process deploy,visit http://ip:1157/ no interface** -Please refer to the following points to troubleshoot issues: + Please refer to the following points to troubleshoot issues: + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. > 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. -3. **Log an error TDengine connection or insert SQL failed** +3. 
**Log an error TDengine connection or insert SQL failed** + > 1:Check whether database account and password configured is correct, the database is created. -> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. +> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + +4. **Historical monitoring charts have been missing data for a long time** -4. **Historical monitoring charts have been missing data for a long time** > 1:Check whether you configure Tdengine or IoTDB. No configuration means no historical chart data. -> 2:Check whether Tdengine database `hertzbeat` is created. +> 2:Check whether Tdengine database `hertzbeat` is created. > 3: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. 5. If the history chart on the monitoring page is not displayed,popup [please configure time series database] + > As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service - IoTDB or TDengine database. -> Installation and initialization this database refer to [TDengine Installation](tdengine-init) or [IoTDB Installation](iotdb-init) +> Installation and initialization this database refer to [TDengine Installation](tdengine-init) or [IoTDB Installation](iotdb-init) + +6. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -6. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is near 2.4.0.12, version 3.0 and 2.2 are not compatible. +> Please confirm whether the installed TDengine version is near 2.4.0.12, version 3.0 and 2.2 are not compatible. 7. 
The time series database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure dependent time series database] + > Please check if the configuration parameters are correct > Is iot-db or td-engine enable set to true > Note⚠️If both hertzbeat and IotDB, TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory + diff --git a/home/versioned_docs/version-v1.5.x/start/greptime-init.md b/home/versioned_docs/version-v1.5.x/start/greptime-init.md index cfb148ecea7..84916590868 100644 --- a/home/versioned_docs/version-v1.5.x/start/greptime-init.md +++ b/home/versioned_docs/version-v1.5.x/start/greptime-init.md @@ -8,22 +8,25 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie > It is recommended to use VictoriaMetrics as metrics storage. -GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. +GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** + +### Install GreptimeDB via Docker -### Install GreptimeDB via Docker > Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). 
- After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install GreptimeDB with Docker +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install GreptimeDB with Docker ```shell $ docker run -p 4000-4004:4000-4004 \ @@ -33,10 +36,11 @@ $ docker run -p 4000-4004:4000-4004 \ --http-addr 0.0.0.0:4000 \ --rpc-addr 0.0.0.0:4001 \ ``` - `-v /opt/greptimedb:/tmp/greptimedb` is local persistent mount of greptimedb data directory. `/opt/greptimedb` should be replaced with the actual local directory. - use```$ docker ps``` to check if the database started successfully -### Configure the database connection in hertzbeat `application.yml` configuration file +`-v /opt/greptimedb:/tmp/greptimedb` is local persistent mount of greptimedb data directory. `/opt/greptimedb` should be replaced with the actual local directory. +use```$ docker ps``` to check if the database started successfully + +### Configure the database connection in hertzbeat `application.yml` configuration file 1. 
Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) diff --git a/home/versioned_docs/version-v1.5.x/start/influxdb-init.md b/home/versioned_docs/version-v1.5.x/start/influxdb-init.md index b752fc82a21..b9eeb2fd00c 100644 --- a/home/versioned_docs/version-v1.5.x/start/influxdb-init.md +++ b/home/versioned_docs/version-v1.5.x/start/influxdb-init.md @@ -1,51 +1,54 @@ --- id: influxdb-init title: Use Time Series Database InfluxDB to Store Metrics Data (Optional) -sidebar_label: Use InfluxDB Store Metrics +sidebar_label: Use InfluxDB Store Metrics --- Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) > It is recommended to use VictoriaMetrics as metrics storage. - **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** -Note⚠️ Need InfluxDB 1.x Version. +Note⚠️ Need InfluxDB 1.x Version. ### 1. Use HuaweiCloud GaussDB For Influx > Use [HuaweiCloud GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) - -> Get the `GaussDB For Influx` service url, username and password config. +> +> Get the `GaussDB For Influx` service url, username and password config. ⚠️Note `GaussDB For Influx` enable SSL default, the service url should use `https:` -### 2. Install TDengine via Docker +### 2. Install TDengine via Docker + > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -1. 
Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). - After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install InfluxDB with Docker - ``` - $ docker run -p 8086:8086 \ - -v /opt/influxdb:/var/lib/influxdb \ - influxdb:1.8 - ``` - `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. - use```$ docker ps``` to check if the database started successfully - - -### Configure the database connection in hertzbeat `application.yml` configuration file +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install InfluxDB with Docker +> +> ``` +> $ docker run -p 8086:8086 \ +> -v /opt/influxdb:/var/lib/influxdb \ +> influxdb:1.8 +> ``` +> +> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. +> use```$ docker ps``` to check if the database started successfully + +### Configure the database connection in hertzbeat `application.yml` configuration file 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.influxdb` data source parameters, URL account and password. 
+ Replace `warehouse.store.influxdb` data source parameters, URL account and password. ```yaml warehouse: @@ -70,3 +73,4 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + diff --git a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md index ee7a66a87f4..43fb3235406 100644 --- a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md +++ b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md @@ -3,6 +3,7 @@ id: iotdb-init title: Use Time Series Database IoTDB to Store Metrics Data (Optional) sidebar_label: Use IoTDB Store Metrics --- + Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) > It is recommended to use VictoriaMetrics as metrics storage. 
@@ -87,9 +88,8 @@ warehouse: Configuration parameters: - -| Parameter Name | Description | -| ------------------- |-------------------------------------------------------------------------------------------| +| Parameter Name | Description | +|---------------------|-------------------------------------------------------------------------------------------| | enabled | Whether to enable | | host | IoTDB database address | | rpc-port | IoTDB database port | @@ -120,3 +120,4 @@ Configuration parameters: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory + diff --git a/home/versioned_docs/version-v1.5.x/start/mysql-change.md b/home/versioned_docs/version-v1.5.x/start/mysql-change.md index 1095f538b0c..e0f3721bb7f 100644 --- a/home/versioned_docs/version-v1.5.x/start/mysql-change.md +++ b/home/versioned_docs/version-v1.5.x/start/mysql-change.md @@ -1,21 +1,25 @@ --- id: mysql-change title: Use MYSQL Replace H2 Database to Store Metadata(Optional) -sidebar_label: Use MYSQL Instead of H2 +sidebar_label: Use MYSQL Instead of H2 --- -MYSQL is a reliable relational database. In addition to default built-in H2 database, Apache HertzBeat (incubating) allow you to use MYSQL to store structured relational data such as monitoring information, alarm information and configuration information. -> If you have the MYSQL environment, can be directly to database creation step. +MYSQL is a reliable relational database. In addition to default built-in H2 database, Apache HertzBeat (incubating) allow you to use MYSQL to store structured relational data such as monitoring information, alarm information and configuration information. + +> If you have the MYSQL environment, can be directly to database creation step. 
+ +### Install MYSQL via Docker -### Install MYSQL via Docker 1. Download and install the Docker environment For Docker installation, please refer to the [Docker official documentation](https://docs.docker.com/get-docker/). After the installation, please verify in the terminal that the Docker version can be printed normally. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. Install MYSQl with Docker +2. Install MYSQl with Docker + ``` $ docker run -d --name mysql \ -p 3306:3306 \ @@ -24,18 +28,20 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data --restart=always \ mysql:5.7 ``` + `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. use ```$ docker ps``` to check if the database started successfully -### Database creation +### Database creation + 1. Enter MYSQL or use the client to connect MYSQL service - `mysql -uroot -p123456` + `mysql -uroot -p123456` 2. Create database named hertzbeat `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. Check if hertzbeat database has been successfully created `show databases;` -### Modify hertzbeat's configuration file application.yml and switch data source +### Modify hertzbeat's configuration file application.yml and switch data source - Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file @@ -43,6 +49,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data Replace `spring.database` data source parameters, URL account and password. For example: + ```yaml spring: datasource: @@ -51,7 +58,9 @@ MYSQL is a reliable relational database. 
In addition to default built-in H2 data password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` - Specific replacement parameters are as follows and you need to configure account according to the mysql environment: + + Specific replacement parameters are as follows and you need to configure account according to the mysql environment: + ```yaml spring: datasource: @@ -63,7 +72,6 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data jpa: database: mysql ``` +- It is recommended to set the host field in the MySQL URL or Redis URL to the public IP address when using Hertzbeat in docker. -- It is recommended to set the host field in the MySQL URL or Redis URL to the public IP address when using Hertzbeat in docker. - -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.5.x/start/package-deploy.md b/home/versioned_docs/version-v1.5.x/start/package-deploy.md index 050121f5c55..cbd42ad5bb9 100644 --- a/home/versioned_docs/version-v1.5.x/start/package-deploy.md +++ b/home/versioned_docs/version-v1.5.x/start/package-deploy.md @@ -9,14 +9,15 @@ sidebar_label: Install via Package 1. Download HertzBeat installation package Download installation package `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` corresponding to your system environment - [Download Page](/docs/download) - 2. Configure HertzBeat's configuration file(optional) - Unzip the installation package to the host eg: /opt/hertzbeat - ``` + Unzip the installation package to the host eg: /opt/hertzbeat + + ``` $ tar zxvf hertzbeat-xx.tar.gz or $ unzip -o hertzbeat-xx.zip ``` + Modify the configuration file `hertzbeat/config/application.yml` params according to your needs. 
- If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) @@ -27,22 +28,23 @@ sidebar_label: Install via Package 3. Configure the account file(optional) HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` If you need add, delete or modify account or password, configure `hertzbeat/config/sureness.yml`. Ignore this step without this demand. - For detail steps, please refer to [Configure Account Password](account-modify) + For detail steps, please refer to [Configure Account Password](account-modify) 4. Start the service - Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. - ``` + Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. + + ``` $ ./startup.sh ``` +5. Begin to explore HertzBeat -5. Begin to explore HertzBeat - - Access http://localhost:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access http://localhost:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! 6. 
Deploy collector clusters (Optional) - - Download and unzip the collector release package `hertzbeat-collector-xx.tar.gz` to new machine [Download Page](/docs/download) + - Download and unzip the collector release package `hertzbeat-collector-xx.tar.gz` to new machine [Download Page](/docs/download) - Configure the collector configuration yml file `hertzbeat-collector/config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` + ```yaml collector: dispatch: @@ -59,7 +61,7 @@ sidebar_label: Install via Package **HAVE FUN** -### FAQ +### FAQ 1. **If using the package not contains JDK, you need to prepare the JAVA environment in advance** @@ -67,6 +69,7 @@ sidebar_label: Install via Package requirement:JDK17 ENV download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully. + ``` $ java -version java version "17.0.9" @@ -74,18 +77,20 @@ sidebar_label: Install via Package Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) ``` - 2. **According to the process deploy,visit http://ip:1157/ no interface** Please refer to the following points to troubleshoot issues: + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** + > 1:Check whether database account and password configured is correct, the database is created. 
-> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. +> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 4. **Monitoring historical charts with no data for a long time ** + > 1: Whether the time series database is configured or not, if it is not configured, there is no historical chart data. > 2: If you are using Tdengine, check whether the database `hertzbeat` of Tdengine is created. > 3: HertzBeat's configuration file `application.yml`, the dependent services in it, the time series, the IP account password, etc. are configured correctly. diff --git a/home/versioned_docs/version-v1.5.x/start/postgresql-change.md b/home/versioned_docs/version-v1.5.x/start/postgresql-change.md index c78a24a1ca7..ac63a41ebc9 100644 --- a/home/versioned_docs/version-v1.5.x/start/postgresql-change.md +++ b/home/versioned_docs/version-v1.5.x/start/postgresql-change.md @@ -3,27 +3,35 @@ id: postgresql-change title: Use PostgreSQL Replace H2 Database to Store Metadata(Optional) sidebar_label: Use PostgreSQL Instead of H2 --- + PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition to default built-in H2 database, Apache HertzBeat (incubating) allow you to use PostgreSQL to store structured relational data such as monitoring information, alarm information and configuration information. > If you have the PostgreSQL environment, can be directly to database creation step. ### Install PostgreSQL via Docker + 1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. + ``` $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. 
Install PostgreSQL with Docker + ``` $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` + use```$ docker ps```to check if the database started successfully + 3. Create database in container manually or with [script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgresql-iotdb/conf/sql/schema.sql). ### Database creation -1. Enter postgreSQL or use the client to connect postgreSQL service + +1. Enter postgreSQL or use the client to connect postgreSQL service + ``` su - postgres psql @@ -39,6 +47,7 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition Modify `hertzbeat/config/application.yml` configuration file Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `spring.database` data source parameters, URL account and password. 
+ ```yaml spring: datasource: @@ -47,7 +56,9 @@ spring: password: 123456 url: jdbc:h2:./data/hertzbeat;MODE=MYSQL ``` + Specific replacement parameters are as follows and you need to configure account, ip, port according to the postgresql environment: + ```yaml spring: config: @@ -70,4 +81,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.5.x/start/quickstart.md b/home/versioned_docs/version-v1.5.x/start/quickstart.md index 5bd804c4d54..2e10d8f72de 100644 --- a/home/versioned_docs/version-v1.5.x/start/quickstart.md +++ b/home/versioned_docs/version-v1.5.x/start/quickstart.md @@ -1,7 +1,7 @@ --- id: quickstart title: Quick Start -sidebar_label: Quick Start +sidebar_label: Quick Start --- ### 🐕 Quick Start @@ -29,6 +29,7 @@ sidebar_label: Quick Start ``` docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. @@ -45,6 +46,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache 5. 
Deploy collector clusters(Optional) - Download the release package `hertzbeat-collector-xx.tar.gz` to new machine [Download](https://hertzbeat.apache.org/docs/download) - Configure the collector configuration yml file `hertzbeat-collector/config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` + ```yaml collector: dispatch: @@ -68,9 +70,9 @@ Detailed config refer to [Install HertzBeat via Package](https://hertzbeat.apach 3. Web:need `nodejs npm angular-cli` environment, Run `ng serve --open` in `web-app` directory after backend startup. 4. Access `http://localhost:4200` to start, default account: `admin/hertzbeat` -Detailed steps refer to [CONTRIBUTING](../community/contribution) +Detailed steps refer to [CONTRIBUTING](../community/contribution) -##### 4:Install All(hertzbeat+mysql+iotdb/tdengine) via Docker-compose +##### 4:Install All(hertzbeat+mysql+iotdb/tdengine) via Docker-compose Install and deploy the mysql database, iotdb/tdengine database and hertzbeat at one time through [docker-compose deployment script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose). @@ -82,4 +84,4 @@ Install HertzBeat cluster in a Kubernetes cluster by Helm chart. Detailed steps refer to [Artifact Hub](https://artifacthub.io/packages/helm/hertzbeat/hertzbeat) -**HAVE FUN** +**HAVE FUN** diff --git a/home/versioned_docs/version-v1.5.x/start/rainbond-deploy.md b/home/versioned_docs/version-v1.5.x/start/rainbond-deploy.md index 57f537aa4ac..ef2c581d57d 100644 --- a/home/versioned_docs/version-v1.5.x/start/rainbond-deploy.md +++ b/home/versioned_docs/version-v1.5.x/start/rainbond-deploy.md @@ -1,7 +1,7 @@ --- id: rainbond-deploy title: Use Rainbond Deploy HertzBeat -sidebar_label: Install via Rainbond +sidebar_label: Install via Rainbond --- If you are unfamiliar with Kubernetes, and want to install Apache HertzBeat (incubating) in Kubernetes, you can use Rainbond to deploy. 
Rainbond is a cloud-native application management platform built on Kubernetes and simplifies the application deployment to Kubernetes. diff --git a/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md b/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md index d8c06a8beba..26c7f6ecf32 100644 --- a/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md +++ b/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md @@ -12,7 +12,6 @@ This article introduces how to use the hertzbeat monitoring tool to detect the v Apache HertzBeat (incubating) is a real-time monitoring tool with powerful custom monitoring capabilities without Agent. Website monitoring, PING connectivity, port availability, database, operating system, middleware, API monitoring, threshold alarms, alarm notification (email, WeChat, Ding Ding Feishu). - github: https://github.com/apache/hertzbeat #### Install HertzBeat @@ -29,7 +28,6 @@ github: https://github.com/apache/hertzbeat > System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate - ![](/img/docs/start/ssl_1.png) 2. Configure the monitoring website @@ -43,48 +41,38 @@ github: https://github.com/apache/hertzbeat > In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. - ![](/img/docs/start/ssl_3.png) - ![](/img/docs/start/ssl_11.png) 4. Set the threshold (triggered when the certificate expires) > System Page -> Alarms -> Alarm Thresholds -> New Thresholds - ![](/img/docs/start/ssl_4.png) > Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. - ![](/img/docs/start/ssl_5.png) > Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. - ![](/img/docs/start/ssl_6.png) - 5. 
Set the threshold (triggered one week before the certificate expires) > In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. - ![](/img/docs/start/ssl_7.png) > Finally, you can see the triggered alarm in the alarm center. - ![](/img/docs/start/ssl_8.png) - 6. Alarm notification (in time notification via Dingding WeChat Feishu, etc.) > Monitoring Tool -> Alarm Notification -> New Receiver - ![](/img/docs/start/ssl_10.png) For token configuration such as Dingding WeChat Feishu, please refer to the help document @@ -93,7 +81,6 @@ https://hertzbeat.apache.org/docs/help/alert_dingtalk > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured - ![](/img/docs/start/ssl_11.png) 7. OK When the threshold is triggered, we can receive the corresponding alarm message. If there is no notification, you can also view the alarm information in the alarm center. 
diff --git a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md index 1f73a4151eb..4048520bfe2 100644 --- a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md +++ b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md @@ -1,85 +1,90 @@ --- id: tdengine-init title: Use Time Series Database TDengine to Store Metrics Data (Optional) -sidebar_label: Use TDengine Store Metrics +sidebar_label: Use TDengine Store Metrics --- Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) > It is recommended to use VictoriaMetrics as metrics storage. -TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. +TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** -Note⚠️ Need TDengine 3.x Version. +Note⚠️ Need TDengine 3.x Version. -> If you have TDengine environment, can directly skip to create a database instance. +> If you have TDengine environment, can directly skip to create a database instance. +### Install TDengine via Docker -### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). 
- After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` -2. Install TDengine with Docker - ```shell - $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ - -v /opt/taosdata:/var/lib/taos \ - --name tdengine -e TZ=Asia/Shanghai \ - tdengine/tdengine:3.0.4.0 - ``` - `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. - `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. - use```$ docker ps``` to check if the database started successfully - -### Create database instance +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` +> +> 2. Install TDengine with Docker +> +> ```shell +> $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ +> -v /opt/taosdata:/var/lib/taos \ +> --name tdengine -e TZ=Asia/Shanghai \ +> tdengine/tdengine:3.0.4.0 +> ``` +> +> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. +> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. +> use```$ docker ps``` to check if the database started successfully + +### Create database instance + +1. Enter database Docker container -1. Enter database Docker container ``` $ docker exec -it tdengine /bin/bash ``` 2. 
Create database named hertzbeat - After entering the container,execute `taos` command as follows: - + After entering the container,execute `taos` command as follows: + ``` root@tdengine-server:~/TDengine-server# taos Welcome to the TDengine shell from Linux, Client Version Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> ``` - - execute commands to create database - + + execute commands to create database + ``` taos> show databases; taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` - + The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). A data file every 10 days, memory blocks buffer is 16MB. -3. Check if hertzbeat database has been created success - +3. Check if hertzbeat database has been created success + ``` taos> show databases; taos> use hertzbeat; ``` -**Note⚠️If you install TDengine using package** +**Note⚠️If you install TDengine using package** > In addition to start the server,you must execute `systemctl start taosadapter` to start adapter -### Configure the database connection in hertzbeat `application.yml` configuration file +### Configure the database connection in hertzbeat `application.yml` configuration file 1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.td-engine` data source parameters, URL account and password. + Replace `warehouse.store.td-engine` data source parameters, URL account and password. ```yaml warehouse: @@ -101,16 +106,21 @@ warehouse: ### FAQ 1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? 
+ > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database -3. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. +3. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed + +> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. 4. 
The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] + > Please check if the configuration parameters are correct > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory +> You can check the startup logs according to the logs directory + diff --git a/home/versioned_docs/version-v1.5.x/start/upgrade.md b/home/versioned_docs/version-v1.5.x/start/upgrade.md index f34f3b36d03..f4d9b8ce14e 100644 --- a/home/versioned_docs/version-v1.5.x/start/upgrade.md +++ b/home/versioned_docs/version-v1.5.x/start/upgrade.md @@ -1,7 +1,7 @@ --- id: upgrade title: HertzBeat New Version Upgrade -sidebar_label: Version Upgrade Guide +sidebar_label: Version Upgrade Guide --- **HertzBeat Release Version List** @@ -14,7 +14,6 @@ Apache HertzBeat (incubating)'s metadata information is stored in H2 or Mysql, P **You need to save and back up the data files of the database and monitoring templates yml files before upgrading** - ### Upgrade For Docker Deploy 1. If using custom monitoring templates @@ -22,30 +21,26 @@ Apache HertzBeat (incubating)'s metadata information is stored in H2 or Mysql, P - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - And mount the template define directory when docker start `-v $(pwd)/define:/opt/hertzbeat/define` - `-v $(pwd)/define:/opt/hertzbeat/define` - -2. If using the built-in default H2 database +2. 
If using the built-in default H2 database - Need to mount or back up `-v $(pwd)/data:/opt/hertzbeat/data` database file directory in the container `/opt/hertzbeat/data` - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - Refer to [Docker installation of HertzBeat](docker-deploy) to create a new container using a new image. Note that the database file directory needs to be mounted `-v $(pwd)/data:/opt/hertzbeat/data` - -3. If using external relational database Mysql, PostgreSQL +3. If using external relational database Mysql, PostgreSQL - No need to mount the database file directory in the backup container - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - Refer to [Docker installation HertzBeat](docker-deploy) to create a new container using the new image, and configure the database connection in `application.yml` - ### Upgrade For Package Deploy -1. If using the built-in default H2 database +1. If using the built-in default H2 database - Back up the database file directory under the installation package `/opt/hertzbeat/data` - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - Refer to [Installation package to install HertzBeat](package-deploy) to start using the new installation package - -2. If using external relational database Mysql, PostgreSQL +2. 
If using external relational database Mysql, PostgreSQL - No need to back up the database file directory under the installation package - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - Refer to [Installation package to install HertzBeat](package-deploy) to start with the new installation package and configure the database connection in `application.yml` -**HAVE FUN** +**HAVE FUN** diff --git a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md index 327bc4999af..3d0c22901a8 100644 --- a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md @@ -1,7 +1,7 @@ --- id: victoria-metrics-init title: Use Time Series Database VictoriaMetrics to Store Metrics Data (Recommended) -sidebar_label: Use VictoriaMetrics Store Metrics(Recommended) +sidebar_label: Use VictoriaMetrics Store Metrics(Recommended) --- Apache HertzBeat (incubating)'s historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) @@ -15,17 +15,19 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t > If you already have an VictoriaMetrics environment, you can skip directly to the YML configuration step. -### Install VictoriaMetrics via Docker +### Install VictoriaMetrics via Docker + > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -1. Download and install Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). 
- After the installation you can check if the Docker version normally output at the terminal. - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. Install VictoriaMetrics via Docker +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> After the installation you can check if the Docker version normally output at the terminal. +> +> ``` +> $ docker -v +> Docker version 20.10.12, build e91ed57 +> ``` + +2. Install VictoriaMetrics via Docker ```shell $ docker run -d -p 8428:8428 \ @@ -34,8 +36,8 @@ $ docker run -d -p 8428:8428 \ victoriametrics/victoria-metrics:v1.95.1 ``` - `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory - use```$ docker ps``` to check if the database started successfully +`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory +use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file @@ -61,5 +63,7 @@ warehouse: ### FAQ -1. Do both the time series databases need to be configured? Can they both be used? +1. Do both the time series databases need to be configured? Can they both be used? + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. 
+ diff --git a/home/versioned_docs/version-v1.5.x/template.md b/home/versioned_docs/version-v1.5.x/template.md index 4bdf785e588..cee7aa05055 100644 --- a/home/versioned_docs/version-v1.5.x/template.md +++ b/home/versioned_docs/version-v1.5.x/template.md @@ -4,27 +4,27 @@ title: Monitoring Template Here sidebar_label: Monitoring Template --- -> Apache HertzBeat (incubating) is an open source, real-time monitoring tool with custom-monitor and agentLess. - +> Apache HertzBeat (incubating) is an open source, real-time monitoring tool with custom-monitor and agentLess. +> > We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? -Here is the architecture. +Here is the architecture. ![hertzBeat](/img/docs/hertzbeat-arch.png) -**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** +**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** ![](/img/docs/advanced/extend-point-1.png) **Welcome everyone to contribute your customized general monitoring type YML template during use. The available templates are as follows:** -### Application service monitoring +### Application service monitoring  👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
+ 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
+ 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
+ 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
@@ -32,7 +32,7 @@ Here is the architecture.  👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
-### Database monitoring +### Database monitoring  👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
@@ -49,7 +49,7 @@ Here is the architecture.  👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
-### Operating system monitoring +### Operating system monitoring  👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
@@ -64,7 +64,6 @@ Here is the architecture.  👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- ### Middleware monitoring  👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
@@ -78,13 +77,12 @@ Here is the architecture.  👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- ### CloudNative monitoring  👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
-### Network monitoring +### Network monitoring  👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
diff --git a/pom.xml b/pom.xml index 449c1d20e46..31f9f010b8f 100644 --- a/pom.xml +++ b/pom.xml @@ -139,6 +139,7 @@ 3.2.1 3.10.0 0.8.11 + 2.40.0 @@ -378,7 +379,7 @@ okhttp ${okhttp.version} - + com.github.ben-manes.caffeine caffeine @@ -481,6 +482,37 @@ + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + false + + + home/**/*.md + + + + Markdown Formatter + (^-*\n$)([\s\S]*?)(-+$) + ---$2--- + + + + true + + + + + spotless-check + + check + + validate + + + From 35616d2763609daf241f5791f53d5b056ae87821 Mon Sep 17 00:00:00 2001 From: aias00 Date: Tue, 13 Aug 2024 14:19:08 +0800 Subject: [PATCH 180/257] [improve] add windows i18n (#2513) Co-authored-by: tomsun28 Co-authored-by: Calvin --- .../src/main/resources/define/app-windows.yml | 195 ++++++++++++++++++ 1 file changed, 195 insertions(+) diff --git a/manager/src/main/resources/define/app-windows.yml b/manager/src/main/resources/define/app-windows.yml index 4a2669e1196..6e06b2cbb6d 100644 --- a/manager/src/main/resources/define/app-windows.yml +++ b/manager/src/main/resources/define/app-windows.yml @@ -110,29 +110,59 @@ params: metrics: # metrics - system - name: system + i18n: + zh-CN: 系统 + en-US: System # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue priority: 0 fields: - field: name type: 1 + i18n: + zh-CN: 名称 + en-US: Name - field: descr type: 1 + i18n: + zh-CN: 描述 + en-US: Description - field: uptime type: 1 + i18n: + zh-CN: 运行时间 + en-US: Uptime - field: numUsers type: 0 + i18n: + zh-CN: 用户数 + en-US: Number of Users - field: services type: 0 + i18n: + zh-CN: 服务数 + en-US: Number of Services - field: processes type: 0 + i18n: + zh-CN: 进程数 + en-US: Number of Processes - field: responseTime type: 0 + i18n: + zh-CN: 响应时间 + en-US: Response Time unit: ms - field: location type: 1 + 
i18n: + zh-CN: 位置 + en-US: Location - field: memory type: 0 + i18n: + zh-CN: 内存 + en-US: Memory unit: GB units: - memory=KB->GB @@ -155,28 +185,58 @@ metrics: memory: 1.3.6.1.2.1.25.2.2.0 - name: processes + i18n: + zh-CN: 进程 + en-US: Processes priority: 1 fields: - field: hrSWRunIndex type: 0 + i18n: + zh-CN: 进程 Index + en-US: Process Index - field: hrSWRunName type: 1 + i18n: + zh-CN: 进程名称 + en-US: Process Name label: true - field: hrSWRunID type: 1 + i18n: + zh-CN: 进程 ID + en-US: Process ID - field: hrSWRunPath type: 1 + i18n: + zh-CN: 进程路径 + en-US: Process Path - field: hrSWRunParameters type: 1 + i18n: + zh-CN: 进程参数 + en-US: Process Parameters - field: hrSWRunType type: 0 + i18n: + zh-CN: 进程类型 + en-US: Process Type - field: hrSWRunStatus type: 0 + i18n: + zh-CN: 进程状态 + en-US: Process Status - field: hrSWRunPerfCPU type: 0 + i18n: + zh-CN: 进程占用 CPU + en-US: Process CPU unit: 's' - field: hrSWRunPerfMem type: 0 + i18n: + zh-CN: 进程占用内存 + en-US: Process Memory unit: 'KB' calculates: - hrSWRunPerfCPU = hrSWRunPerfCPU / 100 @@ -200,19 +260,37 @@ metrics: hrSWRunPerfMem: 1.3.6.1.2.1.25.5.1.1.2 - name: services + i18n: + zh-CN: 服务 + en-US: Services priority: 1 fields: - field: svSvcName type: 1 + i18n: + zh-CN: 服务名称 + en-US: Service Name label: true - field: svSvcInstalledState type: 0 + i18n: + zh-CN: 服务安装状态 + en-US: Service Installed State - field: svSvcOperatingState type: 0 + i18n: + zh-CN: 服务运行状态 + en-US: Service Operating State - field: svSvcCanBeUninstalled type: 0 + i18n: + zh-CN: 服务是否可以被卸载 + en-US: Service Can Be Uninstalled - field: svSvcCanBePaused type: 0 + i18n: + zh-CN: 服务是否可以被暂停 + en-US: Service Can Be Paused protocol: snmp snmp: host: ^_^host^_^ @@ -229,17 +307,32 @@ metrics: svSvcCanBePaused: 1.3.6.1.4.1.77.1.2.3.1.5 - name: installed + i18n: + zh-CN: 安装 + en-US: Installed priority: 1 fields: - field: hrSWInstalledIndex type: 0 + i18n: + zh-CN: 安装 Index + en-US: Installed Index - field: hrSWInstalledName type: 1 + i18n: + zh-CN: 安装名称 + en-US: 
Installed Name label: true - field: hrSWInstalledID type: 1 + i18n: + zh-CN: 安装 ID + en-US: Installed ID - field: hrSWInstalledType type: 0 + i18n: + zh-CN: 安装类型 + en-US: Installed Type protocol: snmp snmp: host: ^_^host^_^ @@ -255,12 +348,21 @@ metrics: hrSWInstalledType: 1.3.6.1.2.1.25.6.3.1.4 - name: cpu + i18n: + zh-CN: CPU + en-US: CPU priority: 1 fields: - field: hrProcessorFrwID type: 1 + i18n: + zh-CN: 处理器 ID + en-US: Processor ID - field: hrProcessorLoad type: 0 + i18n: + zh-CN: 处理器负载 + en-US: Processor Load unit: '%' protocol: snmp snmp: @@ -275,24 +377,45 @@ metrics: hrProcessorLoad: 1.3.6.1.2.1.25.3.3.1.2 - name: storages + i18n: + zh-CN: 存储 + en-US: Storages priority: 1 fields: - field: index type: 0 + i18n: + zh-CN: 存储 Index + en-US: Storage Index - field: descr type: 1 + i18n: + zh-CN: 存储描述 + en-US: Storage Description label: true - field: size + i18n: + zh-CN: 存储大小 + en-US: Storage Size type: 0 unit: MB - field: free type: 0 + i18n: + zh-CN: 存储空闲 + en-US: Storage Free unit: MB - field: used type: 0 + i18n: + zh-CN: 存储占用 + en-US: Storage Used unit: MB - field: usage type: 0 + i18n: + zh-CN: 存储使用率 + en-US: Storage Usage unit: '%' # (Not required) Monitor indicator alias, which maps to the indicator name above. 
The field used to collect interface data is not directly the final indicator name, and this alias is required for mapping translation aliasFields: @@ -329,16 +452,31 @@ metrics: hrStorageAllocationUnits: 1.3.6.1.2.1.25.2.3.1.4 - name: disk + i18n: + zh-CN: 磁盘 + en-US: Disk priority: 2 fields: - field: hrDiskStorageAccess type: 0 + i18n: + zh-CN: 磁盘存储访问 + en-US: Disk Storage Access - field: hrDiskStorageMedia type: 0 + i18n: + zh-CN: 磁盘存储介质 + en-US: Disk Storage Media - field: hrDiskStorageRemoveble type: 0 + i18n: + zh-CN: 磁盘存储是否可移动 + en-US: Disk Storage Removeble - field: hrDiskStorageCapacity type: 0 + i18n: + zh-CN: 磁盘存储容量 + en-US: Disk Storage Capacity unit: MB units: - hrDiskStorageCapacity=KB->MB @@ -357,10 +495,16 @@ metrics: hrDiskStorageCapacity: 1.3.6.1.2.1.25.3.6.1.4 - name: network + i18n: + zh-CN: 网络 + en-US: Network priority: 3 fields: - field: number type: 1 + i18n: + zh-CN: 编号 + en-US: Number protocol: snmp snmp: host: ^_^host^_^ @@ -372,41 +516,80 @@ metrics: number: 1.3.6.1.2.1.2.1.0 - name: interfaces + i18n: + zh-CN: 接口 + en-US: Interfaces priority: 4 fields: - field: index type: 0 + i18n: + zh-CN: 序号 + en-US: Index - field: descr type: 1 + i18n: + zh-CN: 描述 + en-US: Description label: true - field: mtu type: 0 + i18n: + zh-CN: 最大传输单元 + en-US: MTU unit: 'byte' - field: speed type: 0 + i18n: + zh-CN: 速度 + en-US: Speed unit: 'KB/s' - field: in_octets type: 0 + i18n: + zh-CN: 输入字节数 + en-US: In Octets unit: 'byte' - field: in_discards type: 0 + i18n: + zh-CN: 输入丢弃数 + en-US: In Discards unit: 'package' - field: in_errors type: 0 + i18n: + zh-CN: 输入错误数 + en-US: In Errors unit: 'package' - field: out_octets type: 0 + i18n: + zh-CN: 输出字节数 + en-US: Out Octets unit: 'byte' - field: out_discards type: 0 + i18n: + zh-CN: 输出丢弃数 + en-US: Out Discards unit: 'package' - field: out_errors type: 0 + i18n: + zh-CN: 输出错误数 + en-US: Out Errors unit: 'package' - field: admin_status type: 1 + i18n: + zh-CN: 管理状态 + en-US: Admin Status - field: oper_status type: 1 + 
i18n: + zh-CN: 运行状态 + en-US: Oper Status aliasFields: - ifIndex - ifDescr @@ -456,15 +639,27 @@ metrics: ifOperStatus: 1.3.6.1.2.1.2.2.1.8 - name: devices + i18n: + zh-CN: 设备 + en-US: Devices priority: 4 fields: - field: index type: 1 + i18n: + zh-CN: 序号 + en-US: Index label: true - field: descr type: 1 + i18n: + zh-CN: 描述 + en-US: Description - field: status type: 1 + i18n: + zh-CN: 状态 + en-US: Status aliasFields: - hrDeviceIndex - hrDeviceDescr From ed21cc3a94abfdeefa0035daf7553b645ae25677 Mon Sep 17 00:00:00 2001 From: Calvin Date: Tue, 13 Aug 2024 22:29:56 +0800 Subject: [PATCH 181/257] [improve] fix spotless check failed (#2521) --- .../version-v1.4.x/others/developer.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md index 0b503032d0f..5fa3a9c1929 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md @@ -1,7 +1,7 @@ --- id: developer title: 开发者们 -sidebar_label: 开发者们 +sidebar_label: 开发者们 --- ## ✨ HertzBeat的成员们 @@ -26,7 +26,7 @@ sidebar_label: 开发者们
-cert +cert ## ✨ HertzBeat的开发者们 @@ -259,5 +259,4 @@ Thanks these wonderful people, welcome to join us: [贡献者指南](contributin - - + From ffa81a68f718bc513f75654caba7dfecfa67a672 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Wed, 14 Aug 2024 00:14:30 +0800 Subject: [PATCH 182/257] [infra] add sub git module hertzbeat-helm-chart (#2522) --- .gitmodules | 21 ++ script/helm/hertzbeat-helm-chart | 1 + script/helm/hertzbeat/.helmignore | 23 -- script/helm/hertzbeat/Chart.yaml | 37 --- script/helm/hertzbeat/README.md | 161 ---------- script/helm/hertzbeat/templates/NOTES.txt | 22 -- script/helm/hertzbeat/templates/_helpers.tpl | 67 ---- .../templates/collector/configmap.yaml | 60 ---- .../templates/collector/deployment.yaml | 84 ----- .../hertzbeat/templates/collector/hpa.yaml | 45 --- .../templates/database/configmap.yaml | 36 --- .../hertzbeat/templates/database/pvc.yaml | 46 --- .../hertzbeat/templates/database/service.yaml | 32 -- .../templates/database/statefulset.yaml | 116 ------- .../templates/manager/configmap.yaml | 300 ------------------ .../templates/manager/deployment.yaml | 89 ------ .../hertzbeat/templates/manager/ingress.yaml | 48 --- .../templates/manager/service-cluster.yaml | 33 -- .../templates/manager/service-expose.yaml | 64 ---- .../hertzbeat/templates/tsdb/configmap.yaml | 25 -- script/helm/hertzbeat/templates/tsdb/pvc.yaml | 46 --- .../hertzbeat/templates/tsdb/service.yaml | 32 -- .../hertzbeat/templates/tsdb/statefulset.yaml | 87 ----- script/helm/hertzbeat/values.yaml | 143 --------- 24 files changed, 22 insertions(+), 1596 deletions(-) create mode 100644 .gitmodules create mode 160000 script/helm/hertzbeat-helm-chart delete mode 100644 script/helm/hertzbeat/.helmignore delete mode 100644 script/helm/hertzbeat/Chart.yaml delete mode 100644 script/helm/hertzbeat/README.md delete mode 100644 script/helm/hertzbeat/templates/NOTES.txt delete mode 100644 script/helm/hertzbeat/templates/_helpers.tpl delete mode 100644 
script/helm/hertzbeat/templates/collector/configmap.yaml delete mode 100644 script/helm/hertzbeat/templates/collector/deployment.yaml delete mode 100644 script/helm/hertzbeat/templates/collector/hpa.yaml delete mode 100644 script/helm/hertzbeat/templates/database/configmap.yaml delete mode 100644 script/helm/hertzbeat/templates/database/pvc.yaml delete mode 100644 script/helm/hertzbeat/templates/database/service.yaml delete mode 100644 script/helm/hertzbeat/templates/database/statefulset.yaml delete mode 100644 script/helm/hertzbeat/templates/manager/configmap.yaml delete mode 100644 script/helm/hertzbeat/templates/manager/deployment.yaml delete mode 100644 script/helm/hertzbeat/templates/manager/ingress.yaml delete mode 100644 script/helm/hertzbeat/templates/manager/service-cluster.yaml delete mode 100644 script/helm/hertzbeat/templates/manager/service-expose.yaml delete mode 100644 script/helm/hertzbeat/templates/tsdb/configmap.yaml delete mode 100644 script/helm/hertzbeat/templates/tsdb/pvc.yaml delete mode 100644 script/helm/hertzbeat/templates/tsdb/service.yaml delete mode 100644 script/helm/hertzbeat/templates/tsdb/statefulset.yaml delete mode 100644 script/helm/hertzbeat/values.yaml diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000000..a7ef9fc5dde --- /dev/null +++ b/.gitmodules @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +[submodule "script/helm/hertzbeat-helm-chart"] + path = script/helm/hertzbeat-helm-chart + url = git@github.com:apache/hertzbeat-helm-chart.git + branch = main diff --git a/script/helm/hertzbeat-helm-chart b/script/helm/hertzbeat-helm-chart new file mode 160000 index 00000000000..74027a5fdae --- /dev/null +++ b/script/helm/hertzbeat-helm-chart @@ -0,0 +1 @@ +Subproject commit 74027a5fdaed41693842b343789d4e49a57a5771 diff --git a/script/helm/hertzbeat/.helmignore b/script/helm/hertzbeat/.helmignore deleted file mode 100644 index 0e8a0eb36f4..00000000000 --- a/script/helm/hertzbeat/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/script/helm/hertzbeat/Chart.yaml b/script/helm/hertzbeat/Chart.yaml deleted file mode 100644 index 9bedb3948b3..00000000000 --- a/script/helm/hertzbeat/Chart.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v2 -name: hertzbeat -description: An open-source, real-time monitoring system with custom monitoring, high performance cluster and agentless capabilities. -type: application -home: https://hertzbeat.apache.org/ -sources: - - https://github.com/apache/hertzbeat -maintainers: - - name: hertzbeat-dev - email: dev@hertzbeat.apache.org -icon: https://raw.githubusercontent.com/apache/hertzbeat/master/home/static/img/hertzbeat-logo.png -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.6.0 -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: "v1.6.0" diff --git a/script/helm/hertzbeat/README.md b/script/helm/hertzbeat/README.md deleted file mode 100644 index 0ebff08af35..00000000000 --- a/script/helm/hertzbeat/README.md +++ /dev/null @@ -1,161 +0,0 @@ -# Helm Chart for Apache HertzBeat (incubating) - -[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/hertzbeat)](https://artifacthub.io/packages/search?repo=hertzbeat) - -

hertzbeat: An open-source, real-time monitoring system with custom monitoring, high performance cluster and agentless capabilities.

— Open in Artifact Hub
- - -## What is Apache HertzBeat (incubating)? - -[Apache HertzBeat](https://github.com/apache/hertzbeat) (incubating) is an easy-to-use, open source, real-time monitoring system with agentless, high performance cluster, prometheus-compatible, offers powerful custom monitoring and status page building capabilities. - -### Features - -* Combines **monitoring, alarm, and notification** features into one platform, and supports monitoring for web service, program, database, cache, os, webserver, middleware, bigdata, cloud-native, network, custom and more. -* Easy to use and agentless, web-based and with one-click monitoring and alerting, zero learning curve. -* Makes protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, allowing you to collect any metrics by simply configuring the template `YML` file online. Imagine being able to quickly adapt to a new monitoring type like K8s or Docker simply by configuring online with HertzBeat. -* Compatible with the `Prometheus` ecosystem and more, can monitoring what `Prometheus` can monitoring with few clicks on webui. -* High performance, supports horizontal expansion of multi-collector clusters, multi-isolated network monitoring and cloud-edge collaboration. -* Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. -* Provides powerful status page building capabilities, easily communicate the real-time status of your service to users. - - -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help developers and teams quickly build their own monitoring system. - -## Helm Chart for HertzBeat - -This [Helm](https://github.com/kubernetes/helm) chart installs [HertzBeat](https://github.com/apache/hertzbeat) in a Kubernetes cluster. 
Welcome to [contribute](https://github.com/apache/hertzbeat/tree/master/script/helm) to Helm Chart for HertzBeat. - -## Prerequisites - -- Kubernetes cluster 1.20+ -- Helm v3.2.0+ - -## Installation - -### Add Helm repository - -```bash -helm repo add hertzbeat https://charts.hertzbeat.com/ -helm repo update -``` - -### Configure the chart - -The following items can be set via `--set` flag during installation or configured by editing the `values.yaml` directly (need to download the chart first). - -#### Configure how to expose HertzBeat service - -- **Ingress**: The ingress controller must be installed in the Kubernetes cluster. -- **ClusterIP**: Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. -- **NodePort**: Exposes the service on each Node’s IP at a static port (the NodePort). You’ll be able to contact the NodePort service, from outside the cluster, by requesting `NodeIP:NodePort`. -- **LoadBalancer**: Exposes the service externally using a cloud provider’s load balancer. - -#### Configure the external URL - -The external URL for HertzBeat core service is used to: - -1. populate the docker/helm commands showed on portal -2. populate the token service URL returned to docker client - -Format: `protocol://domain[:port]`. Usually: - -- if service exposed via `Ingress`, the `domain` should be the value of `expose.ingress.hosts` -- if service exposed via `ClusterIP`, the `domain` should be the value of `expose.clusterIP.name` -- if service exposed via `NodePort`, the `domain` should be the IP address of one Kubernetes node -- if service exposed via `LoadBalancer`, set the `domain` as your own domain name and add a CNAME record to map the domain name to the one you got from the cloud provider - -If HertzBeat is deployed behind the proxy, set it as the URL of proxy. - -#### Configure how to persist data - -- **Disable**: The data does not survive the termination of a pod. 
-- **Persistent Volume Claim(default)**: A default `StorageClass` is needed in the Kubernetes cluster to dynamically provision the volumes. Specify another StorageClass in the `storageClass` or set `existingClaim` if you already have existing persistent volumes to use. - -#### Configure the other items listed in [configuration](#configuration) section - -### Install the chart - -Install the HertzBeat helm chart with a release name `my-release`: -```bash -helm install hertzbeat hertzbeat/hertzbeat -``` - -## Uninstallation - -To uninstall/delete the `hertzbeat` deployment: -```bash -helm uninstall hertzbeat -``` - -## Configuration - -The following table lists the configurable parameters of the HertzBeat chart and the default values. - -| Parameter | Description | Default | -|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------| -| **Expose** | | | -| `expose.type` | How to expose the service: `Ingress`, `ClusterIP`, `NodePort` or `LoadBalancer`, other values will be ignored and the creation of service will be skipped. | `Ingress` | -| `expose.clusterIP.name` | The name of ClusterIP service | `hertzbeat` | -| `expose.clusterIP.annotations` | The annotations attached to the ClusterIP service | {} | -| `expose.clusterIP.ports.port` | The service port HertzBeat listens on when serving HTTP | `80` | -| `expose.nodePort.name` | The name of NodePort service | `hertzbeat` | -| `expose.nodePort.ports.port` | The service port HertzBeat listens on when serving HTTP | `80` | -| `expose.nodePort.ports.nodePort` | The node port HertzBeat listens on when serving HTTP | `30002` | -| `expose.loadBalancer.IP` | The IP of the loadBalancer. 
It only works when loadBalancer supports assigning IP | `""` | -| `expose.loadBalancer.ports.port` | The service port HertzBeat listens on when serving HTTP | `80` | -| `expose.loadBalancer.sourceRanges` | List of IP address ranges to assign to loadBalancerSourceRanges | [] | -| **Manager** | | | -| `manager.account.username` | The hertzbeat account username | `admin` | -| `manager.account.password` | The hertzbeat account password | `hertzbeat` | -| `manager.resources` | The [resources] to allocate for container | undefined | -| `manager.nodeSelector` | Node labels for pod assignment | `{}` | -| `manager.tolerations` | Tolerations for pod assignment | `[]` | -| `manager.affinity` | Node/Pod affinities | `{}` | -| `manager.podAnnotations` | Annotations to add to the nginx pod | `{}` | -| **Collector** | | | -| `collector.replicaCount` | The replica count | `1` | -| `collector.autoscaling.enable` | Is enable auto scaling collector replicas | `1` | -| `collector.resources` | The [resources] to allocate for container | undefined | -| `collector.nodeSelector` | Node labels for pod assignment | `{}` | -| `collector.tolerations` | Tolerations for pod assignment | `[]` | -| `collector.affinity` | Node/Pod affinities | `{}` | -| `collector.podAnnotations` | Annotations to add to the nginx pod | `{}` | -| **Database** | | | -| `database.timezone` | The database system timezone | `1` | -| `database.rootPassword` | The database root user password | `1` | -| `database.persistence.enabled` | Enable the data persistence or not | `true` | -| `database.persistence.resourcePolicy` | Setting it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted. Does not affect PVCs created for internal database and redis components. 
| `keep` | -| `database.persistence.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | -| `database.persistence.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | -| `database.persistence.subPath` | The sub path used in the volume | | -| `database.persistence.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `database.persistence.size` | The size of the volume | `5Gi` | -| `database.persistence.annotations` | The annotations of the volume | | -| `database.resources` | The [resources] to allocate for container | undefined | -| `database.nodeSelector` | Node labels for pod assignment | `{}` | -| `database.tolerations` | Tolerations for pod assignment | `[]` | -| `database.affinity` | Node/Pod affinities | `{}` | -| `database.podAnnotations` | Annotations to add to the nginx pod | `{}` | -| **TSDB** | | | -| `tsdb.timezone` | The database system timezone | `1` | -| `tsdb.persistence.enabled` | Enable the data persistence or not | `true` | -| `tsdb.persistence.resourcePolicy` | Setting it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted. Does not affect PVCs created for internal database and redis components. | `keep` | -| `tsdb.persistence.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | -| `tsdb.persistence.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). 
Set it to `-` to disable dynamic provisioning | | -| `tsdb.persistence.subPath` | The sub path used in the volume | | -| `tsdb.persistence.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `tsdb.persistence.size` | The size of the volume | `5Gi` | -| `tsdb.persistence.annotations` | The annotations of the volume | | -| `tsdb.resources` | The [resources] to allocate for container | undefined | -| `tsdb.nodeSelector` | Node labels for pod assignment | `{}` | -| `tsdb.tolerations` | Tolerations for pod assignment | `[]` | -| `tsdb.affinity` | Node/Pod affinities | `{}` | -| `tsdb.podAnnotations` | Annotations to add to the nginx pod | `{}` | - - -[resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ -[hertzbeat]: https://github.com/apache/hertzbeat/ -[artifacthub]: https://artifacthub.io/ -[helm]: https://helm.sh/ - diff --git a/script/helm/hertzbeat/templates/NOTES.txt b/script/helm/hertzbeat/templates/NOTES.txt deleted file mode 100644 index 376794e1b12..00000000000 --- a/script/helm/hertzbeat/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if (eq .Values.expose.type "Ingress")}} -http{{ if $.Values.expose.ingress.tls.enable }}s{{ end }}://{{ .Values.expose.ingress.host }} - -{{- else if contains "NodePort" .Values.expose.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hertzbeat.fullname" . }}-http) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT - -{{- else if contains "LoadBalancer" .Values.expose.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "hertzbeat.fullname" . 
}}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hertzbeat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.expose.loadBalancer.ports.port }} - -{{- else if contains "ClusterIP" .Values.expose.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "hertzbeat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT - -{{- end }} diff --git a/script/helm/hertzbeat/templates/_helpers.tpl b/script/helm/hertzbeat/templates/_helpers.tpl deleted file mode 100644 index 4b64c3a038e..00000000000 --- a/script/helm/hertzbeat/templates/_helpers.tpl +++ /dev/null @@ -1,67 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "hertzbeat.name" -}} -{{- default .Chart.Name | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "hertzbeat.fullname" -}} -{{- $name := default .Chart.Name }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} - -{{- define "hertzbeat.manager" -}} -{{- printf "%s" (include "hertzbeat.fullname" .) -}} -{{- end -}} - -{{- define "hertzbeat.manager.host" -}} -{{- printf "%s-cluster" (include "hertzbeat.manager" .) 
-}} -{{- end -}} - -{{- define "hertzbeat.collector" -}} -{{- printf "%s-collector" (include "hertzbeat.fullname" .) -}} -{{- end -}} - -{{- define "hertzbeat.database" -}} -{{- printf "%s-database" (include "hertzbeat.fullname" .) -}} -{{- end -}} - -{{- define "hertzbeat.tsdb" -}} -{{- printf "%s-tsdb" (include "hertzbeat.fullname" .) -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "hertzbeat.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "hertzbeat.labels" -}} -helm.sh/chart: {{ include "hertzbeat.chart" . }} -{{ include "hertzbeat.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "hertzbeat.selectorLabels" -}} -app.kubernetes.io/name: {{ include "hertzbeat.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} diff --git a/script/helm/hertzbeat/templates/collector/configmap.yaml b/script/helm/hertzbeat/templates/collector/configmap.yaml deleted file mode 100644 index 94345ab3fdf..00000000000 --- a/script/helm/hertzbeat/templates/collector/configmap.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: "{{ include "hertzbeat.collector" . }}" - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -data: - IDENTITY: "" - MANAGER_HOST: "{{ include "hertzbeat.manager.host" . }}" - MANAGER_PORT: "1158" - application.yml: |+ - server: - port: 1159 - spring: - application: - name: ${HOSTNAME:@hertzbeat-collector@}${PID} - profiles: - active: cluster - jackson: - default-property-inclusion: ALWAYS - # need to disable spring boot mongodb auto config, or default mongodb connection tried and failed... - autoconfigure: - exclude: org.springframework.boot.autoconfigure.mongo.MongoAutoConfiguration, org.springframework.boot.autoconfigure.data.mongo.MongoDataAutoConfiguration - - --- - spring: - config: - activate: - on-profile: cluster - - collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} - - common: - queue: - type: netty diff --git a/script/helm/hertzbeat/templates/collector/deployment.yaml b/script/helm/hertzbeat/templates/collector/deployment.yaml deleted file mode 100644 index 98954b7f9f4..00000000000 --- a/script/helm/hertzbeat/templates/collector/deployment.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "hertzbeat.collector" . }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} - component: collector -spec: - {{- if not .Values.collector.autoscaling.enabled }} - replicas: {{ .Values.collector.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "hertzbeat.selectorLabels" . | nindent 6 }} - component: collector - template: - metadata: - {{- with .Values.collector.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "hertzbeat.selectorLabels" . | nindent 8 }} - component: collector - spec: - securityContext: - {{- toYaml .Values.collector.podSecurityContext | nindent 8 }} - containers: - - name: hertzbeat-collector - securityContext: - {{- toYaml .Values.collector.securityContext | nindent 12 }} - image: "{{ .Values.collector.image.repository }}:{{ .Values.collector.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.collector.image.pullPolicy }} - ports: - - containerPort: 1159 - protocol: TCP - livenessProbe: - tcpSocket: - port: 1159 - readinessProbe: - tcpSocket: - port: 1159 - envFrom: - - configMapRef: - name: "{{ include "hertzbeat.collector" . 
}}" - volumeMounts: - - mountPath: /opt/hertzbeat-collector/config/application.yml - subPath: application.yml - name: application - resources: - {{- toYaml .Values.collector.resources | nindent 12 }} - volumes: - - name: application - configMap: - name: {{ include "hertzbeat.collector" . }} - {{- with .Values.collector.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.collector.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.collector.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/script/helm/hertzbeat/templates/collector/hpa.yaml b/script/helm/hertzbeat/templates/collector/hpa.yaml deleted file mode 100644 index ee82be3d4af..00000000000 --- a/script/helm/hertzbeat/templates/collector/hpa.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -{{- if .Values.collector.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "hertzbeat.collector" . }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "hertzbeat.collector" . 
}} - minReplicas: {{ .Values.collector.autoscaling.minReplicas }} - maxReplicas: {{ .Values.collector.autoscaling.maxReplicas }} - metrics: - {{- if .Values.collector.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.collector.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.collector.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.collector.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/script/helm/hertzbeat/templates/database/configmap.yaml b/script/helm/hertzbeat/templates/database/configmap.yaml deleted file mode 100644 index 4c9c3e91c43..00000000000 --- a/script/helm/hertzbeat/templates/database/configmap.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: "{{ include "hertzbeat.database" . }}" - labels: - {{- include "hertzbeat.labels" . 
| nindent 4 }} -data: - TZ: "{{ .Values.database.timezone }}" - POSTGRES_PASSWORD: "{{ .Values.database.rootPassword }}" - POSTGRES_USER: "root" - schema.sql: |+ - CREATE EXTENSION IF NOT EXISTS dblink; - - DO $$ - BEGIN - PERFORM dblink_exec('', 'CREATE DATABASE hertzbeat'); - EXCEPTION WHEN duplicate_database THEN RAISE NOTICE '%, skipping', SQLERRM USING ERRCODE = SQLSTATE; - END - $$; diff --git a/script/helm/hertzbeat/templates/database/pvc.yaml b/script/helm/hertzbeat/templates/database/pvc.yaml deleted file mode 100644 index 69f6ad44937..00000000000 --- a/script/helm/hertzbeat/templates/database/pvc.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -{{- if .Values.database.persistence.enabled }} -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ template "hertzbeat.database" . }} - annotations: - {{- range $key, $value := .Values.database.persistence.annotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- if eq .Values.database.persistence.resourcePolicy "keep" }} - helm.sh/resource-policy: keep - {{- end }} - labels: - {{- include "hertzbeat.labels" . 
| nindent 4 }} - component: database -spec: - accessModes: - - {{ .Values.database.persistence.accessMode }} - resources: - requests: - storage: {{ .Values.database.persistence.size }} - {{- if .Values.database.persistence.storageClass }} - {{- if eq "-" .Values.database.persistence.storageClass }} - storageClassName: "" - {{- else }} - storageClassName: {{ .Values.database.persistence.storageClass }} - {{- end }} - {{- end }} -{{- end }} diff --git a/script/helm/hertzbeat/templates/database/service.yaml b/script/helm/hertzbeat/templates/database/service.yaml deleted file mode 100644 index e95d860d24d..00000000000 --- a/script/helm/hertzbeat/templates/database/service.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v1 -kind: Service -metadata: - name: {{ include "hertzbeat.database" . }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -spec: - type: ClusterIP - ports: - - port: 5432 - targetPort: 5432 - protocol: TCP - selector: - {{- include "hertzbeat.selectorLabels" . 
| nindent 4 }} - component: database diff --git a/script/helm/hertzbeat/templates/database/statefulset.yaml b/script/helm/hertzbeat/templates/database/statefulset.yaml deleted file mode 100644 index 85d4e9f608c..00000000000 --- a/script/helm/hertzbeat/templates/database/statefulset.yaml +++ /dev/null @@ -1,116 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "hertzbeat.database" . }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} - component: database -spec: - replicas: 1 - serviceName: {{ include "hertzbeat.database" . }} - selector: - matchLabels: - {{- include "hertzbeat.selectorLabels" . | nindent 6 }} - component: database - template: - metadata: - {{- with .Values.database.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "hertzbeat.selectorLabels" . 
| nindent 8 }} - component: database - spec: - securityContext: - {{- toYaml .Values.database.podSecurityContext | nindent 8 }} - containers: - - name: database - image: {{ .Values.database.image.repository }}:{{ .Values.database.image.tag }} - imagePullPolicy: {{ .Values.database.image.pullPolicy }} - livenessProbe: - tcpSocket: - port: 5432 - initialDelaySeconds: 300 - periodSeconds: 10 - readinessProbe: - tcpSocket: - port: 5432 - initialDelaySeconds: 1 - periodSeconds: 10 - resources: - {{- toYaml .Values.database.resources | nindent 12 }} - envFrom: - - configMapRef: - name: "{{ include "hertzbeat.database" . }}" - volumeMounts: - - name: data - mountPath: /var/lib/postgresql/data - subPath: "" - - mountPath: /docker-entrypoint-initdb.d/schema.sql - subPath: schema.sql - name: schema - volumes: - - name: schema - configMap: - name: {{ include "hertzbeat.database" . }} - {{- if not .Values.database.persistence.enabled }} - - name: data - emptyDir: {} - {{- else }} - - name: data - persistentVolumeClaim: - claimName: {{ .Values.database.persistence.existingClaim | default (include "hertzbeat.database" .) }} - {{- end }} - {{- with .Values.database.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.database.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.database.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{/* {{- if and .Values.database.persistence.enabled (not .Values.database.persistence.existingClaim) }}*/}} -{{/* volumeClaimTemplates:*/}} -{{/* - metadata:*/}} -{{/* name: data*/}} -{{/* labels:*/}} -{{/* {{- include "hertzbeat.labels" . 
| indent 10 }}*/}} -{{/* annotations:*/}} -{{/* {{- range $key, $value := .Values.database.persistence.annotations }}*/}} -{{/* {{ $key }}: {{ $value | quote }}*/}} -{{/* {{- end }}*/}} -{{/* spec:*/}} -{{/* accessModes: [{{ .Values.database.persistence.accessMode | quote }}]*/}} -{{/* {{- if .Values.database.persistence.storageClass }}*/}} -{{/* {{- if (eq "-" .Values.database.persistence.storageClass) }}*/}} -{{/* storageClassName: ""*/}} -{{/* {{- else }}*/}} -{{/* storageClassName: "{{ .Values.database.persistence.storageClass }}"*/}} -{{/* {{- end }}*/}} -{{/* {{- end }}*/}} -{{/* resources:*/}} -{{/* requests:*/}} -{{/* storage: {{ .Values.database.persistence.size | quote }}*/}} -{{/* {{- end -}}*/}} diff --git a/script/helm/hertzbeat/templates/manager/configmap.yaml b/script/helm/hertzbeat/templates/manager/configmap.yaml deleted file mode 100644 index f96c2f7d556..00000000000 --- a/script/helm/hertzbeat/templates/manager/configmap.yaml +++ /dev/null @@ -1,300 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: "{{ include "hertzbeat.manager" . }}" - labels: - {{- include "hertzbeat.labels" . 
| nindent 4 }} -data: - application.yml: |+ - server: - port: 1157 - spring: - application: - name: ${HOSTNAME:@hertzbeat@}${PID} - profiles: - active: prod - mvc: - static-path-pattern: /** - jackson: - default-property-inclusion: ALWAYS - web: - resources: - static-locations: - - classpath:/dist/ - - classpath:../dist/ - # need to disable spring boot mongodb auto config, or default mongodb connection tried and failed.. - autoconfigure: - exclude: org.springframework.boot.autoconfigure.mongo.MongoAutoConfiguration, org.springframework.boot.autoconfigure.data.mongo.MongoDataAutoConfiguration - freemarker: - enabled: false - management: - health: - mail: - enabled: off - endpoints: - web: - exposure: - include: - - 'metrics' - - 'health' - - 'env' - enabled-by-default: on - - sureness: - container: jakarta_servlet - auths: - - digest - - basic - - jwt - jwt: - secret: {{ .Values.manager.jwtSecretKey }} - - --- - spring: - config: - activate: - on-profile: prod - - datasource: - driver-class-name: org.postgresql.Driver - username: root - password: {{ .Values.database.rootPassword }} - url: jdbc:postgresql://{{ include "hertzbeat.database" . }}:5432/hertzbeat - hikari: - max-lifetime: 120000 - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.PostgreSQLPlatform - database: postgresql - properties: - eclipselink: - logging: - level: SEVERE - flyway: - enabled: true - clean-disabled: true - baseline-on-migrate: true - baseline-version: 1 - locations: - - classpath:db/migration/{vendor} - mail: - # Attention: this is mail server address. 
- # 请注意此为邮件服务器地址:qq邮箱为 smtp.qq.com qq 企业邮箱为 smtp.exmail.qq.com - host: smtp.qq.com - username: tancloud@qq.com - # Attention: this is not email account password, this requires an email authorization code - # 请注意此非邮箱账户密码 此需填写邮箱授权码 - password: your-password - #Attention: Tencent mail smtps 465,smtp 587 - #请注意腾讯邮箱465为smtps,587为smtp - port: 587 - properties: - mail: - smtp: - socketFactoryClass: javax.net.ssl.SSLSocketFactory - ssl: - enable: true - - common: - queue: - # memory or kafka - type: memory - # properties when queue type is kafka - kafka: - servers: 127.0.0.1:9092 - metrics-data-topic: async-metrics-data - alerts-data-topic: async-alerts-data - - warehouse: - store: - # store history metrics data, enable only one below - # 存储历史数据方式, 下方只能enabled启用一种方式 - jpa: - enabled: false - victoria-metrics: - enabled: true - url: http://{{ include "hertzbeat.tsdb" . }}:8428 - username: root - password: root - td-engine: - enabled: false - driver-class-name: com.taosdata.jdbc.rs.RestfulDriver - url: jdbc:TAOS-RS://{{ include "hertzbeat.tsdb" . }}:6041/hertzbeat - username: root - password: taosdata - greptime: - enabled: false - endpoint: localhost:4001 - iot-db: - enabled: false - host: {{ include "hertzbeat.tsdb" . 
}} - rpc-port: 6667 - username: root - password: root - # org.apache.hertzbeat.warehouse.config.IotDbVersion: V_0_13 || V_1_0 - version: V_1_0 - query-timeout-in-ms: -1 - # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) - # data expire time, unit:ms, default '7776000000'(90 days, -1:never expire) - expire-time: '7776000000' - influxdb: - enabled: false - server-url: http://127.0.0.1:8086 - username: root - password: root - expire-time: '30d' - replication: 1 - - # store real-time metrics data, enable only one below - # 存储实时数据方式, 下方只能enabled启用一种方式 - memory: - enabled: true - init-size: 16 - redis: - enabled: false - host: 127.0.0.1 - port: 6379 - password: 123456 - db: 0 - - alerter: - # custom console url - console-url: https://console.tancloud.io - # we work - we-work-webhook-url: https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key= - # ding ding talk - ding-talk-webhook-url: https://oapi.dingtalk.com/robot/send?access_token= - # fei shu fly book - fly-book-webhook-url: https://open.feishu.cn/open-apis/bot/v2/hook/ - # telegram - telegram-webhook-url: https://api.telegram.org/bot%s/sendMessage - # discord - discord-webhook-url: https://discord.com/api/v9/channels/%s/messages - # server酱 - server-chan-webhook-url: https://sctapi.ftqq.com/%s.send - # gotify - gotify-webhook-url: http://127.0.0.1/message?token=%s - - scheduler: - server: - enabled: true - port: 1158 - - - sureness.yml: |+ - resourceRole: - - /api/account/auth/refresh===post===[admin,user,guest] - - /api/apps/**===get===[admin,user,guest] - - /api/monitor/**===get===[admin,user,guest] - - /api/monitor/**===post===[admin,user] - - /api/monitor/**===put===[admin,user] - - /api/monitor/**===delete==[admin] - - /api/monitors/**===get===[admin,user,guest] - - /api/monitors/**===post===[admin,user] - - /api/monitors/**===put===[admin,user] - - /api/monitors/**===delete===[admin] - - /api/alert/**===get===[admin,user,guest] - - /api/alert/**===post===[admin,user] - - /api/alert/**===put===[admin,user] - - 
/api/alert/**===delete===[admin] - - /api/alerts/**===get===[admin,user,guest] - - /api/alerts/**===post===[admin,user] - - /api/alerts/**===put===[admin,user] - - /api/alerts/**===delete===[admin] - - /api/notice/**===get===[admin,user,guest] - - /api/notice/**===post===[admin,user] - - /api/notice/**===put===[admin,user] - - /api/notice/**===delete===[admin] - - /api/tag/**===get===[admin,user,guest] - - /api/tag/**===post===[admin,user] - - /api/tag/**===put===[admin,user] - - /api/tag/**===delete===[admin] - - /api/summary/**===get===[admin,user,guest] - - /api/summary/**===post===[admin,user] - - /api/summary/**===put===[admin,user] - - /api/summary/**===delete===[admin] - - /api/collector/**===get===[admin,user,guest] - - /api/collector/**===post===[admin,user] - - /api/collector/**===put===[admin,user] - - /api/collector/**===delete===[admin] - - /api/status/page/**===get===[admin,user,guest] - - /api/status/page/**===post===[admin,user] - - /api/status/page/**===put===[admin,user] - - /api/status/page/**===delete===[admin] - - # config the resource restful api that need bypass auth protection - # rule: api===method - # eg: /api/v1/source3===get means /api/v1/source3===get can be access by anyone, no need auth. 
- excludedResource: - - /api/alerts/report/**===* - - /api/account/auth/**===* - - /api/i18n/**===get - - /api/apps/hierarchy===get - - /api/push/**===* - - /api/status/page/public/**===* - # web ui resource - - /===get - - /dashboard/**===get - - /monitors/**===get - - /alert/**===get - - /account/**===get - - /setting/**===get - - /passport/**===get - - /status/**===get - - /**/*.html===get - - /**/*.js===get - - /**/*.css===get - - /**/*.ico===get - - /**/*.ttf===get - - /**/*.png===get - - /**/*.gif===get - - /**/*.jpg===get - - /**/*.svg===get - - /**/*.json===get - - /**/*.woff===get - - /**/*.eot===get - # swagger ui resource - - /swagger-resources/**===get - - /v2/api-docs===get - - /v3/api-docs===get - # h2 database - - /h2-console/**===* - - # account info config - # eg: admin has role [admin,user], password is hertzbeat - # eg: tom has role [user], password is hertzbeat - # eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 - account: - - appId: {{ .Values.manager.account.username }} - credential: {{ .Values.manager.account.password }} - role: [admin] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] - - appId: lili - # credential = MD5(password + salt) - # plain password: hertzbeat - # attention: digest authentication does not support salted encrypted password accounts - credential: 94C6B34E7A199A9F9D4E1F208093B489 - salt: 123 - role: [user] diff --git a/script/helm/hertzbeat/templates/manager/deployment.yaml b/script/helm/hertzbeat/templates/manager/deployment.yaml deleted file mode 100644 index 77fd44bb86a..00000000000 --- a/script/helm/hertzbeat/templates/manager/deployment.yaml +++ /dev/null @@ -1,89 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "hertzbeat.manager" . }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} - component: manager -spec: - replicas: 1 - selector: - matchLabels: - {{- include "hertzbeat.selectorLabels" . | nindent 6 }} - component: manager - template: - metadata: - {{- with .Values.manager.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "hertzbeat.selectorLabels" . 
| nindent 8 }} - component: manager - spec: - securityContext: - {{- toYaml .Values.manager.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.manager.securityContext | nindent 12 }} - image: "{{ .Values.manager.image.repository }}:{{ .Values.manager.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.manager.image.pullPolicy }} - ports: - - name: http - containerPort: 1157 - protocol: TCP - - name: cluster - containerPort: 1158 - protocol: TCP - livenessProbe: - tcpSocket: - port: 1157 - readinessProbe: - tcpSocket: - port: 1157 - volumeMounts: - - mountPath: /opt/hertzbeat/config/application.yml - subPath: application.yml - name: application - - mountPath: /opt/hertzbeat/config/sureness.yml - subPath: sureness.yml - name: sureness - resources: - {{- toYaml .Values.manager.resources | nindent 12 }} - volumes: - - name: application - configMap: - name: {{ include "hertzbeat.manager" . }} - - name: sureness - configMap: - name: {{ include "hertzbeat.manager" . }} - {{- with .Values.manager.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.manager.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.manager.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/script/helm/hertzbeat/templates/manager/ingress.yaml b/script/helm/hertzbeat/templates/manager/ingress.yaml deleted file mode 100644 index 53aa725e242..00000000000 --- a/script/helm/hertzbeat/templates/manager/ingress.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -{{- if .Values.expose.ingress.enabled }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: "{{ include "hertzbeat.manager" . }}-ingress" - annotations: - {{- toYaml .Values.expose.ingress.annotations | nindent 4 }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -spec: - {{if .Values.expose.ingress.ingressClassName }} - ingressClassName: {{ .Values.expose.ingress.ingressClassName }} - {{end}} - rules: - - host: {{ .Values.expose.ingress.host }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: "{{ include "hertzbeat.manager" . }}-http" - port: - number: 1157 - tls: - {{- if .Values.expose.ingress.tls.enabled }} - - secretName: {{ .Values.expose.ingress.tls.secretName }} - hosts: - - {{ .Values.expose.ingress.host }} - {{- end }} -{{- end }} diff --git a/script/helm/hertzbeat/templates/manager/service-cluster.yaml b/script/helm/hertzbeat/templates/manager/service-cluster.yaml deleted file mode 100644 index 2262cb67ff0..00000000000 --- a/script/helm/hertzbeat/templates/manager/service-cluster.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v1 -kind: Service -metadata: - name: "{{ include "hertzbeat.manager" . }}-cluster" - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -spec: - type: ClusterIP - ports: - - port: 1158 - targetPort: 1158 - protocol: TCP - name: cluster - selector: - {{- include "hertzbeat.selectorLabels" . | nindent 4 }} - component: manager diff --git a/script/helm/hertzbeat/templates/manager/service-expose.yaml b/script/helm/hertzbeat/templates/manager/service-expose.yaml deleted file mode 100644 index 6da190153b5..00000000000 --- a/script/helm/hertzbeat/templates/manager/service-expose.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - -{{- if or (eq .Values.expose.type "ClusterIP") (eq .Values.expose.type "NodePort") (eq .Values.expose.type "LoadBalancer") (eq .Values.expose.type "Ingress") }} -apiVersion: v1 -kind: Service -metadata: - name: "{{ include "hertzbeat.manager" . }}-http" - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -{{- if or (eq .Values.expose.type "ClusterIP") (eq .Values.expose.type "Ingress") }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.expose.clusterIP.ports.port }} - targetPort: 1157 - selector: - {{- include "hertzbeat.selectorLabels" . | nindent 4 }} - component: manager -{{- else if eq .Values.expose.type "NodePort" }} -spec: - type: NodePort - ports: - - port: {{ .Values.expose.nodePort.ports.port }} - targetPort: 1157 - protocol: TCP - name: http - {{- if .Values.expose.nodePort.ports.nodePort }} - nodePort: {{ .Values.expose.nodePort.ports.nodePort }} - {{- end }} - selector: - {{- include "hertzbeat.selectorLabels" . | nindent 4 }} - component: manager -{{- else if eq .Values.expose.type "LoadBalancer" }} -spec: - type: LoadBalancer - {{- with .Values.expose.loadBalancer.sourceRanges }} - loadBalancerSourceRanges: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- if .Values.expose.loadBalancer.ip }} - loadBalancerIP: {{ .Values.expose.loadBalancer.ip }} - {{- end }} - ports: - - name: http - port: {{ .Values.expose.loadBalancer.ports.port }} - targetPort: 1157 -{{- end }} -{{- end }} diff --git a/script/helm/hertzbeat/templates/tsdb/configmap.yaml b/script/helm/hertzbeat/templates/tsdb/configmap.yaml deleted file mode 100644 index a8263dd2a6e..00000000000 --- a/script/helm/hertzbeat/templates/tsdb/configmap.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: "{{ include "hertzbeat.tsdb" . }}" - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -data: - TZ: "{{ .Values.database.timezone }}" diff --git a/script/helm/hertzbeat/templates/tsdb/pvc.yaml b/script/helm/hertzbeat/templates/tsdb/pvc.yaml deleted file mode 100644 index 409615122c4..00000000000 --- a/script/helm/hertzbeat/templates/tsdb/pvc.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -{{- if .Values.tsdb.persistence.enabled }} -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ template "hertzbeat.tsdb" . }} - annotations: - {{- range $key, $value := .Values.tsdb.persistence.annotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- if eq .Values.tsdb.persistence.resourcePolicy "keep" }} - helm.sh/resource-policy: keep - {{- end }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} - component: tsdb -spec: - accessModes: - - {{ .Values.tsdb.persistence.accessMode }} - resources: - requests: - storage: {{ .Values.tsdb.persistence.size }} - {{- if .Values.tsdb.persistence.storageClass }} - {{- if eq "-" .Values.tsdb.persistence.storageClass }} - storageClassName: "" - {{- else }} - storageClassName: {{ .Values.tsdb.persistence.storageClass }} - {{- end }} - {{- end }} -{{- end }} diff --git a/script/helm/hertzbeat/templates/tsdb/service.yaml b/script/helm/hertzbeat/templates/tsdb/service.yaml deleted file mode 100644 index 44f992f5655..00000000000 --- a/script/helm/hertzbeat/templates/tsdb/service.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: v1 -kind: Service -metadata: - name: {{ include "hertzbeat.tsdb" . 
}} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} -spec: - type: ClusterIP - ports: - - port: 8428 - targetPort: 8428 - protocol: TCP - selector: - {{- include "hertzbeat.selectorLabels" . | nindent 4 }} - component: tsdb diff --git a/script/helm/hertzbeat/templates/tsdb/statefulset.yaml b/script/helm/hertzbeat/templates/tsdb/statefulset.yaml deleted file mode 100644 index 891ada92e79..00000000000 --- a/script/helm/hertzbeat/templates/tsdb/statefulset.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "hertzbeat.tsdb" . }} - labels: - {{- include "hertzbeat.labels" . | nindent 4 }} - component: tsdb -spec: - replicas: 1 - serviceName: {{ include "hertzbeat.tsdb" . }} - selector: - matchLabels: - {{- include "hertzbeat.selectorLabels" . | nindent 6 }} - component: tsdb - template: - metadata: - {{- with .Values.tsdb.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "hertzbeat.selectorLabels" . 
| nindent 8 }} - component: tsdb - spec: - securityContext: - {{- toYaml .Values.tsdb.podSecurityContext | nindent 8 }} - containers: - - name: tsdb - image: {{ .Values.tsdb.image.repository }}:{{ .Values.tsdb.image.tag }} - imagePullPolicy: {{ .Values.tsdb.image.pullPolicy }} - livenessProbe: - tcpSocket: - port: 8428 - initialDelaySeconds: 300 - periodSeconds: 10 - readinessProbe: - tcpSocket: - port: 8428 - initialDelaySeconds: 1 - periodSeconds: 10 - resources: - {{- toYaml .Values.tsdb.resources | nindent 12 }} - envFrom: - - configMapRef: - name: "{{ include "hertzbeat.tsdb" . }}" - volumeMounts: - - name: data - mountPath: /victoria-metrics-data - subPath: "" - volumes: - {{- if not .Values.tsdb.persistence.enabled }} - - name: data - emptyDir: {} - {{- else }} - - name: data - persistentVolumeClaim: - claimName: {{ .Values.tsdb.persistence.existingClaim | default (include "hertzbeat.tsdb" .) }} - {{- end }} - {{- with .Values.tsdb.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tsdb.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tsdb.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/script/helm/hertzbeat/values.yaml b/script/helm/hertzbeat/values.yaml deleted file mode 100644 index 49ff4443988..00000000000 --- a/script/helm/hertzbeat/values.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Default values for hertzbeat. - -manager: - image: - repository: apache/hertzbeat - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "" - account: - username: "admin" - password: "hertzbeat" - jwtSecretKey: "CyaFv0bwq2Eik0jdrKUtsA6dx3sDJeFV143R - LnfKefTjsIfJLBa2YkhEqEGtcHDTNe4CU6+9 - 8tVt4bisXQ13rbN0oxhUZR73M6EByXIO+SV5 - dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp" - podAnnotations: { } - podSecurityContext: { } - securityContext: { } - resources: { } - nodeSelector: { } - tolerations: [ ] - affinity: { } - -collector: - image: - repository: apache/hertzbeat-collector - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "" - replicaCount: 1 - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 20 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - - podAnnotations: { } - podSecurityContext: { } - securityContext: { } - resources: { } - nodeSelector: { } - tolerations: [ ] - affinity: { } - -database: - image: - repository: postgres - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. 
- tag: "15" - timezone: "Asia/Shanghai" - rootPassword: "123456" - persistence: - enabled: true - existingClaim: "" - storageClass: "" - subPath: "" - accessMode: ReadWriteOnce - size: 4Gi - annotations: {} - resourcePolicy: "keep" - - podAnnotations: { } - podSecurityContext: { } - securityContext: { } - resources: { } - nodeSelector: { } - tolerations: [ ] - affinity: { } - -tsdb: - image: - repository: victoriametrics/victoria-metrics - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "v1.95.1" - timezone: "Asia/Shanghai" - persistence: - enabled: true - existingClaim: "" - storageClass: "" - subPath: "" - accessMode: ReadWriteOnce - size: 4Gi - annotations: {} - resourcePolicy: "keep" - - podAnnotations: { } - podSecurityContext: { } - securityContext: { } - resources: { } - nodeSelector: { } - tolerations: [ ] - affinity: { } - -# Expose Network -expose: - # Set how to expose the service. Set the type as "ClusterIP","NodePort","LoadBalancer" or "Ingress" - # and fill the information in the corresponding section - type: Ingress - clusterIP: - ports: - # The service port Harbor listens on when serving HTTP - port: 1157 - nodePort: - ports: - # The service port Harbor listens on when serving HTTP - port: 1157 - # The node port Harbor listens on when serving HTTP - nodePort: 31157 - loadBalancer: - # Set the IP if the LoadBalancer supports assigning IP - ip: "" - ports: - # The service port Harbor listens on when serving HTTP - port: 1157 - sourceRanges: [ ] - ingress: - enabled: true - ingressClassName: "" - host: "hertzbeat.domain" - annotations: {} - tls: - enabled: true - secretName: your-tls-secret From 9b065f7ec054c2e96f8361995b45c0fe36ea7c40 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Wed, 14 Aug 2024 00:17:35 +0800 Subject: [PATCH 183/257] [infra] support build and push docker image to hub nightly everyday (#2517) --- .github/workflows/nightly-build.yml | 60 +++++++++++++++++++++++++++++ 
script/docker/server/build.sh | 7 ++-- 2 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/nightly-build.yml diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml new file mode 100644 index 00000000000..8a6060da8b4 --- /dev/null +++ b/.github/workflows/nightly-build.yml @@ -0,0 +1,60 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +name: Nightly CI + +on: + schedule: + # trigger at 00:00 everyday + - cron: '0 0 * * *' + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: docker/setup-qemu-action@v3 + - name: Set up JDK 17 + uses: actions/setup-java@v1 + with: + java-version: 17 + - name: Build with Maven + run: mvn clean -B package -Prelease --file pom.xml + + - name: Build Image + env: + IMAGE_PUSH: false + IMAGE_LOAD: true + IMAGE_VERSION: nightly + run: | + docker buildx create --use --name myBuilder --driver docker-container + docker buildx use myBuilder + + ./script/docker/server/build.sh + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and Push + uses: docker/build-push-action@v6 + with: + context: . + push: true + tags: apache/hertzbeat:nightly diff --git a/script/docker/server/build.sh b/script/docker/server/build.sh index 658b0f86cb9..2a71872ae1b 100755 --- a/script/docker/server/build.sh +++ b/script/docker/server/build.sh @@ -23,9 +23,10 @@ CURRENT_DIR=`pwd` cd ../../../dist # auto detect hertzbeat version VERSION=`ls apache-hertzbeat-*-incubating-docker-compose.tar.gz| awk -F"-" '{print $3}'` -# use the version param -if [ -n "$1" ]; then - VERSION="$1"; + +# when env IMAGE_VERSION is set, use it as version +if [ -n "$IMAGE_VERSION" ]; then + VERSION="$IMAGE_VERSION"; fi # compile context dir From c12b4c0d84736fa670d0114f94c8fc9f94f54d41 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Thu, 15 Aug 2024 21:10:55 +0800 Subject: [PATCH 184/257] [bugfix] fix github action build docker image error (#2525) Signed-off-by: tomsun28 --- .github/workflows/backend-build-test.yml | 17 ++++----- .github/workflows/nightly-build.yml | 47 ++++++++++++++++-------- e2e/docker-compose.yml | 2 +- script/docker/collector/Dockerfile | 6 +-- script/docker/collector/build.sh | 12 +++--- script/docker/server/Dockerfile | 9 ++--- 
script/docker/server/build.sh | 24 ++++++------ 7 files changed, 62 insertions(+), 55 deletions(-) diff --git a/.github/workflows/backend-build-test.yml b/.github/workflows/backend-build-test.yml index 3ddbc454fe3..ed07af72104 100644 --- a/.github/workflows/backend-build-test.yml +++ b/.github/workflows/backend-build-test.yml @@ -22,7 +22,7 @@ name: Backend CI on: push: - branches: [ master, dev ] + branches: [ master, dev, action* ] paths-ignore: - '**.md' - 'home/**' @@ -55,15 +55,12 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: Build Image - env: - IMAGE_PUSH: false - IMAGE_LOAD: true - IMAGE_PLATFORM: linux/amd64 - run: | - docker buildx create --use --name myBuilder --driver docker-container - docker buildx use myBuilder - - ./script/docker/server/build.sh + uses: docker/build-push-action@v3 + with: + context: ./dist + file: ./script/docker/server/Dockerfile + push: false + tags: apache/hertzbeat:test - name: Run E2E run: | diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index 8a6060da8b4..edbecde000d 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -21,40 +21,55 @@ on: schedule: # trigger at 00:00 everyday - cron: '0 0 * * *' + push: + branches: [ action* ] jobs: build: runs-on: ubuntu-latest + if: ${{ github.repository == 'apache/hertzbeat' }} steps: - uses: actions/checkout@v4 - - uses: docker/setup-qemu-action@v3 - name: Set up JDK 17 uses: actions/setup-java@v1 with: java-version: 17 - - name: Build with Maven - run: mvn clean -B package -Prelease --file pom.xml - - - name: Build Image - env: - IMAGE_PUSH: false - IMAGE_LOAD: true - IMAGE_VERSION: nightly + + - name: Build the Frontend run: | - docker buildx create --use --name myBuilder --driver docker-container - docker buildx use myBuilder + cd web-app + yarn install + yarn package - ./script/docker/server/build.sh + - name: Build the Backend + run: | + mvn clean install + mvn clean package -Prelease -DskipTests 
+ cd collector + mvn clean package -Pcluster -DskipTests - - name: Login to Docker Hub + - uses: docker/setup-qemu-action@v3 + - uses: docker/setup-buildx-action@v3 + - name: Log in to Docker Hub uses: docker/login-action@v3 - with: + with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and Push + - name: Build and Push Server uses: docker/build-push-action@v6 with: - context: . + context: ./dist + file: ./script/docker/server/Dockerfile + platforms: linux/amd64,linux/arm64 push: true tags: apache/hertzbeat:nightly + + - name: Build and Push Collector + uses: docker/build-push-action@v6 + with: + context: ./dist + file: ./script/docker/collector/Dockerfile + platforms: linux/amd64,linux/arm64 + push: true + tags: apache/hertzbeat-collector:nightly diff --git a/e2e/docker-compose.yml b/e2e/docker-compose.yml index b9e1b12ce90..a8527fe519a 100644 --- a/e2e/docker-compose.yml +++ b/e2e/docker-compose.yml @@ -33,7 +33,7 @@ services: - hertzbeat hertzbeat: - image: apache/hertzbeat + image: apache/hertzbeat:test container_name: e2e-hertzbeat volumes: - ./logs/:/opt/hertzbeat/logs/ diff --git a/script/docker/collector/Dockerfile b/script/docker/collector/Dockerfile index ff075dc72b5..f5a76f173af 100644 --- a/script/docker/collector/Dockerfile +++ b/script/docker/collector/Dockerfile @@ -17,19 +17,17 @@ FROM openjdk:17-slim-buster -ARG VERSION - MAINTAINER Apache HertzBeat "dev@hertzbeat.apache.org" # Install SSH RUN apt-get update && apt-get install -y openssh-server RUN mkdir /var/run/sshd -ADD apache-hertzbeat-collector-${VERSION}-incubating-bin.tar.gz /opt/ +ADD apache-hertzbeat-collector-*-incubating-bin.tar.gz /opt/ ENV TZ=Asia/Shanghai ENV LANG=en_US.UTF-8 -WORKDIR /opt/apache-hertzbeat-collector-${VERSION}-incubating-bin/ +WORKDIR /opt/apache-hertzbeat-collector-*-incubating-bin/ ENTRYPOINT ["./bin/entrypoint.sh"] diff --git a/script/docker/collector/build.sh b/script/docker/collector/build.sh index 
d5a4515a59d..9bef73f5930 100644 --- a/script/docker/collector/build.sh +++ b/script/docker/collector/build.sh @@ -31,20 +31,20 @@ fi # docker compile context CONTEXT_DIR=`pwd` -COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t apache/hertzbeat-collector:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" --push" +COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t apache/hertzbeat-collector:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push" -#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t apache/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" --push" +#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t apache/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push" -#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat-collector:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" --push" +#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat-collector:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push" -#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" --push" +#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push" # Build Local -#COMMAND="docker build -t apache/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION"" +#COMMAND="docker build -t apache/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR " echo "$COMMAND" $COMMAND -#docker build -t apache/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" +#docker build -t 
apache/hertzbeat-collector:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR diff --git a/script/docker/server/Dockerfile b/script/docker/server/Dockerfile index 8d04a3ff815..d8137f36772 100644 --- a/script/docker/server/Dockerfile +++ b/script/docker/server/Dockerfile @@ -17,9 +17,6 @@ FROM openjdk:17-slim-buster -# add args version -ARG VERSION - MAINTAINER Apache HertzBeat "dev@hertzbeat.apache.org" # Install SSH And Locales @@ -29,12 +26,12 @@ RUN mkdir /var/run/sshd RUN localedef -c -f UTF-8 -i zh_CN zh_CN.UTF-8 RUN localedef -c -f UTF-8 -i en_US en_US.UTF-8 -ADD apache-hertzbeat-${VERSION}-incubating-bin.tar.gz /opt/ - ENV TZ=Asia/Shanghai ENV LANG=en_US.UTF-8 -RUN mv /opt/apache-hertzbeat-${VERSION}-incubating-bin /opt/hertzbeat +ADD apache-hertzbeat-1.*-incubating-bin.tar.gz /opt/ + +RUN mv /opt/apache-hertzbeat-*-incubating-bin /opt/hertzbeat EXPOSE 1157 1158 22 diff --git a/script/docker/server/build.sh b/script/docker/server/build.sh index 2a71872ae1b..842b8eeb7f1 100755 --- a/script/docker/server/build.sh +++ b/script/docker/server/build.sh @@ -24,29 +24,29 @@ cd ../../../dist # auto detect hertzbeat version VERSION=`ls apache-hertzbeat-*-incubating-docker-compose.tar.gz| awk -F"-" '{print $3}'` -# when env IMAGE_VERSION is set, use it as version -if [ -n "$IMAGE_VERSION" ]; then - VERSION="$IMAGE_VERSION"; -fi - # compile context dir CONTEXT_DIR=`pwd` -COMMAND="docker buildx build --platform ${IMAGE_PLATFORM:-linux/arm64,linux/amd64} -t apache/hertzbeat:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" --push=${IMAGE_PUSH:-true} --load=${IMAGE_LOAD:-false}" +COMMAND="docker buildx build --platform ${IMAGE_PLATFORM:-linux/arm64,linux/amd64} -t apache/hertzbeat:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push=${IMAGE_PUSH:-true} --load=${IMAGE_LOAD:-false}" -#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t apache/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" 
--push" +#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t apache/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push" -#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" --push" +#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat:v$VERSION -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push" -#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" --push" +#COMMAND="docker buildx build --platform linux/arm64,linux/amd64 -t quay.io/tancloud/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --push" # Build Local -#COMMAND="docker build -t apache/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION"" +#COMMAND="docker build -t apache/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR " + +echo "$COMMAND" + +$COMMAND + +COMMAND="docker tag apache/hertzbeat:v$VERSION apache/hertzbeat:${IMAGE_VERSION:-latest}" echo "$COMMAND" $COMMAND -docker tag apache/hertzbeat:v$VERSION apache/hertzbeat -#docker build -t apache/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR --build-arg VERSION="$VERSION" +#docker build -t apache/hertzbeat:latest -f $CURRENT_DIR/Dockerfile $CONTEXT_DIR From 361e573d8e9d55d4afb5fe76b1806b0f96fe298e Mon Sep 17 00:00:00 2001 From: YuLuo Date: Fri, 16 Aug 2024 10:49:43 +0800 Subject: [PATCH 185/257] [optimize] use okhttpclient connection pool (#2529) Signed-off-by: yuluo-yx Co-authored-by: Calvin --- .../controller/AlertConvergeController.java | 10 +++++----- .../controller/AlertDefineController.java | 10 +++++----- .../controller/AlertSilenceController.java | 10 +++++----- .../AlertDefineExcelImExportServiceImpl.java | 11 ---------- .../dispatch/unit/impl/TimeLengthConvert.java | 1 - 
.../common/constants/NetworkConstants.java | 18 +++++++++++++++++ .../impl/AbstractAlertNotifyHandlerImpl.java | 3 --- .../manager/config/AiProperties.java | 2 -- .../manager/config/RestTemplateConfig.java | 20 +++++++++++++------ .../history/influxdb/InfluxdbDataStorage.java | 20 ++++++++++++------- 10 files changed, 60 insertions(+), 45 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergeController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergeController.java index df38ea5a95c..f3b97409db9 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergeController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertConvergeController.java @@ -23,6 +23,7 @@ import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.validation.Valid; +import java.util.Objects; import org.apache.hertzbeat.alert.service.AlertConvergeService; import org.apache.hertzbeat.common.entity.alerter.AlertConverge; import org.apache.hertzbeat.common.entity.dto.Message; @@ -69,11 +70,10 @@ public ResponseEntity> modifyAlertConverge(@Valid @RequestBody Ale public ResponseEntity> getAlertConverge( @Parameter(description = "Alarm Converge ID", example = "6565463543") @PathVariable("id") long id) { AlertConverge alertConverge = alertConvergeService.getAlertConverge(id); - if (alertConverge == null) { - return ResponseEntity.ok(Message.fail(MONITOR_NOT_EXIST_CODE, "AlertConverge not exist.")); - } else { - return ResponseEntity.ok(Message.success(alertConverge)); - } + + return Objects.isNull(alertConverge) + ? 
ResponseEntity.ok(Message.fail(MONITOR_NOT_EXIST_CODE, "AlertConverge not exist.")) + : ResponseEntity.ok(Message.success(alertConverge)); } } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefineController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefineController.java index 33f4292bacb..9da2e018640 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefineController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertDefineController.java @@ -24,6 +24,7 @@ import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.validation.Valid; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; import org.apache.hertzbeat.alert.service.AlertDefineService; import org.apache.hertzbeat.common.entity.alerter.AlertDefine; @@ -76,11 +77,10 @@ public ResponseEntity> getAlertDefine( @Parameter(description = "Alarm Definition ID", example = "6565463543") @PathVariable("id") long id) { // Obtaining Monitoring Information AlertDefine alertDefine = alertDefineService.getAlertDefine(id); - if (alertDefine == null) { - return ResponseEntity.ok(Message.fail(MONITOR_NOT_EXIST_CODE, "AlertDefine not exist.")); - } else { - return ResponseEntity.ok(Message.success(alertDefine)); - } + + return Objects.isNull(alertDefine) + ? 
ResponseEntity.ok(Message.fail(MONITOR_NOT_EXIST_CODE, "AlertDefine not exist.")) + : ResponseEntity.ok(Message.success(alertDefine)); } @DeleteMapping(path = "/{id}") diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilenceController.java b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilenceController.java index 56e9af3f022..6e6e1d7f458 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilenceController.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/controller/AlertSilenceController.java @@ -23,6 +23,7 @@ import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.validation.Valid; +import java.util.Objects; import org.apache.hertzbeat.alert.service.AlertSilenceService; import org.apache.hertzbeat.common.entity.alerter.AlertSilence; import org.apache.hertzbeat.common.entity.dto.Message; @@ -69,11 +70,10 @@ public ResponseEntity> modifyAlertSilence(@Valid @RequestBody Aler public ResponseEntity> getAlertSilence( @Parameter(description = "Alarm Silence ID", example = "6565463543") @PathVariable("id") long id) { AlertSilence alertSilence = alertSilenceService.getAlertSilence(id); - if (alertSilence == null) { - return ResponseEntity.ok(Message.fail(MONITOR_NOT_EXIST_CODE, "AlertSilence not exist.")); - } else { - return ResponseEntity.ok(Message.success(alertSilence)); - } + + return Objects.isNull(alertSilence) + ? 
ResponseEntity.ok(Message.fail(MONITOR_NOT_EXIST_CODE, "AlertSilence not exist.")) + : ResponseEntity.ok(Message.success(alertSilence)); } } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java index c89eb4d417e..5ea86d30d63 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/service/impl/AlertDefineExcelImExportServiceImpl.java @@ -98,17 +98,6 @@ public List parseImport(InputStream is) { } } - private TagItem extractTagDataFromRow(Row row) { - String name = getCellValueAsString(row.getCell(7)); - if (StringUtils.hasText(name)) { - TagItem tagItem = new TagItem(); - tagItem.setName(name); - tagItem.setValue(getCellValueAsString(row.getCell(8))); - return tagItem; - } - return null; - } - private List extractTagDataFromRow(Cell cell) { String jsonStr = getCellValueAsString(cell); if (StringUtils.hasText(jsonStr)) { diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/impl/TimeLengthConvert.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/impl/TimeLengthConvert.java index 18fb266ada2..a185d6b1a13 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/impl/TimeLengthConvert.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/unit/impl/TimeLengthConvert.java @@ -29,7 +29,6 @@ @Component public final class TimeLengthConvert implements UnitConvert { - @Override public String convert(String value, String originUnit, String newUnit) { if (value == null || value.isEmpty()) { diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java b/common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java index 17252765e91..de58a2e1fe7 100644 
--- a/common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/NetworkConstants.java @@ -47,4 +47,22 @@ public interface NetworkConstants { String LOCATION = "Location"; + /** + * HttpClient Configuration Constants. + */ + interface HttpClientConstants { + + int READ_TIME_OUT = 6 * 1000; + int WRITE_TIME_OUT = 6 * 1000; + int CONNECT_TIME_OUT = 6 * 1000; + int CHUNK_SIZE = 8196; + int MAX_IDLE_CONNECTIONS = 20; + int KEEP_ALIVE_TIMEOUT = 30 * 1000; + int HTTP_CLIENT_CONNECTION_MANAGER_MAX_PER_ROUTE = 20; + int HTTP_CLIENT_CONNECTION_MANAGER_MAX_TOTAL = 20; + int HTTPCLIENT_KEEP_ALIVE_DURATION = 30 * 1000; + int HTTP_CLIENT_CONNECTION_MANAGER_CLOSE_WAIT_TIME_MS = 1000; + int HTTP_CLIENT_CONNECTION_MANAGER_CLOSE_IDLE_TIME_S = 30; + } + } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java index a44af67211f..492e340d9e8 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java @@ -38,7 +38,6 @@ import org.apache.hertzbeat.common.support.event.SystemConfigChangeEvent; import org.apache.hertzbeat.common.util.ResourceBundleUtil; import org.apache.hertzbeat.manager.component.alerter.AlertNotifyHandler; -import org.apache.hertzbeat.manager.service.NoticeConfigService; import org.springframework.context.event.EventListener; import org.springframework.ui.freemarker.FreeMarkerTemplateUtils; import org.springframework.web.client.RestTemplate; @@ -56,8 +55,6 @@ abstract class AbstractAlertNotifyHandlerImpl implements AlertNotifyHandler { protected RestTemplate restTemplate; @Resource protected AlerterProperties alerterProperties; - 
@Resource - protected NoticeConfigService noticeConfigService; protected String renderContent(NoticeTemplate noticeTemplate, Alert alert) throws TemplateException, IOException { diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java b/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java index a3a1040ffbf..d8628493b75 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java @@ -49,6 +49,4 @@ public class AiProperties { */ private String apiSecret; - - } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/config/RestTemplateConfig.java b/manager/src/main/java/org/apache/hertzbeat/manager/config/RestTemplateConfig.java index d07735c2aee..d856c1d9b78 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/config/RestTemplateConfig.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/config/RestTemplateConfig.java @@ -19,7 +19,9 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; +import okhttp3.ConnectionPool; import okhttp3.OkHttpClient; +import org.apache.hertzbeat.common.constants.NetworkConstants; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.http.client.ClientHttpRequestFactory; @@ -42,12 +44,18 @@ public RestTemplate restTemplate(ClientHttpRequestFactory factory) { @Bean public ClientHttpRequestFactory simpleClientHttpRequestFactory() { - OkHttpClient client = new OkHttpClient.Builder() - .connectTimeout(15, TimeUnit.SECONDS) - .readTimeout(20, TimeUnit.SECONDS) - .writeTimeout(20, TimeUnit.SECONDS) - .build(); - return new OkHttp3ClientHttpRequestFactory(client); + + return new OkHttp3ClientHttpRequestFactory( + new OkHttpClient.Builder() + .readTimeout(NetworkConstants.HttpClientConstants.READ_TIME_OUT, TimeUnit.SECONDS) + 
.writeTimeout(NetworkConstants.HttpClientConstants.WRITE_TIME_OUT, TimeUnit.SECONDS) + .connectTimeout(NetworkConstants.HttpClientConstants.CONNECT_TIME_OUT, TimeUnit.SECONDS) + .connectionPool(new ConnectionPool( + NetworkConstants.HttpClientConstants.MAX_IDLE_CONNECTIONS, + NetworkConstants.HttpClientConstants.KEEP_ALIVE_TIMEOUT, + TimeUnit.SECONDS) + ).build() + ); } } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbDataStorage.java index 65aded4adc6..85a21f3f0f4 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbDataStorage.java @@ -35,8 +35,10 @@ import javax.net.ssl.TrustManager; import javax.net.ssl.X509TrustManager; import lombok.extern.slf4j.Slf4j; +import okhttp3.ConnectionPool; import okhttp3.OkHttpClient; import org.apache.hertzbeat.common.constants.CommonConstants; +import org.apache.hertzbeat.common.constants.NetworkConstants; import org.apache.hertzbeat.common.entity.dto.Value; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.util.JsonUtil; @@ -83,14 +85,18 @@ public InfluxdbDataStorage(InfluxdbProperties influxdbProperties) { } public void initInfluxDb(InfluxdbProperties influxdbProperties) { - OkHttpClient.Builder client = new OkHttpClient.Builder() - .connectTimeout(10, TimeUnit.SECONDS) - .writeTimeout(10, TimeUnit.SECONDS) - .readTimeout(10, TimeUnit.SECONDS) - .retryOnConnectionFailure(true); - client.sslSocketFactory(defaultSslSocketFactory(), defaultTrustManager()); - client.hostnameVerifier(noopHostnameVerifier()); + var client = new OkHttpClient.Builder() + .readTimeout(NetworkConstants.HttpClientConstants.READ_TIME_OUT, TimeUnit.SECONDS) + 
.writeTimeout(NetworkConstants.HttpClientConstants.WRITE_TIME_OUT, TimeUnit.SECONDS) + .connectTimeout(NetworkConstants.HttpClientConstants.CONNECT_TIME_OUT, TimeUnit.SECONDS) + .connectionPool(new ConnectionPool( + NetworkConstants.HttpClientConstants.MAX_IDLE_CONNECTIONS, + NetworkConstants.HttpClientConstants.KEEP_ALIVE_TIMEOUT, + TimeUnit.SECONDS) + ).sslSocketFactory(defaultSslSocketFactory(), defaultTrustManager()) + .hostnameVerifier(noopHostnameVerifier()) + .retryOnConnectionFailure(true); this.influxDb = InfluxDBFactory.connect(influxdbProperties.serverUrl(), influxdbProperties.username(), influxdbProperties.password(), client); // Close it if your application is terminating, or you are not using it anymore. From c8560e6b16d472ce8959b61a022732a2d74466f1 Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Fri, 16 Aug 2024 17:49:57 +0800 Subject: [PATCH 186/257] [bugfix] webhook template json escape (#2532) Co-authored-by: tomsun28 --- .../impl/AbstractAlertNotifyHandlerImpl.java | 36 +++++++++++++++++++ .../impl/WebHookAlertNotifyHandlerImpl.java | 1 + 2 files changed, 37 insertions(+) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java index 492e340d9e8..4ba10283cc5 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/AbstractAlertNotifyHandlerImpl.java @@ -115,6 +115,42 @@ protected String renderContent(NoticeTemplate noticeTemplate, Alert alert) throw return template.replaceAll("((\r\n)|\n)[\\s\t ]*(\\1)+", "$1"); } + protected String escapeJsonStr(String jsonStr){ + if (jsonStr == null) { + return null; + } + + StringBuilder sb = new StringBuilder(); + for (char c : jsonStr.toCharArray()) { 
+ switch (c) { + case '"': + sb.append("\\\""); + break; + case '\\': + sb.append("\\\\"); + break; + case '\b': + sb.append("\\b"); + break; + case '\f': + sb.append("\\f"); + break; + case '\n': + sb.append("\\n"); + break; + case '\r': + sb.append("\\r"); + break; + case '\t': + sb.append("\\t"); + break; + default: + sb.append(c); + } + } + return sb.toString(); + } + @EventListener(SystemConfigChangeEvent.class) public void onEvent(SystemConfigChangeEvent event) { log.info("{} receive system config change event: {}.", this.getClass().getName(), event.getSource()); diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WebHookAlertNotifyHandlerImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WebHookAlertNotifyHandlerImpl.java index b4db3b0773c..7f7b793cb25 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WebHookAlertNotifyHandlerImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/impl/WebHookAlertNotifyHandlerImpl.java @@ -47,6 +47,7 @@ public void send(NoticeReceiver receiver, NoticeTemplate noticeTemplate, Alert a // fix null pointer exception filterInvalidTags(alert); + alert.setContent(escapeJsonStr(alert.getContent())); String webhookJson = renderContent(noticeTemplate, alert); webhookJson = webhookJson.replace(",\n }", "\n }"); From 0ec6a3b6ba9bdde3f1984f4473d27a3e8cedf32c Mon Sep 17 00:00:00 2001 From: b_mountain <49973336+MRgenial@users.noreply.github.com> Date: Fri, 16 Aug 2024 22:03:55 +0800 Subject: [PATCH 187/257] [imporve] simplify NoticeConfig Code. 
(#2530) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 刘进山 Co-authored-by: Calvin --- .../component/alerter/DispatcherAlarm.java | 2 +- .../controller/NoticeConfigController.java | 6 ++-- .../manager/service/NoticeConfigService.java | 8 +---- .../service/impl/NoticeConfigServiceImpl.java | 32 ++++++++----------- .../NoticeConfigControllerTest.java | 2 -- .../service/NoticeConfigServiceTest.java | 4 +-- 6 files changed, 20 insertions(+), 34 deletions(-) diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java index 7aab58b8241..0e688e1c7ce 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/component/alerter/DispatcherAlarm.java @@ -104,7 +104,7 @@ public boolean sendNoticeMsg(NoticeReceiver receiver, NoticeTemplate noticeTempl } private NoticeReceiver getOneReceiverById(Long id) { - return noticeConfigService.getOneReceiverById(id); + return noticeConfigService.getReceiverById(id); } private NoticeTemplate getOneTemplateById(Long id) { diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java index eb1c38f45a8..241bddaa070 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/NoticeConfigController.java @@ -84,8 +84,7 @@ public ResponseEntity> deleteNoticeReceiver( description = "Get a list of message notification recipients based on query filter items") public ResponseEntity>> getReceivers( @Parameter(description = "en: Recipient name,support fuzzy query", example = "tom") @RequestParam(required = false) final 
String name) { - List receivers = noticeConfigService.getNoticeReceivers(name); - return ResponseEntity.ok(Message.success(receivers)); + return ResponseEntity.ok(Message.success(noticeConfigService.getNoticeReceivers(name))); } @PostMapping(path = "/rule") @@ -120,8 +119,7 @@ public ResponseEntity> deleteNoticeRule( description = "Get a list of message notification policies based on query filter items") public ResponseEntity>> getRules( @Parameter(description = "en: Recipient name", example = "rule1") @RequestParam(required = false) final String name) { - List receiverPage = noticeConfigService.getNoticeRules(name); - return ResponseEntity.ok(Message.success(receiverPage)); + return ResponseEntity.ok(Message.success(noticeConfigService.getNoticeRules(name))); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java index 42fde07d0c8..eaedf925837 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/NoticeConfigService.java @@ -93,13 +93,6 @@ public interface NoticeConfigService { */ List getReceiverFilterRule(Alert alert); - /** - * Query the recipient information according to the recipient ID - * @param id Receiver ID - * @return Receiver - */ - NoticeReceiver getOneReceiverById(Long id); - /** * Query the template information according to the template ID * @param id Template ID @@ -121,6 +114,7 @@ public interface NoticeConfigService { */ NoticeRule getNoticeRulesById(Long ruleId); + /** * Add a notification template * @param noticeTemplate template information diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java index 94e0aeb7cd6..df4240a17c3 100644 --- 
a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/NoticeConfigServiceImpl.java @@ -217,11 +217,6 @@ public List getReceiverFilterRule(Alert alert) { .collect(Collectors.toList()); } - @Override - public NoticeReceiver getOneReceiverById(Long id) { - return noticeReceiverDao.findById(id).orElse(null); - } - @Override public NoticeTemplate getOneTemplateById(Long id) { return noticeTemplateDao.findById(id).orElse(null); @@ -230,14 +225,15 @@ public NoticeTemplate getOneTemplateById(Long id) { @Override public NoticeReceiver getReceiverById(Long receiverId) { - return noticeReceiverDao.getReferenceById(receiverId); + return noticeReceiverDao.findById(receiverId).orElse(null); } @Override public NoticeRule getNoticeRulesById(Long ruleId) { - return noticeRuleDao.getReferenceById(ruleId); + return noticeRuleDao.findById(ruleId).orElse(null); } + @Override public void addNoticeTemplate(NoticeTemplate noticeTemplate) { noticeTemplateDao.save(noticeTemplate); @@ -276,17 +272,17 @@ public boolean sendTestMsg(NoticeReceiver noticeReceiver) { tags.put(CommonConstants.TAG_MONITOR_NAME, "100Name"); tags.put(CommonConstants.TAG_MONITOR_HOST, "127.0.0.1"); tags.put(CommonConstants.TAG_THRESHOLD_ID, "200"); - Alert alert = new Alert(); - alert.setTags(tags); - alert.setId(1003445L); - alert.setTarget(ALERT_TEST_TARGET); - alert.setPriority(CommonConstants.ALERT_PRIORITY_CODE_CRITICAL); - alert.setContent(ALERT_TEST_CONTENT); - alert.setAlertDefineId(200L); - alert.setTimes(2); - alert.setStatus((byte) 0); - alert.setFirstAlarmTime(System.currentTimeMillis()); - alert.setLastAlarmTime(System.currentTimeMillis()); + Alert alert = Alert.builder() + .tags(tags) + .id(1003445L) + .target(ALERT_TEST_TARGET) + .priority(CommonConstants.ALERT_PRIORITY_CODE_CRITICAL) + .content(ALERT_TEST_CONTENT) + .alertDefineId(200L) + .times(2) + .status((byte) 0) + 
.firstAlarmTime(System.currentTimeMillis()) + .lastAlarmTime(System.currentTimeMillis()).build(); return dispatcherAlarm.sendNoticeMsg(noticeReceiver, null, alert); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java index 1e753d263e1..88ff0a2fcf1 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/NoticeConfigControllerTest.java @@ -90,7 +90,6 @@ public NoticeReceiver getNoticeReceiver(){ noticeReceiver.setEmail("2762242004@qq.com"); noticeReceiver.setHookUrl("https://www.tancloud.cn"); noticeReceiver.setType((byte) 5); - return noticeReceiver; } @@ -264,7 +263,6 @@ void sendTestMsg() throws Exception { .content(JsonUtil.toJson(noticeReceiver))) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) - //.andExpect(jsonPath("$.msg").value("Notify service not available, please check config!")) .andReturn(); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java index a7c6ac19e10..0f3461403ad 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/NoticeConfigServiceTest.java @@ -202,14 +202,14 @@ void getReceiverFilterRule() { void getReceiverById() { final Long receiverId = 343432325L; noticeConfigService.getReceiverById(receiverId); - verify(noticeReceiverDao, times(1)).getReferenceById(receiverId); + verify(noticeReceiverDao, times(1)).findById(receiverId); } @Test void getNoticeRulesById() { final Long receiverId = 343432325L; noticeConfigService.getNoticeRulesById(receiverId); - verify(noticeRuleDao, 
times(1)).getReferenceById(receiverId); + verify(noticeRuleDao, times(1)).findById(receiverId); } @Test From 53a506569ae5882ffc772dd80c3dc5d372f6f96f Mon Sep 17 00:00:00 2001 From: Jast Date: Fri, 16 Aug 2024 23:56:02 +0800 Subject: [PATCH 188/257] [improve]ElasticSearch historical chart display optimization (#2535) Co-authored-by: tomsun28 Co-authored-by: YuLuo --- manager/src/main/resources/define/app-elasticsearch.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/manager/src/main/resources/define/app-elasticsearch.yml b/manager/src/main/resources/define/app-elasticsearch.yml index 8352171bc3c..be6e3ccb60b 100644 --- a/manager/src/main/resources/define/app-elasticsearch.yml +++ b/manager/src/main/resources/define/app-elasticsearch.yml @@ -297,7 +297,7 @@ metrics: zh-CN: 内存使用率 en-US: Heap Used Percent - field: heap_total - type: 0 + type: 1 unit: 'MB' i18n: zh-CN: 总内存(MB) @@ -309,7 +309,7 @@ metrics: zh-CN: 磁盘剩余容量(GB) en-US: Disk Free - field: disk_total - type: 0 + type: 1 unit: 'GB' i18n: zh-CN: 磁盘总容量 From 9d62270131a42c2bd63bd792145947efa3fbea86 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Sat, 17 Aug 2024 00:04:19 +0800 Subject: [PATCH 189/257] [improve] auto generate readable random monitor name (#2531) Signed-off-by: tomsun28 Co-authored-by: YuLuo --- .../monitor-new/monitor-new.component.ts | 9 +- web-app/src/app/shared/utils/common-util.ts | 119 ++++++++++++++++++ 2 files changed, 122 insertions(+), 6 deletions(-) diff --git a/web-app/src/app/routes/monitor/monitor-new/monitor-new.component.ts b/web-app/src/app/routes/monitor/monitor-new/monitor-new.component.ts index 6534f5adfae..7dc647297a9 100644 --- a/web-app/src/app/routes/monitor/monitor-new/monitor-new.component.ts +++ b/web-app/src/app/routes/monitor/monitor-new/monitor-new.component.ts @@ -18,7 +18,6 @@ */ import { Component, Inject, OnInit } from '@angular/core'; -import { FormGroup } from '@angular/forms'; import { ActivatedRoute, ParamMap, Router } from 
'@angular/router'; import { I18NService } from '@core'; import { ALAIN_I18N_TOKEN, TitleService } from '@delon/theme'; @@ -33,6 +32,7 @@ import { ParamDefine } from '../../../pojo/ParamDefine'; import { AppDefineService } from '../../../service/app-define.service'; import { CollectorService } from '../../../service/collector.service'; import { MonitorService } from '../../../service/monitor.service'; +import { generateReadableRandomString } from '../../../shared/utils/common-util'; @Component({ selector: 'app-monitor-add', @@ -153,11 +153,8 @@ export class MonitorNewComponent implements OnInit { } onHostChange(hostValue: string) { - if (this.monitor.app != 'prometheus') { - let autoName = `${this.monitor.app.toUpperCase()}_${hostValue}`; - if (this.monitor.name == undefined || this.monitor.name == '' || this.monitor.name.startsWith(this.monitor.app.toUpperCase())) { - this.monitor.name = autoName; - } + if (this.monitor.name == undefined || this.monitor.name == '') { + this.monitor.name = generateReadableRandomString(); } } diff --git a/web-app/src/app/shared/utils/common-util.ts b/web-app/src/app/shared/utils/common-util.ts index ebba047a5f6..7b7ca16bcc4 100644 --- a/web-app/src/app/shared/utils/common-util.ts +++ b/web-app/src/app/shared/utils/common-util.ts @@ -43,3 +43,122 @@ export function findDeepestSelected(nodes: any): any { } return deepestSelectedNode; } + +export function generateReadableRandomString(): string { + const adjectives = [ + 'quick', + 'bright', + 'calm', + 'brave', + 'cool', + 'eager', + 'fancy', + 'gentle', + 'happy', + 'jolly', + 'kind', + 'lively', + 'merry', + 'nice', + 'proud', + 'witty', + 'zesty', + 'nifty', + 'quirky', + 'unique', + 'vivid', + 'zany', + 'zealous', + 'yummy', + 'agile', + 'bold', + 'daring', + 'fearless', + 'gleeful', + 'humble', + 'jumpy', + 'keen', + 'loyal', + 'majestic', + 'noble', + 'playful', + 'radiant', + 'spirited', + 'tenacious', + 'vibrant', + 'wise', + 'youthful', + 'zippy', + 'serene', + 'bubbly', + 
'dreamy', + 'fierce', + 'graceful' + ]; + + const nouns = [ + 'fox', + 'lion', + 'eagle', + 'shark', + 'whale', + 'falcon', + 'panda', + 'tiger', + 'wolf', + 'otter', + 'lynx', + 'moose', + 'dolphin', + 'bear', + 'hawk', + 'zebra', + 'giraffe', + 'koala', + 'lemur', + 'lemming', + 'cheetah', + 'dragon', + 'owl', + 'rhino', + 'stingray', + 'jaguar', + 'panther', + 'elk', + 'ocelot', + 'beaver', + 'walrus', + 'gazelle', + 'coyote', + 'vulture', + 'iguana', + 'porcupine', + 'raccoon', + 'sloth', + 'armadillo', + 'chameleon', + 'kestrel', + 'weasel', + 'hedgehog' + ]; + + const digits = '23456789'; + const chars = 'ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjklmnpqrstuvwxyz'; + + // Randomly select an adjective and a noun + let adjective = adjectives[Math.floor(Math.random() * adjectives.length)]; + let noun = nouns[Math.floor(Math.random() * nouns.length)]; + + // Randomly generate a sequence of numbers and characters + const randomDigits = Array.from({ length: 2 }, () => digits.charAt(Math.floor(Math.random() * digits.length))).join(''); + + const randomChars = Array.from({ length: 2 }, () => chars.charAt(Math.floor(Math.random() * chars.length))).join(''); + adjective = capitalizeFirstLetter(adjective); + noun = capitalizeFirstLetter(noun); + // Combine the parts to form the final string + return `${adjective}_${noun}_${randomDigits}${randomChars}`; +} + +function capitalizeFirstLetter(word: string): string { + return word.charAt(0).toUpperCase() + word.slice(1); +} From 584b70a72665af4a61888470e6788852fcd4b053 Mon Sep 17 00:00:00 2001 From: Jast Date: Sat, 17 Aug 2024 00:18:22 +0800 Subject: [PATCH 190/257] [improve] delete deprecate field (#2537) Co-authored-by: YuLuo Co-authored-by: tomsun28 --- .../org/apache/hertzbeat/common/entity/job/Metrics.java | 4 ---- .../current/advanced/extend-jmx.md | 2 +- .../current/advanced/extend-point.md | 2 +- .../current/advanced/extend-snmp.md | 2 +- manager/src/main/resources/define/app-spring_gateway.yml | 8 ++++---- 5 files changed, 7 
insertions(+), 11 deletions(-) diff --git a/common/src/main/java/org/apache/hertzbeat/common/entity/job/Metrics.java b/common/src/main/java/org/apache/hertzbeat/common/entity/job/Metrics.java index 31be723c3c5..99d25fa6ac8 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/entity/job/Metrics.java +++ b/common/src/main/java/org/apache/hertzbeat/common/entity/job/Metrics.java @@ -322,10 +322,6 @@ public static class Field { * Metric type 0-number: number 1-string: string */ private byte type = 1; - /** - * Whether this field is the instance - */ - private boolean instance = false; /** * Whether this field is the label */ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md index 71bb06ba2b2..07acbbeeec9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md @@ -155,7 +155,7 @@ metrics: # 具体监控指标列表 fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), instance-是否是指标集合唯一标识符字段 + # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), label-是否是指标集合唯一标识符字段 - field: VmName type: 1 - field: VmVendor diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md index 6de319a1c15..5f390517206 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md @@ -150,7 +150,7 @@ metrics: # 具体监控指标列表 fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - # field-指标名称, 
type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), instance-是否是指标集合唯一标识符字段 + # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), label-是否是指标集合唯一标识符字段 - field: responseTime type: 0 unit: ms diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md index 387d67c5987..6e61ec3fa81 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md @@ -162,7 +162,7 @@ metrics: # 具体监控指标列表 fields: # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), instance-是否是指标集合唯一标识符字段 + # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), label-是否是指标集合唯一标识符字段 - field: name type: 1 - field: descr diff --git a/manager/src/main/resources/define/app-spring_gateway.yml b/manager/src/main/resources/define/app-spring_gateway.yml index d7333237c73..4d3688ac43d 100644 --- a/manager/src/main/resources/define/app-spring_gateway.yml +++ b/manager/src/main/resources/define/app-spring_gateway.yml @@ -117,7 +117,7 @@ metrics: priority: 0 # collect metrics content fields: - # field-metric name, type-metric type(0-number,1-string), instance-is instance primary key, unit-metric unit + # field-metric name, type-metric type(0-number,1-string), label-is label primary key, unit-metric unit - field: responseTime type: 0 unit: ms @@ -262,7 +262,7 @@ metrics: i18n: zh-CN: 状态 en-US: State - instance: true + label: true - field: size type: 0 i18n: @@ -324,7 +324,7 @@ metrics: i18n: zh-CN: 空间 en-US: Space - instance: true + label: true - field: mem_used type: 0 i18n: @@ -413,7 +413,7 @@ metrics: i18n: zh-CN: 路由 ID en-US: Route ID - instance: true + label: true - field: predicate type: 1 i18n: From 
f1a123ee760c93d584c121bb123ac63e12cdfc47 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 17 Aug 2024 10:14:13 +0800 Subject: [PATCH 191/257] [chore] fix typo (#2543) Signed-off-by: yuluo-yx --- .../current/community/new_committer_process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md index c4c61aac4bc..3aa91296baf 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md @@ -497,4 +497,4 @@ Thanks ! 配置完成。 -最后一步,别忘了订阅开发邮箱。 +最后一步,别忘了订阅[开发者邮箱列表](./mailing_lists.md)。 From e157d82a18d71525a9ae22af8b8d4493e188771f Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sat, 17 Aug 2024 15:26:03 +0800 Subject: [PATCH 192/257] [infra] update actions checkout version to 4 (#2544) Signed-off-by: yuluo-yx --- .github/workflows/backend-build-test.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/doc-build-test.yml | 2 +- .github/workflows/doc-pdf-builder.yml | 1 - .github/workflows/frontend-build-test.yml | 2 +- .github/workflows/license-checker.yml | 2 +- 6 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/backend-build-test.yml b/.github/workflows/backend-build-test.yml index ed07af72104..d9f16f5a68f 100644 --- a/.github/workflows/backend-build-test.yml +++ b/.github/workflows/backend-build-test.yml @@ -42,7 +42,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up JDK 17 uses: actions/setup-java@v1 with: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2fa2bb96114..c227d2e23ea 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -47,7 
+47,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/doc-build-test.yml b/.github/workflows/doc-build-test.yml index cb67cfaee2d..33841bd4006 100644 --- a/.github/workflows/doc-build-test.yml +++ b/.github/workflows/doc-build-test.yml @@ -31,7 +31,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Check filename in home/blog diff --git a/.github/workflows/doc-pdf-builder.yml b/.github/workflows/doc-pdf-builder.yml index 805d4f7bf4d..7e0e97dddb5 100644 --- a/.github/workflows/doc-pdf-builder.yml +++ b/.github/workflows/doc-pdf-builder.yml @@ -53,4 +53,3 @@ jobs: path: docs-en.pdf if-no-files-found: error retention-days: 1 - diff --git a/.github/workflows/frontend-build-test.yml b/.github/workflows/frontend-build-test.yml index 4479b300120..6893d21b4e2 100644 --- a/.github/workflows/frontend-build-test.yml +++ b/.github/workflows/frontend-build-test.yml @@ -34,7 +34,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Yarn Install working-directory: web-app run: yarn diff --git a/.github/workflows/license-checker.yml b/.github/workflows/license-checker.yml index a1f31996e2b..faccbd0550a 100644 --- a/.github/workflows/license-checker.yml +++ b/.github/workflows/license-checker.yml @@ -32,7 +32,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 10 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: submodules: true - name: Check license header From 4733ac2eba905b62247f73f86d5f7c972b9e2590 Mon Sep 17 00:00:00 2001 From: aias00 Date: Sat, 17 Aug 2024 17:38:21 +0800 Subject: [PATCH 193/257] [bugfix] fix monitor search sync error (#2546) Co-authored-by: YuLuo --- .../monitor-list/monitor-list.component.ts | 42 ++++++++++--------- 1 file changed, 22 
insertions(+), 20 deletions(-) diff --git a/web-app/src/app/routes/monitor/monitor-list/monitor-list.component.ts b/web-app/src/app/routes/monitor/monitor-list/monitor-list.component.ts index ad3b36a2621..6169fe880ff 100644 --- a/web-app/src/app/routes/monitor/monitor-list/monitor-list.component.ts +++ b/web-app/src/app/routes/monitor/monitor-list/monitor-list.component.ts @@ -179,26 +179,28 @@ export class MonitorListComponent implements OnInit, OnDestroy { loadMonitorTable(sortField?: string | null, sortOrder?: string | null) { this.tableLoading = true; - let monitorInit$ = this.monitorSvc.getMonitors(this.app, this.tag, this.pageIndex - 1, this.pageSize, sortField, sortOrder).subscribe( - message => { - this.tableLoading = false; - this.checkedAll = false; - this.checkedMonitorIds.clear(); - if (message.code === 0) { - let page = message.data; - this.monitors = page.content; - this.pageIndex = page.number + 1; - this.total = page.totalElements; - } else { - console.warn(message.msg); + let monitorInit$ = this.monitorSvc + .searchMonitors(this.app, this.tag, this.filterContent, this.filterStatus, this.pageIndex - 1, this.pageSize, sortField, sortOrder) + .subscribe( + message => { + this.tableLoading = false; + this.checkedAll = false; + this.checkedMonitorIds.clear(); + if (message.code === 0) { + let page = message.data; + this.monitors = page.content; + this.pageIndex = page.number + 1; + this.total = page.totalElements; + } else { + console.warn(message.msg); + } + monitorInit$.unsubscribe(); + }, + error => { + this.tableLoading = false; + monitorInit$.unsubscribe(); } - monitorInit$.unsubscribe(); - }, - error => { - this.tableLoading = false; - monitorInit$.unsubscribe(); - } - ); + ); } changeMonitorTable(sortField?: string | null, sortOrder?: string | null) { this.tableLoading = true; @@ -504,7 +506,7 @@ export class MonitorListComponent implements OnInit, OnDestroy { const currentSort = sort.find(item => item.value !== null); const sortField = 
(currentSort && currentSort.key) || null; const sortOrder = (currentSort && currentSort.value) || null; - this.changeMonitorTable(sortField, sortOrder); + // this.changeMonitorTable(sortField, sortOrder); } // begin: app type search filter From 4f7ba7355f0367ea40697137cef973a8428e50dc Mon Sep 17 00:00:00 2001 From: Logic Date: Sat, 17 Aug 2024 23:04:15 +0800 Subject: [PATCH 194/257] Add Contributors' chart (#2548) --- README.md | 9 +++++++++ README_CN.md | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/README.md b/README.md index f012545debe..5d66bc0fc6b 100644 --- a/README.md +++ b/README.md @@ -521,6 +521,15 @@ CNCF Observability And Analysis - Monitoring Landscape. [![Star History Chart](https://api.star-history.com/svg?repos=apache/hertzbeat&type=Date)](https://star-history.com/#apache/hertzbeat&Date) +## Contributors + +### Monthly Active Contributors + +[![Monthly Active Contributors](https://contributor-overtime-api.git-contributor.com/contributors-svg?chart=contributorMonthlyActivity&repo=apache/hertzbeat)](https://git-contributor.com?chart=contributorMonthlyActivity&repo=apache/hertzbeat) + +### GitHub Contributor Over Time + +[![GitHub Contributor Over Time](https://contributor-overtime-api.git-contributor.com/contributors-svg?chart=contributorOverTime&repo=apache/hertzbeat)](https://git-contributor.com?chart=contributorOverTime&repo=apache/hertzbeat) ## 🛡️ License [`Apache License, Version 2.0`](https://www.apache.org/licenses/LICENSE-2.0.html) diff --git a/README_CN.md b/README_CN.md index 930d1285181..19a5e50fd47 100644 --- a/README_CN.md +++ b/README_CN.md @@ -522,6 +522,15 @@ CNCF Observability And Analysis - Monitoring Landscape. 
[![Star History Chart](https://api.star-history.com/svg?repos=apache/hertzbeat&type=Date)](https://star-history.com/#apache/hertzbeat&Date) +## Contributors + +### Monthly Active Contributors + +[![Monthly Active Contributors](https://contributor-overtime-api.git-contributor.com/contributors-svg?chart=contributorMonthlyActivity&repo=apache/hertzbeat)](https://git-contributor.com?chart=contributorMonthlyActivity&repo=apache/hertzbeat) + +### GitHub Contributor Over Time + +[![GitHub Contributor Over Time](https://contributor-overtime-api.git-contributor.com/contributors-svg?chart=contributorOverTime&repo=apache/hertzbeat)](https://git-contributor.com?chart=contributorOverTime&repo=apache/hertzbeat) ## 🛡️ License [`Apache License, Version 2.0`](https://www.apache.org/licenses/LICENSE-2.0.html) From 5b50307e8c39126df51ab15276bb0f4dd80aab14 Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 18 Aug 2024 07:33:26 +0800 Subject: [PATCH 195/257] [feature] add valkey template (#2547) Co-authored-by: Calvin --- .../src/main/resources/define/app-valkey.yml | 1270 +++++++++++++++++ 1 file changed, 1270 insertions(+) create mode 100644 manager/src/main/resources/define/app-valkey.yml diff --git a/manager/src/main/resources/define/app-valkey.yml b/manager/src/main/resources/define/app-valkey.yml new file mode 100644 index 00000000000..a7cdd3c5b79 --- /dev/null +++ b/manager/src/main/resources/define/app-valkey.yml @@ -0,0 +1,1270 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: cache +# The monitoring type eg: linux windows tomcat mysql aws... +app: valkey +# The monitoring i18n name +name: + zh-CN: Valkey数据库 + en-US: Valkey +# The description and help of this monitoring type +help: + zh-CN: HertzBeat 对 Valkey 数据库的通用性能指标进行采集监控(server、clients、memory、persistence、stats、replication、cpu、errorstats、cluster、commandstats),支持版本为 Valkey 7.0+。
您可以点击“新建 Valkey 数据库”并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: HertzBeat monitors Valkey database of general performance metrics such as memory, persistence, replication and so on. The versions we support is Valkey 7.0+.
You could click the "New Valkey" button and proceed with the configuration or import an existing setup through the "More Actions" menu. + zh-TW: HertzBeat 對 Valkey 數據庫的通用性能指標進行采集監控(server、clients、memory、persistence、stats、replication、cpu、errorstats、cluster、commandstats),支持版本爲 Valkey 7.0+。
您可以點擊“新建 Valkey 數據庫”並進行配置,或者選擇“更多操作”,導入已有配置。 +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/valkey + en-US: https://hertzbeat.apache.org/docs/help/valkey +# Input params define for monitoring(render web ui by +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 6379 + # field-param field key + - field: timeout + # name-param field display i18n name + name: + zh-CN: 超时时间 + en-US: Timeout + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,100000]' + # required-true or false + required: true + # default value + defaultValue: 3000 + # field-param field key + - field: username + name: + zh-CN: 用户名 + en-US: Username + type: text + limit: 50 + required: false + # field-param field key + - field: password + name: + zh-CN: 密码 + en-US: Password + type: password + required: false + +# collect metrics config list +metrics: + # metrics - server + - name: server + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + i18n: + zh-CN: 服务器信息 + en-US: Server + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: identity + type: 1 + i18n: + zh-CN: 
ID + en-US: Identity + - field: valkey_version + type: 1 + i18n: + zh-CN: Valkey服务版本 + en-US: Valkey Version + - field: valkey_git_sha1 + type: 0 + i18n: + zh-CN: Git SHA1 + en-US: Git SHA1 + - field: valkey_git_dirty + type: 0 + i18n: + zh-CN: Valkey服务器代码是否有改动 + en-US: Git Dirty Flag + - field: valkey_build_id + type: 1 + i18n: + zh-CN: Build Id + en-US: Build Id + - field: valkey_mode + type: 1 + i18n: + zh-CN: 运行模式 + en-US: Server Mode + - field: os + type: 1 + i18n: + zh-CN: 操作系统 + en-US: Operating System + - field: arch_bits + type: 0 + i18n: + zh-CN: 架构 + en-US: Architecture Bits + - field: multiplexing_api + type: 1 + i18n: + zh-CN: IO多路复用器API + en-US: Multiplexing API + - field: atomicvar_api + type: 1 + i18n: + zh-CN: 原子操作处理API + en-US: Atomicvar API + - field: gcc_version + type: 1 + i18n: + zh-CN: GCC版本 + en-US: GCC Version + - field: process_id + type: 0 + i18n: + zh-CN: 进程ID + en-US: PID + - field: process_supervised + type: 1 + i18n: + zh-CN: 进程监督机制管理Valkey的方式 + en-US: Process Supervised + - field: run_id + type: 1 + i18n: + zh-CN: Run ID + en-US: Run ID + - field: tcp_port + type: 0 + i18n: + zh-CN: TCP/IP监听端口 + en-US: TCP Port + - field: server_time_usec + type: 0 + i18n: + zh-CN: 服务器时间戳 + en-US: Server Time Usec + - field: uptime_in_seconds + type: 0 + i18n: + zh-CN: 运行时长(秒) + en-US: Uptime(Seconds) + - field: uptime_in_days + type: 0 + i18n: + zh-CN: 运行时长(天) + en-US: Uptime(Days) + - field: hz + type: 0 + i18n: + zh-CN: 事件循环频率 + en-US: hz + - field: configured_hz + type: 0 + i18n: + zh-CN: 配置的事件循环频率 + en-US: Configured hz + - field: lru_clock + type: 0 + i18n: + zh-CN: LRU时钟 + en-US: LRU Clock + - field: executable + type: 1 + i18n: + zh-CN: 服务器执行路径 + en-US: Server's Executable Path + - field: config_file + type: 1 + i18n: + zh-CN: 配置文件路径 + en-US: Config File Path + - field: io_threads_active + type: 0 + i18n: + zh-CN: 活跃IO线程数 + en-US: Active IO Threads + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: 
redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + # metrics - clients + - name: clients + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 1 + i18n: + zh-CN: 客户端信息 + en-US: Clients + # collect metrics content + fields: + - field: connected_clients + type: 0 + i18n: + zh-CN: 已连接客户端数量 + en-US: Connected Clients + - field: cluster_connections + type: 0 + i18n: + zh-CN: 集群客户端连接数 + en-US: Cluster Connections + - field: maxclients + type: 0 + i18n: + zh-CN: 最大客户端连接数 + en-US: Max Clients + - field: client_recent_max_input_buffer + type: 0 + i18n: + zh-CN: 最近客户端的最大输入缓冲区大小 + en-US: Client Recent Max Input Buffer + - field: client_recent_max_output_buffer + type: 0 + i18n: + zh-CN: 最近客户端的最大输出缓冲区大小 + en-US: Client Recent Max Output Buffer + - field: blocked_clients + type: 0 + i18n: + zh-CN: 阻塞客户端数量 + en-US: Blocked Clients + - field: tracking_clients + type: 0 + i18n: + zh-CN: 正在追踪数据的客户端数量 + en-US: Tracking Clients + - field: clients_in_timeout_table + type: 0 + i18n: + zh-CN: 超时队列中的客户端数量 + en-US: Clients In Timeout Table + protocol: redis + redis: + host: ^_^host^_^ + port: ^_^port^_^ + username: ^_^username^_^ + password: ^_^password^_^ + timeout: ^_^timeout^_^ + # metrics - memory + - name: memory + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 2 + i18n: + zh-CN: 内存信息 + en-US: Memory + # collect metrics 
content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: used_memory + type: 0 + i18n: + zh-CN: 已使用内存(字节) + en-US: Used Memory + - field: used_memory_human + type: 0 + unit: MB + i18n: + zh-CN: 已使用内存 + en-US: Used Memory Human + - field: used_memory_rss + type: 0 + i18n: + zh-CN: 已使用物理内存(字节) + en-US: Used Memory RSS + - field: used_memory_rss_human + type: 0 + unit: MB + i18n: + zh-CN: 已使用物理内存 + en-US: Used Memory RSS Human + - field: used_memory_peak + type: 0 + i18n: + zh-CN: 内存使用最大峰值(字节) + en-US: Used Memory Peak + - field: used_memory_peak_human + type: 0 + unit: MB + i18n: + zh-CN: 内存使用最大值 + en-US: Used Memory Peak Human + - field: used_memory_peak_perc + type: 0 + unit: '%' + i18n: + zh-CN: 最大内存使用率 + en-US: Used Memory Peak Perc + - field: used_memory_overhead + type: 0 + i18n: + zh-CN: 管理数据结构的额外内存使用量 + en-US: Used Memory Overhead + - field: used_memory_startup + type: 0 + i18n: + zh-CN: Valkey服务器启动时使用的内存 + en-US: Used Memory Startup + - field: used_memory_dataset + type: 0 + i18n: + zh-CN: 存储数据占用的内存 + en-US: Used Memory Dataset + - field: used_memory_dataset_perc + type: 0 + unit: '%' + i18n: + zh-CN: 存储数据占用的内存比率 + en-US: Used Memory Dataset Perc + - field: allocator_allocated + type: 0 + i18n: + zh-CN: 内存分配器分配的内存 + en-US: Allocator Allocated + - field: allocator_active + type: 0 + i18n: + zh-CN: 内存分配器激活的内存 + en-US: Allocator Active + - field: allocator_resident + type: 0 + i18n: + zh-CN: 内存分配器加载的内存 + en-US: Allocator Resident + - field: total_system_memory + type: 0 + i18n: + zh-CN: 总系统内存容量(字节) + en-US: Total System Memory + - field: total_system_memory_human + type: 0 + unit: GB + i18n: + zh-CN: 总系统内存容量 + en-US: Total System Memory Human + - field: used_memory_lua + type: 0 + i18n: + zh-CN: LUA脚本占用的内存(字节) + en-US: Used Memory LUA + - field: used_memory_lua_human + type: 0 + unit: KB + i18n: + zh-CN: LUA脚本占用的内存 + en-US: Used Memory LUA Human + - field: 
used_memory_scripts + type: 0 + i18n: + zh-CN: Valkey缓存的LUA脚本大小(字节) + en-US: Used Memory Scripts + - field: used_memory_scripts_human + type: 0 + unit: KB + i18n: + zh-CN: Valkey缓存的LUA脚本大小 + en-US: Used Memory Scripts Human + - field: number_of_cached_scripts + type: 0 + i18n: + zh-CN: Valkey缓存的LUA脚本数量 + en-US: Number Of Cached Scripts + - field: maxmemory + type: 0 + i18n: + zh-CN: 最大内存限制(字节) + en-US: Max Memory + - field: maxmemory_human + type: 0 + unit: MB + i18n: + zh-CN: 最大内存限制 + en-US: Max Memory Human + - field: maxmemory_policy + type: 1 + i18n: + zh-CN: 内存淘汰策略 + en-US: Max Memory Policy + - field: allocator_frag_ratio + type: 0 + i18n: + zh-CN: 内存分配器中的内存碎片占比 + en-US: Allocator Frag Ratio + - field: allocator_frag_bytes + type: 0 + i18n: + zh-CN: 内存分配器中的内存碎片大小 + en-US: Allocator Frag Bytes + - field: allocator_rss_ratio + type: 0 + i18n: + zh-CN: 内存分配器分配的内存占比 + en-US: Allocator RSS Ratio + - field: allocator_rss_bytes + type: 0 + i18n: + zh-CN: 内存分配器分配的内存大小 + en-US: Allocator RSS Bytes + - field: rss_overhead_ratio + type: 0 + i18n: + zh-CN: 实际内存占比 + en-US: RSS Overhead Ratio + - field: rss_overhead_bytes + type: 0 + i18n: + zh-CN: 实际内存大小 + en-US: RSS Overhead Bytes + - field: mem_fragmentation_ratio + type: 0 + i18n: + zh-CN: 内存碎片率 + en-US: Mem Fragmentation Ratio + - field: mem_fragmentation_bytes + type: 0 + i18n: + zh-CN: 内存碎片大小 + en-US: Mem Fragmentation Bytes + - field: mem_not_counted_for_evict + type: 0 + i18n: + zh-CN: 未计入最大内存限制的内存 + en-US: Mem Not Counted For Evict + - field: mem_replication_backlog + type: 0 + i18n: + zh-CN: 主从同步缓冲区占用的内存(字节) + en-US: Mem Replication Backlog + - field: mem_clients_slaves + type: 0 + i18n: + zh-CN: 从节点占用的内存 + en-US: Mem Clients Slaves + - field: mem_clients_normal + type: 0 + i18n: + zh-CN: 客户端占用的内存 + en-US: Mem Clients Normal + - field: mem_aof_buffer + type: 0 + i18n: + zh-CN: AOF缓冲区占用的内存 + en-US: Mem AOF Buffer + - field: mem_allocator + type: 1 + i18n: + zh-CN: 内存分配器 + en-US: Mem Allocator + - field: 
active_defrag_running + type: 0 + i18n: + zh-CN: 是否正在整理内存 + en-US: Active Defrag Running + - field: lazyfree_pending_objects + type: 0 + i18n: + zh-CN: 待处理的惰性删除对象数量 + en-US: Lazyfree Pending Objects + - field: lazyfreed_objects + type: 0 + i18n: + zh-CN: 已经执行的惰性删除对象数量 + en-US: Lazyfreed Objects + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - persistence + - name: persistence + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 3 + i18n: + zh-CN: 持久化信息 + en-US: Persistence + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: loading + type: 0 + i18n: + zh-CN: 是否正在加载持久化文件 + en-US: Loading + - field: current_cow_size + type: 0 + i18n: + zh-CN: COW区域内存大小 + en-US: Current COW Size + - field: current_cow_size_age + type: 0 + i18n: + zh-CN: COW区域内存使用时间 + en-US: Current COW Size Age + - field: current_fork_perc + type: 0 + i18n: + zh-CN: COW区域内存使用率 + en-US: Current Fork Perc + - field: current_save_keys_processed + type: 0 + i18n: + zh-CN: 正在处理的保存键数量 + en-US: Current Save Keys Processed + - field: current_save_keys_total + type: 0 + i18n: + zh-CN: 保存键总数量 + en-US: Current Save Keys Total + - field: rdb_changes_since_last_save + type: 0 + i18n: + zh-CN: 自最近一次RDB后的数据改动条数 + en-US: RDB Changes Since Last Save + - field: rdb_bgsave_in_progress + type: 0 + i18n: + zh-CN: 是否正在进行RDB的bgsave命令 + en-US: RDB 
bgsave In Progress + - field: rdb_last_save_time + type: 0 + i18n: + zh-CN: 最近一次bgsave命令执行时间 + en-US: RDB Last Save Time + - field: rdb_last_bgsave_status + type: 1 + i18n: + zh-CN: 最近一次bgsave命令执行状态 + en-US: RDB Last bgsave Status + - field: rdb_last_bgsave_time_sec + type: 0 + i18n: + zh-CN: 最近一次bgsave命令执行时间(秒) + en-US: RDB Last bgsave Time Sec + - field: rdb_current_bgsave_time_sec + type: 0 + i18n: + zh-CN: 当前bgsave命令执行时间(秒) + en-US: RDB Current bgsave Time Sec + - field: rdb_last_cow_size + type: 0 + i18n: + zh-CN: RDB最近一次COW区域内存大小 + en-US: RDB Last COW Size + - field: aof_enabled + type: 0 + i18n: + zh-CN: 是否开启了AOF + en-US: AOF Enabled + - field: aof_rewrite_in_progress + type: 0 + i18n: + zh-CN: 是否正在进行AOF的rewrite命令 + en-US: AOF rewrite In Progress + - field: aof_rewrite_scheduled + type: 0 + i18n: + zh-CN: 是否在RDB的bgsave结束后执行AOF的rewirte + en-US: AOF rewrite Scheduled + - field: aof_last_rewrite_time_sec + type: 0 + i18n: + zh-CN: 最近一次AOF的rewrite命令执行时间(秒) + en-US: AOF Last rewrite Time Sec + - field: aof_current_rewrite_time_sec + type: 0 + i18n: + zh-CN: 当前rewrite命令执行时间(秒) + en-US: AOF Current rewrite Time Sec + - field: aof_last_bgrewrite_status + type: 1 + i18n: + zh-CN: 最近一次AOF的bgrewrite命令执行状态 + en-US: AOF Last bgrewrite Status + - field: aof_last_write_status + type: 1 + i18n: + zh-CN: 最近一次AOF写磁盘结果 + en-US: AOF Last Write Status + - field: aof_last_cow_size + type: 0 + i18n: + zh-CN: AOF最近一次COW区域内存大小 + en-US: AOF Last Cow Size + - field: module_fork_in_progress + type: 0 + i18n: + zh-CN: 是否正在进行fork操作 + en-US: Module Fork In Progress + - field: module_fork_last_cow_size + type: 0 + i18n: + zh-CN: 最近一次执行fork操作的COW区域内存大小 + en-US: Module Fork Last Cow Size + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: 
^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - stats + - name: stats + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 4 + i18n: + zh-CN: 全局统计信息 + en-US: Stats + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: total_connections_received + type: 0 + i18n: + zh-CN: 已接受的总连接数 + en-US: Total Connections Received + - field: total_commands_processed + type: 0 + i18n: + zh-CN: 执行过的命令总数 + en-US: Total Commands Processed + - field: instantaneous_ops_per_sec + type: 0 + i18n: + zh-CN: 命令处理条数/秒 + en-US: Instantaneous Ops Per Sec + - field: total_net_input_bytes + type: 0 + i18n: + zh-CN: 输入总网络流量(字节) + en-US: Total Net Input Bytes + - field: total_net_output_bytes + type: 0 + i18n: + zh-CN: 输出总网络流量(字节) + en-US: Total Net Output Bytes + - field: instantaneous_input_kbps + type: 0 + i18n: + zh-CN: 输入字节数/秒 + en-US: Instantaneous Input Kbps + - field: instantaneous_output_kbps + type: 0 + i18n: + zh-CN: 输出字节数/秒 + en-US: Instantaneous Output Kbps + - field: rejected_connections + type: 0 + i18n: + zh-CN: 拒绝连接数 + en-US: Rejected Connections + - field: sync_full + type: 0 + i18n: + zh-CN: 主从完全同步成功次数 + en-US: Sync Full + - field: sync_partial_ok + type: 0 + i18n: + zh-CN: 主从部分同步成功次数 + en-US: Sync Partial OK + - field: sync_partial_err + type: 0 + i18n: + zh-CN: 主从部分同步失败次数 + en-US: Sync Partial Error + - field: expired_keys + type: 0 + i18n: + zh-CN: 过期key数量 + en-US: Expired Keys + - field: expired_stale_perc + type: 0 + i18n: + zh-CN: 过期key占比 + en-US: Expired Stale Perc + - field: expired_time_cap_reached_count + type: 0 + i18n: + zh-CN: 过期key清理操作被限制次数 + en-US: Expired Time Cap Reached Count + - field: 
expire_cycle_cpu_milliseconds + type: 0 + i18n: + zh-CN: 清理过期key消耗的cpu时间(毫秒) + en-US: Expire Cycle CPU Milliseconds + - field: evicted_keys + type: 0 + i18n: + zh-CN: 淘汰key数量 + en-US: Evicted Keys + - field: keyspace_hits + type: 0 + i18n: + zh-CN: key命中成功次数 + en-US: Keyspace Hits + - field: keyspace_misses + type: 0 + i18n: + zh-CN: key命中失败次数 + en-US: Keyspace Misses + - field: pubsub_channels + type: 0 + i18n: + zh-CN: 订阅的频道数量 + en-US: Pubsub Channels + - field: pubsub_patterns + type: 0 + i18n: + zh-CN: 订阅的模式数量 + en-US: Pubsub Patterns + - field: latest_fork_usec + type: 0 + i18n: + zh-CN: 最近一次fork操作消耗时间(微秒) + en-US: Latest Fork Usec + - field: total_forks + type: 0 + i18n: + zh-CN: fork进程总数 + en-US: Total Forks + - field: migrate_cached_sockets + type: 0 + i18n: + zh-CN: 正在进行migrate的目标Valkey个数 + en-US: Migrate Cached Sockets + - field: slave_expires_tracked_keys + type: 0 + i18n: + zh-CN: 主从同步中已过期的key数量 + en-US: Slave Expires Tracked Keys + - field: active_defrag_hits + type: 0 + i18n: + zh-CN: 主动碎片整理命中次数 + en-US: Active Defrag Hits + - field: active_defrag_misses + type: 0 + i18n: + zh-CN: 主动碎片整理未命中次数 + en-US: Active Defrag Misses + - field: active_defrag_key_hits + type: 0 + i18n: + zh-CN: 主动碎片整理key命中次数 + en-US: Active Defrag Key Hits + - field: active_defrag_key_misses + type: 0 + i18n: + zh-CN: 主动碎片整理key未命中次数 + en-US: Active Defrag Key Misses + - field: tracking_total_keys + type: 0 + i18n: + zh-CN: 正在追踪的key数量 + en-US: Tracking Total Keys + - field: tracking_total_items + type: 0 + i18n: + zh-CN: 正在追踪的value数量 + en-US: Tracking Total Items + - field: tracking_total_prefixes + type: 0 + i18n: + zh-CN: 正在追踪的前缀数量 + en-US: Tracking Total Prefixes + - field: unexpected_error_replies + type: 0 + i18n: + zh-CN: 执行命令时发生错误的数量 + en-US: Unexpected Error Replies + - field: total_error_replies + type: 0 + i18n: + zh-CN: 执行命令时发生错误的总数 + en-US: Total Error Replies + - field: dump_payload_sanitizations + type: 0 + i18n: + zh-CN: Dump命令时数据清理的数量 + en-US: Dump Payload 
Sanitizations + - field: total_reads_processed + type: 0 + i18n: + zh-CN: 执行读操作时处理的请求数量 + en-US: Total Reads Processed + - field: total_writes_processed + type: 0 + i18n: + zh-CN: 执行写操作时处理的请求数量 + en-US: Total Writes Processed + - field: io_threaded_reads_processed + type: 0 + i18n: + zh-CN: 使用线程进行读操作时处理的请求数量 + en-US: IO Threaded Reads Processed + - field: io_threaded_writes_processed + type: 0 + i18n: + zh-CN: 使用线程进行写操作时处理的请求数量 + en-US: IO Threaded Writes Processed + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - replication + - name: replication + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 5 + i18n: + zh-CN: 主从同步信息 + en-US: Replication + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: role + type: 1 + i18n: + zh-CN: 节点角色 + en-US: Role + - field: connected_slaves + type: 0 + i18n: + zh-CN: 已连接的从节点个数 + en-US: Connected Slaves + - field: master_failover_state + type: 1 + i18n: + zh-CN: 主从故障转移状态 + en-US: Master FailOver State + - field: master_replid + type: 1 + i18n: + zh-CN: 主从同步id + en-US: Master Replid + - field: master_replid2 + type: 0 + i18n: + zh-CN: 主从同步id2 + en-US: Master Replid2 + - field: master_repl_offset + type: 0 + i18n: + zh-CN: 主节点偏移量 + en-US: Master Repl Offset + - field: second_repl_offset + type: 0 + i18n: + zh-CN: 接受主从同步的从节点偏移量 + en-US: Second Repl Offset + - 
field: repl_backlog_active + type: 0 + i18n: + zh-CN: 复制缓冲区状态 + en-US: Repl Backlog Active + - field: repl_backlog_size + type: 0 + i18n: + zh-CN: 复制缓冲区大小(字节) + en-US: Repl Backlog Size + - field: repl_backlog_first_byte_offset + type: 0 + i18n: + zh-CN: 复制缓冲区起始偏移量 + en-US: Repl Backlog First Byte Offset + - field: repl_backlog_histlen + type: 0 + i18n: + zh-CN: 复制缓冲区的有效数据长度 + en-US: Repl Backlog Histlen + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - cpu + - name: cpu + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 6 + i18n: + zh-CN: CPU消耗信息 + en-US: CPU + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: used_cpu_sys + type: 0 + i18n: + zh-CN: Valkey进程使用的CPU时钟总和(内核态) + en-US: Used CPU Sys + - field: used_cpu_user + type: 0 + i18n: + zh-CN: Valkey进程使用的CPU时钟总和(用户态) + en-US: Used CPU User + - field: used_cpu_sys_children + type: 0 + i18n: + zh-CN: 后台进程使用的CPU时钟总和(内核态) + en-US: Used CPU Sys Children + - field: used_cpu_user_children + type: 0 + i18n: + zh-CN: 后台进程使用的CPU时钟总和(用户态) + en-US: Used CPU User Children + - field: used_cpu_sys_main_thread + type: 0 + i18n: + zh-CN: 主进程使用的CPU时钟总和(内核态) + en-US: Used CPU Sys Main Thread + - field: used_cpu_user_main_thread + type: 0 + i18n: + zh-CN: 主进程使用的CPU时钟总和(用户态) + en-US: Used CPU User Main Thread + # the protocol used for monitoring, eg: sql, ssh, 
http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - errorstats + - name: errorstats + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 8 + i18n: + zh-CN: 错误信息 + en-US: Error Stats + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: errorstat_ERR + type: 1 + i18n: + zh-CN: 执行命令时出错的次数 + en-US: Error Stat Error + - field: errorstat_MISCONF + type: 1 + i18n: + zh-CN: 执行命令时出现misconf错误的次数 + en-US: Error Stat Misconf + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - cluster + - name: cluster + # collect metrics content + priority: 9 + i18n: + zh-CN: 集群信息 + en-US: Cluster + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: cluster_enabled + type: 0 + i18n: + zh-CN: 节点是否开启集群模式 + en-US: Cluster Enabled + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: 
^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - commandstats + - name: commandstats + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 9 + i18n: + zh-CN: 命令统计信息 + en-US: Command Stats + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: cmdstat_set + type: 1 + i18n: + zh-CN: set + en-US: set + - field: cmdstat_get + type: 1 + i18n: + zh-CN: get + en-US: get + - field: cmdstat_setnx + type: 1 + i18n: + zh-CN: setnx + en-US: setnx + - field: cmdstat_hset + type: 1 + i18n: + zh-CN: hset + en-US: hset + - field: cmdstat_hget + type: 1 + i18n: + zh-CN: hget + en-US: hget + - field: cmdstat_lpush + type: 1 + i18n: + zh-CN: lpush + en-US: lpush + - field: cmdstat_rpush + type: 1 + i18n: + zh-CN: rpush + en-US: rpush + - field: cmdstat_lpop + type: 1 + i18n: + zh-CN: lpop + en-US: lpop + - field: cmdstat_rpop + type: 1 + i18n: + zh-CN: rpop + en-US: rpop + - field: cmdstat_llen + type: 1 + i18n: + zh-CN: llen + en-US: llen + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - keyspace + - name: keyspace + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only 
availability metrics collect success will the scheduling continue + priority: 9 + i18n: + zh-CN: 数据库统计信息 + en-US: Keyspace + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: db0 + type: 1 + i18n: + zh-CN: db0 + en-US: db0 + - field: db1 + type: 1 + i18n: + zh-CN: db1 + en-US: db1 + - field: db2 + type: 1 + i18n: + zh-CN: db2 + en-US: db2 + - field: db3 + type: 1 + i18n: + zh-CN: db3 + en-US: db3 + - field: db4 + type: 1 + i18n: + zh-CN: db4 + en-US: db4 + - field: db5 + type: 1 + i18n: + zh-CN: db5 + en-US: db5 + - field: db6 + type: 1 + i18n: + zh-CN: db6 + en-US: db6 + - field: db7 + type: 1 + i18n: + zh-CN: db7 + en-US: db7 + - field: db8 + type: 1 + i18n: + zh-CN: db8 + en-US: db8 + - field: db9 + type: 1 + i18n: + zh-CN: db9 + en-US: db9 + - field: db10 + type: 1 + i18n: + zh-CN: db10 + en-US: db10 + - field: db11 + type: 1 + i18n: + zh-CN: db11 + en-US: db11 + - field: db12 + type: 1 + i18n: + zh-CN: db12 + en-US: db12 + - field: db13 + type: 1 + i18n: + zh-CN: db13 + en-US: db13 + - field: db14 + type: 1 + i18n: + zh-CN: db14 + en-US: db14 + - field: db15 + type: 1 + i18n: + zh-CN: db15 + en-US: db15 + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ From 031e83d7014c52d90f432caceb1bb2a54ff14068 Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 18 Aug 2024 09:30:09 +0800 Subject: [PATCH 196/257] [bugfix] clear qbdata when cascade change (#2545) Co-authored-by: YuLuo --- .../alert/alert-setting/alert-setting.component.ts | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git 
a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts index beb75d5e8b9..cfcb9326d96 100644 --- a/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts +++ b/web-app/src/app/routes/alert/alert-setting/alert-setting.component.ts @@ -164,7 +164,7 @@ export class AlertSettingComponent implements OnInit { onNewAlertDefine() { this.define = new AlertDefine(); this.define.tags = []; - this.resetQbData({ condition: 'and', rules: [] }); + this.resetQbDataDefault(); this.isManageModalAdd = true; this.isManageModalVisible = true; this.isManageModalOkLoading = false; @@ -593,6 +593,7 @@ export class AlertSettingComponent implements OnInit { }; cascadeOnChange(values: string[]): void { + this.resetQbDataDefault(); if (values == null || values.length != 3) { return; } @@ -645,7 +646,7 @@ export class AlertSettingComponent implements OnInit { } catch (e) { console.error(e); this.isExpr = true; - this.resetQbData({ condition: 'and', rules: [] }); + this.resetQbDataDefault(); return; } } @@ -653,7 +654,7 @@ export class AlertSettingComponent implements OnInit { onManageModalCancel() { this.cascadeValues = []; this.isExpr = false; - this.resetQbData({ condition: 'and', rules: [] }); + this.resetQbDataDefault(); this.isManageModalVisible = false; } @@ -661,10 +662,14 @@ export class AlertSettingComponent implements OnInit { this.qbFormCtrl.reset((this.qbData = qbData)); } + resetQbDataDefault() { + this.resetQbData({ condition: 'and', rules: [] }); + } + resetManageModalData() { this.cascadeValues = []; this.isExpr = false; - this.resetQbData({ condition: 'and', rules: [] }); + this.resetQbDataDefault(); this.isManageModalVisible = false; } From 19c7d84e53c72a1bb9699fe5b087f3b3f7698b5f Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sun, 18 Aug 2024 15:42:35 +0800 Subject: [PATCH 197/257] [infra] remove redundant security bug reporting options (#2550) Signed-off-by: yuluo-yx --- 
.github/ISSUE_TEMPLATE/config.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index a168e9d1a55..8d9fbbce9e5 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -20,6 +20,3 @@ contact_links: - name: Community Support url: https://github.com/apache/hertzbeat/discussions about: Please ask and answer questions here. - - name: Security Bug Bounty - url: https://github.com/apache/hertzbeat/issues - about: Please report security vulnerabilities here. From 27d66a11c1f62e1e1fef9f709fdb9bfc86745c32 Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 18 Aug 2024 16:36:00 +0800 Subject: [PATCH 198/257] [type:feature] add greptimedb template (#2551) Co-authored-by: YuLuo --- .../main/resources/define/app-greptime.yml | 695 ++++++++++++++++++ 1 file changed, 695 insertions(+) create mode 100644 manager/src/main/resources/define/app-greptime.yml diff --git a/manager/src/main/resources/define/app-greptime.yml b/manager/src/main/resources/define/app-greptime.yml new file mode 100644 index 00000000000..1671e872e5f --- /dev/null +++ b/manager/src/main/resources/define/app-greptime.yml @@ -0,0 +1,695 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 +category: bigdata +# The monitoring type eg: linux windows tomcat mysql aws... +# 监控类型 eg: linux windows tomcat mysql aws... +app: greptimeDB +# The monitoring i18n name +# 监控类型国际化名称 +name: + zh-CN: GreptimeDB + en-US: GreptimeDB +# The description and help of this monitoring type +# 监控类型的帮助描述信息 +help: + zh-CN: HertzBeat 对 GreptimeDB 时序数据库进行监控。
点击查看开启步骤 + en-US: HertzBeat monitors the GreptimeDB time series database.
Click to view the activation steps. + zh-TW: HertzBeat 對 GreptimeDB 時序資料庫進行監控。
點擊查看開啓步驟 +helpLink: + zh-CN: https://hertzbeat.com/zh-cn/docs/help/greptimedb + en-US: https://hertzbeat.com/docs/help/greptimedb +# 监控所需输入参数定义(根据定义渲染页面UI) +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + # field-变量字段标识符 + - field: host + # name-param field display i18n name + # name-参数字段显示名称 + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: host + # required-true or false + # required-是否是必输项 true-必填 false-可选 + required: true + - field: port + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: number + # when type is number, range is required + # 当type为number时,用range表示范围 + range: '[0,65535]' + # default value + defaultValue: 4000 + required: true + - field: timeout + name: + zh-CN: 查询超时时间 + en-US: Query Timeout + type: number + required: false + # hide param-true or false + # 是否隐藏字段 true or false + hide: true + defaultValue: 6000 + +# collect metrics config list +# 采集指标配置列表 +metrics: + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_app_version + i18n: + zh-CN: greptime 应用版本 + en-US: greptime_app_version + priority: 0 + fields: + - field: short_version + i18n: + zh-CN: 版本 + en-US: version + type: 1 + label: true + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_catalog_schema_count + i18n: + zh-CN: 目录 模式 数量 + en-US: greptime_catalog_schema_count + priority: 0 + fields: + - field: value + i18n: + zh-CN: 数量 + en-US: count + type: 1 + calculates: + - name=.name + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - 
cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_meta_cache_container_cache_get + i18n: + zh-CN: 缓存容器缓存获取 + en-US: greptime_meta_cache_container_cache_get + priority: 0 + fields: + - field: name + i18n: + zh-CN: 名称 + en-US: name + type: 1 + label: true + - field: value + i18n: + zh-CN: 指标值 + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_meta_cache_container_cache_miss + i18n: + zh-CN: 缓存容器缓存未命中 + en-US: greptime_meta_cache_container_cache_miss + priority: 0 + fields: + - field: name + i18n: + zh-CN: 名称 + en-US: name + type: 1 + label: true + - field: value + i18n: + zh-CN: 指标值 + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_mito_region_count + i18n: + zh-CN: mito 引擎区域数量 + en-US: greptime_mito_region_count + priority: 0 + fields: + - field: worker + i18n: + zh-CN: 工作线程 + en-US: worker + type: 1 + label: true + - field: value + i18n: + zh-CN: 数量 + en-US: count + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_mito_write_stall_total + i18n: + zh-CN: mito 引擎写入延迟总数 + en-US: greptime_mito_write_stall_total + priority: 0 + fields: + - field: worker + i18n: + zh-CN: 工作线程 + en-US: worker + type: 1 + label: true + - field: value + i18n: + zh-CN: 数量 + en-US: total + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - 
cluster_node_status + - name: greptime_meta_create_catalog_counter + i18n: + zh-CN: 创建目录计数器 + en-US: greptime_meta_create_catalog_counter + priority: 0 + fields: + - field: value + i18n: + zh-CN: 指标值 + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_runtime_threads_alive + i18n: + zh-CN: 运行时线程存活 + en-US: greptime_runtime_threads_alive + priority: 0 + fields: + - field: thread_name + i18n: + zh-CN: 线程名称 + en-US: thread_name + type: 1 + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_runtime_threads_idle + i18n: + zh-CN: 运行时线程空闲 + en-US: greptime_runtime_threads_idle + priority: 0 + fields: + - field: thread_name + i18n: + zh-CN: 线程名称 + en-US: thread_name + type: 1 + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_servers_http_requests_total + i18n: + zh-CN: greptime 服务 HTTP 请求总数 + en-US: greptime_servers_http_requests_total + priority: 0 + fields: + - field: code + i18n: + zh-CN: code + en-US: code + type: 1 + - field: method + i18n: + zh-CN: method + en-US: method + type: 1 + - field: path + i18n: + zh-CN: path + en-US: path + type: 1 + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 
- cluster_node_status + - name: greptime_servers_mysql_connection_count + i18n: + zh-CN: greptime 服务 MySQL 连接数 + en-US: greptime_servers_mysql_connection_count + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: greptime_servers_postgres_connection_count + i18n: + zh-CN: greptime 服务 Postgres 连接数 + en-US: greptime_servers_postgres_connection_count + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: process_cpu_seconds_total + i18n: + zh-CN: 进程 CPU 时间总数 + en-US: process_cpu_seconds_total + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: process_max_fds + i18n: + zh-CN: 进程最大文件描述符 + en-US: process_max_fds + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: process_open_fds + i18n: + zh-CN: 进程打开文件描述符 + en-US: process_open_fds + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + 
+ # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: process_resident_memory_bytes + i18n: + zh-CN: 进程常驻内存字节 + en-US: process_resident_memory_bytes + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: process_start_time_seconds + i18n: + zh-CN: 进程启动时间(秒) + en-US: process_start_time_seconds + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: process_threads + i18n: + zh-CN: 进程线程 + en-US: process_threads + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: process_virtual_memory_bytes + i18n: + zh-CN: 进程虚拟内存字节 + en-US: process_virtual_memory_bytes + priority: 0 + fields: + - field: value + i18n: + zh-CN: value + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: raft_engine_log_entry_count + i18n: + zh-CN: raft 引擎日志条目数量 + en-US: raft_engine_log_entry_count + priority: 0 + fields: + - field: type + i18n: + zh-CN: 类型 + en-US: type + type: 1 + - field: value + i18n: + zh-CN: 数量 + en-US: count + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: 
^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: raft_engine_log_file_count + i18n: + zh-CN: raft 引擎日志文件数量 + en-US: raft_engine_log_file_count + priority: 0 + fields: + - field: type + i18n: + zh-CN: 类型 + en-US: type + type: 1 + - field: value + i18n: + zh-CN: 数量 + en-US: count + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: raft_engine_memory_usage + i18n: + zh-CN: raft 引擎内存占用 + en-US: raft_engine_memory_usage + priority: 0 + fields: + - field: value + i18n: + zh-CN: 占用 + en-US: total + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: raft_engine_recycled_file_count + i18n: + zh-CN: raft 引擎回收文件数量 + en-US: raft_engine_recycled_file_count + priority: 0 + fields: + - field: type + i18n: + zh-CN: 类型 + en-US: type + type: 1 + - field: value + i18n: + zh-CN: 数量 + en-US: count + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: sys_jemalloc_allocated + i18n: + zh-CN: jemalloc 分配 + en-US: sys_jemalloc_allocated + priority: 0 + fields: + - field: value + i18n: + zh-CN: 数量 + en-US: value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus + + # metrics - cluster_node_status + # 监控指标 - cluster_node_status + - name: sys_jemalloc_resident + i18n: + zh-CN: jemalloc 常驻 + en-US: sys_jemalloc_resident + priority: 0 + fields: + - field: value + i18n: + zh-CN: 数量 + en-US: 
value + type: 1 + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: '/metrics' + timeout: ^_^timeout^_^ + method: GET + parseType: prometheus From e10e4b2d8d7143932adc1638a5c7a804b46f8f75 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Sun, 18 Aug 2024 16:45:06 +0800 Subject: [PATCH 199/257] [improve] fix docker compose package script and update contributor doc (#2552) Signed-off-by: tomsun28 Co-authored-by: YuLuo --- .all-contributorsrc | 27 +++++++++++++++++++ README.md | 3 +++ README_CN.md | 3 +++ home/src/pages/team/index.jsx | 12 +++++++++ home/src/pages/team/member.json | 5 ++++ .../server/assembly-docker-compose.xml | 8 ++++-- 6 files changed, 56 insertions(+), 2 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index f779e176bcb..2ba87729dd6 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -2009,6 +2009,33 @@ "contributions": [ "code" ] + }, + { + "login": "loong95", + "name": "Loong", + "avatar_url": "https://avatars.githubusercontent.com/u/16333958?v=4", + "profile": "https://github.com/loong95", + "contributions": [ + "code" + ] + }, + { + "login": "ceekay47", + "name": "Chandrakant Vankayalapati", + "avatar_url": "https://avatars.githubusercontent.com/u/104664857?v=4", + "profile": "https://github.com/ceekay47", + "contributions": [ + "code" + ] + }, + { + "login": "MRgenial", + "name": "b_mountain", + "avatar_url": "https://avatars.githubusercontent.com/u/49973336?v=4", + "profile": "https://github.com/MRgenial", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, diff --git a/README.md b/README.md index 5d66bc0fc6b..7f966fde011 100644 --- a/README.md +++ b/README.md @@ -469,6 +469,9 @@ Thanks to these wonderful people, welcome to join us: QBH-insist
QBH-insist

💻 jiangsh
jiangsh

💻 Keaifa
Keaifa

💻 + Loong
Loong

💻 + Chandrakant Vankayalapati
Chandrakant Vankayalapati

💻 + b_mountain
b_mountain

💻 diff --git a/README_CN.md b/README_CN.md index 19a5e50fd47..7b414e5d583 100644 --- a/README_CN.md +++ b/README_CN.md @@ -468,6 +468,9 @@ Thanks these wonderful people, welcome to join us: QBH-insist
QBH-insist

💻 jiangsh
jiangsh

💻 Keaifa
Keaifa

💻 + Loong
Loong

💻 + Chandrakant Vankayalapati
Chandrakant Vankayalapati

💻 + b_mountain
b_mountain

💻 diff --git a/home/src/pages/team/index.jsx b/home/src/pages/team/index.jsx index ece83dec6d8..a53509082ad 100644 --- a/home/src/pages/team/index.jsx +++ b/home/src/pages/team/index.jsx @@ -1121,6 +1121,18 @@ export default function () { src="https://avatars.githubusercontent.com/u/83876361?v=4?s=100" width="100px;" alt="Keaifa"/>
Keaifa
💻 + Loong
Loong

💻 + Chandrakant Vankayalapati
Chandrakant Vankayalapati

💻 + b_mountain
b_mountain

💻 diff --git a/home/src/pages/team/member.json b/home/src/pages/team/member.json index d5ab01bc08c..c9ebeae7e13 100644 --- a/home/src/pages/team/member.json +++ b/home/src/pages/team/member.json @@ -108,6 +108,11 @@ "githubId": "131688897", "gitUrl": "https://github.com/Calvin979", "name": "Calvin" + }, + { + "githubId": "77964041", + "gitUrl": "https://github.com/yuluo-yx", + "name": "Shown" } ] } diff --git a/script/assembly/server/assembly-docker-compose.xml b/script/assembly/server/assembly-docker-compose.xml index 2ffe576bf5f..b7a58d8ea42 100644 --- a/script/assembly/server/assembly-docker-compose.xml +++ b/script/assembly/server/assembly-docker-compose.xml @@ -35,6 +35,7 @@ http://maven.apache.org/ASSEMBLY/2.0.0 "> conf/** *.yaml *.md + ext-lib/README hertzbeat-mysql-iotdb @@ -45,6 +46,7 @@ http://maven.apache.org/ASSEMBLY/2.0.0 "> conf/** *.yaml *.md + ext-lib/README hertzbeat-mysql-tdengine @@ -55,18 +57,20 @@ http://maven.apache.org/ASSEMBLY/2.0.0 "> conf/** *.yaml *.md + ext-lib/README hertzbeat-mysql-victoria-metrics - ../script/docker-compose/hertzbeat-postgresql-iotdb + ../script/docker-compose/hertzbeat-postgresql-victoria-metrics true conf/** *.yaml *.md + ext-lib/README - hertzbeat-postgresql-iotdb + hertzbeat-postgresql-victoria-metrics ../script/docker-compose From 4fdc048287105e2f6b6ea60c7c081e44075f58c3 Mon Sep 17 00:00:00 2001 From: YuLuo Date: Sun, 18 Aug 2024 22:53:58 +0800 Subject: [PATCH 200/257] [doc] add new committer docs (#2554) Signed-off-by: yuluo-yx --- home/blog/2024-08-18-new-committer.md | 96 ++++++++++++++++++ .../2024-08-18-new-committer.md | 96 ++++++++++++++++++ home/static/img/blog/committer/yuluo-yx/1.jpg | Bin 0 -> 33939 bytes home/static/img/blog/committer/yuluo-yx/2.png | Bin 0 -> 64370 bytes home/static/img/blog/committer/yuluo-yx/3.jpg | Bin 0 -> 46212 bytes home/static/img/blog/committer/yuluo-yx/4.jpg | Bin 0 -> 79679 bytes home/static/img/blog/committer/yuluo-yx/5.jpg | Bin 0 -> 20912 bytes 
home/static/img/blog/committer/yuluo-yx/6.jpg | Bin 0 -> 60078 bytes home/static/img/blog/committer/yuluo-yx/7.jpg | Bin 0 -> 80907 bytes yarn.lock | 4 + 10 files changed, 196 insertions(+) create mode 100644 home/blog/2024-08-18-new-committer.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-08-18-new-committer.md create mode 100644 home/static/img/blog/committer/yuluo-yx/1.jpg create mode 100644 home/static/img/blog/committer/yuluo-yx/2.png create mode 100644 home/static/img/blog/committer/yuluo-yx/3.jpg create mode 100644 home/static/img/blog/committer/yuluo-yx/4.jpg create mode 100644 home/static/img/blog/committer/yuluo-yx/5.jpg create mode 100644 home/static/img/blog/committer/yuluo-yx/6.jpg create mode 100644 home/static/img/blog/committer/yuluo-yx/7.jpg create mode 100644 yarn.lock diff --git a/home/blog/2024-08-18-new-committer.md b/home/blog/2024-08-18-new-committer.md new file mode 100644 index 00000000000..2376fd17466 --- /dev/null +++ b/home/blog/2024-08-18-new-committer.md @@ -0,0 +1,96 @@ +--- +title: Becoming an Apache Committer is a recognition of my participation in open source +author: yuluo-yx +author_title: Shown Ji +author_url: https://github.com/yuluo-yx +author_image_url: https://avatars.githubusercontent.com/u/77964041 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system, Apache, Apache Committer, Hertzbeat] +--- + +## My open source journey + +Speaking of open source, I came into contact with it at the beginning of my junior year. The scene at that time is still vivid in my mind. + +In retrospect, my first official Github Pull Request was to remove a redundant Pom dependency for the Spring Cloud Alibaba project. I was very busy at the time, and after more than two hours of figuring it out, I finally submitted the first Pull Request successfully. 
I am very grateful to [Cheng Pu](https://github.com/steverao), who introduced me to open source and took the crucial first step in participating in open source. + +From the initial rush of using Git, to now `git c -m XXX`, as well as participating in various PR/Issue on Github. Looking back, I have so many thoughts. I think life is nothing more than this. Learn and explore -> use skillfully -> make achievements. + +From my junior year to the present, I still maintain my passion for open source and participate in open source. To this day, I am already a committer on three projects. + +## Participate in the Apache Community + +[Apache Software Foundation (ASF)](https://community.apache.org/) is an American non-profit organization that aims to support various open source software projects. ASF was originally formed by a group of developers of Apache HTTP Server and was officially established on March 25, 1999. As of 2021, its total membership is approximately 1,000. The name comes from a local Indian tribe in North America. This tribe is famous for its superb military literacy and superhuman endurance. In the second half of the 19th century, it resisted the invaders who invaded their territory. In order to show respect for this Indian tribe, the name of the tribe (Apache) is used as the server name. But when it comes to this naming, there is an interesting story circulating here. Because this server is based on the NCSA HTTPd server and is the product of continuous revision and patching through the efforts of everyone, it is nicknamed "A Patchy Server" (a patch server). Here, because "A Patchy" and "Apache" are homophones, it was finally officially named "Apache Server". + +The above is an introduction to the Apache Software Foundation from Wikipedia. + +The Apache Software Foundation was originally composed of developers who developed [Apache HTTPd](https://httpd.apache.org/). 
Starting from the Apache HTTPd web server project, they created many excellent open source projects, attracting open source enthusiasts around the world to participate in the maintenance and iteration of the projects. Projects continue to be retired, and new projects are constantly being hatched, over and over again. Only then did the Apache Software Foundation become what it is today. + +![Apache HTTPd Server Logo](/img/blog/committer/yuluo-yx/4.jpg) + +### First contribution + +My first contribution in the Apache community was deleting a `{@link}` code link in the Dubbo project. I'm ashamed to say that [Dubbo](https://github.com/apache/dubbo) is the first open source project I participated in at Apache, and there are only 6 submissions so far. In May, I came into contact with the [Apache Hertzbeat](https://github.com/apache/hertzbeat) project through [Rick](https://github.com/LinuxSuRen), and started my Apache contribution journey with unit tests. + +### Get nominated and become a Committer + +This nomination was recommended by PMC Member [Logic](https://github.com/zqr10159) of Apache Hertzbeat (Incubating). Thanks to the Apache Hertzbeat Team. I was successfully nominated to become a Hertzbeat Committer and got my own Apache mailbox. + +![Apache ID Email](/img/blog/committer/yuluo-yx/3.jpg) + +### The meaning of Apache Committer + +As the saying goes, the greater the ability, the greater the responsibility. Becoming a project Committer is not only a change of identity, but also a recognition and affirmation of one's own abilities. When reviewing a PR, my ‘LGTM’ is no longer a gray style, but has become blue (the exact color shown varies with each person's GitHub theme). There is no need to wait for another Committer to approve the CI. It means that you have management rights over the project. + +![PR approval](/img/blog/committer/yuluo-yx/5.jpg) + +## How to participate in open source + +Anyone who wants to do something needs an opportunity and a guide.
Among the many Apache projects, there are many people who pay attention to the project's Issue List. The one time that remains fresh in my memory is: one night after writing the unit test of a tool class, I discovered a small bug. What I thought at the time was that there was too much contextual information and it was not good to write it in a PR, so I opened an Issue to record the context. How small is this bug? It was so small that I just created the Issue. After submitting the unit test and the code to fix the bug together, I refreshed the PR List again and saw a PR Title to fix the bug. + +In fact, there is no shortage of people paying attention to the project; what is needed is an opportunity — an opportunity to participate in the project. + +### The Apache Way + +The Apache Community pursues [The Apache Way](https://www.apache.org/theapacheway/). The community is greater than the code: a good community is often more important than excellent code. The components of the community include developers, users, etc. Users are the first users of the project code. A healthy community status is when users discover problems, then report problems, and finally solve problems during use. A more likely scenario is that a user reports a problem, switches from being a user to a developer, solves the problem, and then continues to participate in the maintenance of community projects. + +### Paths to participate in open source + +Open source is often pure, and the Apache Foundation exists to protect projects and developers. + +#### Apache Community Identity + +Before contributing to the community, it is important to understand the community's definition of identity, where a project's Committers are located, and how to become a Committer. The Apache community has a very clear definition of [contributor status](https://community.apache.org/contributor-ladder.html):
![Apache contributor label](/img/blog/committer/yuluo-yx/6.jpg) + +#### Project Committer Nomination Criteria + +The conditions under which a project's PMC team nominates a Committer vary from project to project. Take Apache Hertzbeat for example: + +![Apache Hertzbeat becoming committer](/img/blog/committer/yuluo-yx/7.jpg) + +Each project has its own standards, and these standards are not set in stone and will be adjusted at each stage of the project. + +#### How to Participate in Open Source + +Next is the highlight of this section: how to participate in open source and get nominated as a Committer? + +##### Open Source Events + +Students, because of their special status, do not have the experience of developing large-scale projects and do not have the opportunity to cut their teeth in a production environment. Therefore, it is often difficult for them to get involved in open source, and there is a lack of opportunities. + +I think the best way to get involved in open source and get nominated is through the **Open Source Promotion Plan (OSPP) or Google Summer of Code (GSoC) events**. After completing the topic in question, gradually familiarizing yourself with the project's features and code, and continuing to participate will result in a nomination. There is also no lack of students who are nominated to become a project Committer directly after completing the topic. + +The next step is to **improve your Coding ability**: find the missing unit tests and code bugs in the project, and then submit PRs to fix them. This is often difficult for students who are just starting out; Coding ability is often built up after optimizing the project code and understanding every change. + +**The ecosystem around a good project is often crucial.** Good documentation allows users/developers to quickly get started and participate in contributing.
Multi-language extensions allow the project to be used in projects built in other languages, expanding the project's audience. A good Example repository allows users to discover more ways to use the project. Therefore, participating in the construction of the Example repository and writing and translating documentation is one of the ways to familiarize yourself with the project and get nominated. + +Finally, I would like to say that getting nominated is something that just happens. You should not participate in open source just for the sake of getting nominated, and you should never forget your original intention. + +Project communities tend to welcome contributions in all ways: be it code, documentation or sermons. + +## Random Thoughts + +I'm writing this post to commemorate the special occasion of becoming an Apache Hertzbeat Committer, and to motivate me to get involved in open source in the future. +Keep up the enthusiasm, keep up the coding, and thank you for all the people who have helped me while I've been involved in open source. I wish Hertzbeat the best of luck in its incubation and graduation to become a top Apache project! 
🎉 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-08-18-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-08-18-new-committer.md new file mode 100644 index 00000000000..050fc8610a8 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-08-18-new-committer.md @@ -0,0 +1,96 @@ +--- +title: 成为 Apache Committer, 对我参与开源的肯定 +author: yuluo-yx +author_title: Shown Ji +author_url: https://github.com/yuluo-yx +author_image_url: https://avatars.githubusercontent.com/u/77964041 +tags: [opensource, practice] +keywords: [open source monitoring system, alerting system, Apache, Apache Committer, Hertzbeat] +--- + +## 我的开源历程 + +说起开源,我在大三刚开始的时候就接触到了。当时的场景仍然历历在目。 + +回想起来,我第一个正式的 Github Pull Request 是给 Spring Cloud Alibaba 项目删除了一个多余的 Pom 依赖项。当时手忙脚乱,在捉摸了两个多小时后,才算是成功提交了第一个 Pull Request。非常感谢[铖朴](https://github.com/steverao),是他带着我认识了开源,踏出了参与开源至关重要的第一步。 + +从刚开始使用 Git 的手忙脚乱,到现在 `git c -m XXX`,以及 Github 上参与的各个 PR/Issue。回想起来,真是思绪万千。我觉得人生莫过于如此。学习摸索 -> 熟练使用 -> 做出成绩。 + +从大三至今,我仍然保持着对开源的热情,参与开源,直至今日,我已经是三个项目的 Committer 了。 + +## 参与 Apache Community + +[Apache 软件基金会(Apache Software Foundation,简称为ASF)](https://community.apache.org/),是一家美国的非营利组织,旨在支持各类开源软件项目。ASF最初由 Apache HTTP Server 的一群开发者组成,并在1999年3月25日正式成立。 截至2021年,其总成员数大约在1000名。命名是根据北美当地的一支印第安部落而来,这支部落以高超的军事素养和超人的忍耐力著称,19世纪后半期对侵占他们领土的入侵者进行了反抗。为了对这支印第安部落表示敬仰之意,取该部落名称(Apache)作为服务器名。但一提到这个命名,这里还有流传着一段有意思的故事。因为这个服务器是在 NCSA HTTPd 服务器的基础之上,通过众人努力,不断地修正、打补丁(Patchy)的产物,被戏称为“A Patchy Server”(一个补丁服务器)。在这里,因为“A Patchy”与“Apache”是谐音,故最后正式命名为“Apache Server”。 + +以上是来自于维基百科中对 Apache 软件基金会的介绍。 + +Apache 软件基金会起初是由开发 [Apache HTTPd](https://httpd.apache.org/) 的开发人员组成,他们以 Apache HTTPd 这一网络服务器项目为起点,创建了许多优秀的开源项目,吸引全球共同的开源爱好者参与项目的维护与迭代。不断有项目退休,不断有新项目被孵化,反反复复。才有了今天的 Apache 软件基金会。 + +![Apache HTTPd Server Logo](/img/blog/committer/yuluo-yx/4.jpg) + +### 第一次贡献 + +在 Apache 社区中的第一次贡献应该是给 Dubbo 项目删除了一个 `{@link}` 代码链接。说来惭愧,[Dubbo](https://github.com/apache/dubbo) 是我参与 Apache 的第一个开源项目,到现在为止仅有 6 个提交。5 月份的时候通过 
[Rick](https://github.com/LinuxSuRen) 接触到了 [Apache Hertzbeat](https://github.com/apache/hertzbeat) 项目,从单元测试开始了我的 Apache 贡献之路。 + +### 获得提名,成为 Committer + +此次提名是 Apache HertzbeatP(Incubating) 的 PMC Member [Logic](https://github.com/zqr10159) 举荐的,感谢 Apache Hertzbeat Team。顺利提名成为了 Hertzbeat Committer,有了自己的 Apache 邮箱。 + +![Apache ID Email](/img/blog/committer/yuluo-yx/3.jpg) + +### Apache Committer 的意义 + +俗话说,能力越大,任务越大。成为了项目的 Committer 不仅仅是一个身份的转换,更是一个对自己自身能力的认同和肯定。在 Review PR 时,我的 `LGTM` 不再是一个灰色样式,而是变成了蓝色(因个人 Github 主题不同,展现的颜色也不同)。不用在等待其他的 Committer Approve CI。意味着对项目有了管理权利。 + +![PR Approve](/img/blog/committer/yuluo-yx/5.jpg) + +## 参与开源的方法 + +任何人去做一件事情,都需要一个契机和引路人。在许许多多的 Apache 项目中,不乏关注项目 Issue List 的人。记忆犹新的一次是:某个晚上在写完工具类的单元测试,发现了一个小 Bug。当时心里想的是,这个上下文信息太多了,写在 PR 里面不太好,于是开了一个 Issue 记录上下文。这个 Bug 小到什么程度呢?小到我刚创建完 Issue,在将单元测试和修复 Bug 的代码一起提交之后,再次刷新 PR List,看到了一个修复 Bug 的 PR Title。 + +其实不缺乏关注项目的人,更多的是需要一个契机!参与项目的契机。 + +### The Apache Way + +Apache Community 奉行的 [The Apache Way](https://www.apache.org/theapacheway/)。社区大于代码,好的社区往往比优秀的代码更重要,社区的组成成分包括开发者,用户等等。用户才是项目代码的第一使用者,健康的社区状态是用户在使用过程中:发现问题,而后报告问题,最后解决问题。更可能发生的一种情况是,用户报告问题,从用户身份转为开发者,解决问题。并在之后持续参与社区项目的维护。 + +### 参与开源的路径 + +开源往往很纯粹,Apache 基金会存在的意义也是为了保护项目和开发者。 + +#### Apache 社区身份定义 + +参与社区贡献之前,先要了解社区的身份定义是怎样的,项目的 Committer 到底位于哪一层级,怎么才能成为 Committer。Apache 社区对[贡献者身份](https://community.apache.org/contributor-ladder.html)有很明确的定义: + +![Apache contributor label](/img/blog/committer/yuluo-yx/6.jpg) + +#### 项目 Committer 提名条件 + +项目 PMC Team 提名 Committer 的条件是不一样的。以 Apache Hertzbeat 为例: + +![Apache Hertzbeat becoming committer](/img/blog/committer/yuluo-yx/7.jpg) + +每个项目都有自己的标准,这些标准也不是一成不变,在项目的每个阶段会进行调整。 + +#### 如何参与开源 + +接下来是此章节的重头戏,如何参与开源并获得 Committer 提名? 
+ +##### 开源活动 + +学生因为身份特殊,没有大型项目的开发经验,也没有机会在生产环境中切身使用。因此参与开源往往很困难,缺少契机。 + +我觉得参与开源并获得提名的最好方式是**开源之夏(OSPP)或者谷歌开源之夏(GSOC)活动**。在完成相关课题之后,逐步熟悉项目功能、代码、持续参与就会获得提名。也不乏一些学生在完成题目之后被直接提名成为项目 Committer。 + +其次是**提高自己的 Coding 能力**,发现项目中缺少的单元测试和代码 Bug,然后提交 PR 修复。这对刚开始的同学往往是比较难得。Coding 能力往往是在优化项目代码,了解每一次 Change 之后慢慢积累的。 + +一个好的项目,**周边生态往往是至关重要的**。一份好的文档,可以让用户/开发者快速上手使用,参与贡献。多语言扩展,可以让项目使用在其他语言构建的项目中,扩大项目受众。一个好的 Example 仓库,可以让用户发现更多项目的使用姿势。因此参与 Example 仓库的建设和文档编写、翻译等工作,也是熟悉项目获得提名的方式之一。 + +最后,我要说的是:获得提名是`水到渠成`的事情,不能为了获得提名而去参与开源,任何时候都不要忘记自己的初心。 + +项目社区往往欢迎各种方式的贡献:不论是代码、文档还是布道。 + +## 随想 + +写下此文,是为了纪念成为 Apache Hertzbeat Committer 这个特殊的事情, 同时也是对我之后参与开源的激励。 +保持热情,保持 Coding。感谢在参与开源的时候给与我帮助的各位。也祝 Hertzbeat 顺利孵化毕业,成为 Apache 顶级项目!🎉 diff --git a/home/static/img/blog/committer/yuluo-yx/1.jpg b/home/static/img/blog/committer/yuluo-yx/1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e89f546f7500298d37d449336f7bbc40d437afa4 GIT binary patch literal 33939 zcmeFYWmJ@3^frtlq7n)UC?OI8(nB|b2+}Rx0@5*b3`$8O(l9g(-QCjN4KoZO-8s|@ z@4)Z>to6K~o-fb)?VYt)tb0z(x%b&;?`vOspI{{gDZFQ-&oD4B@T9*;s9<0`1Y%%3 zuzB(r{f)K%t{eK_1E7kOI7Z1Z`4;-&k-3<>7zRdpB+iWy7W(_%`ioBt)vaO502{>UCotDe>Q7)0u!_aL(39cKs>EkAsc5S-xsHD z`+plqq%ZwNM!Yh)WNKF5)_7m=?_S65g2>FDUbo0$co&J1p2 zQ6smPqNhH$2e<0ywa z?b_Awo?y~V^tp*?pCd(FoV32J*KNv~wlSG@4&HEq#D1RVZOUWo85NFFFFHe%4uU^x zP<=4XVhw(&DB-<{LXEUI?&26sBBaHTKiUjEI|+mNHAdwX6(>Y*PedouP^a|oCs%&& z=;-L2c03nlprRjXw4J+QG^eWxYGR8kNVq`oJ1LXG!i*DLwV0ovYWyC;oVW8> zkT}v*E;g-#+B|suvZh(Bg>fh}{fmE_VVSE%a2I&?-#=RKTO^{YzCQ8GAhO5j1{--N zfhk}KSxKW3hm z0y*9@AqIfQO@c$u02<9XjO(YbQqn{x{{3Fle~tSXJ=k-+KcqfC|AV%sW_oxyo?SEe zNj0tUFt@uxDv4_g@?59TU~RCInBc$OWr#5^<6(+mpyWAnS8T{oSSs7MGM1D03(C6;6K(`RdXYat(q1zb7u5DB>V$ zsHn({AR^GEGBbWw#lQ(@C2{RS-k!rz$;WJc3@QKn!~v4)s2v|(AqXU7Xu^gjGGykA9glHaXk^4(gqnowND-fyl^1q*L@~-Je@8XIUOA;EM2`V3 zO|dlwOKahd)UO6_R1V^W>sF_<8 z<88fU|4%Mcj|0Fr)OQvG<1eV=e*WI5cUD$bUmdC!=$7qgh7xPa%F?4B-SsfMHGFAc zkxU45{+(o z3zjQ3mcYMjzkZW^A+rF=MBewt_+Wybm8_HeuIZ!XY^TQjV*vKu!*7E7H|jh$CBCzJ 
z7>6xJ6B7ShPcG&Et*3lUjo9FWyKdk7g+bQmNt5F>?}4lTsIwcLSJIr zh+c1Uz_yalyWl5+?I6Lz{p0bua<^Og>F$FqbCE2&uV?WDbqE2FZ*x0BaYVp$v$Qq1 zMDTCMVSr4bqSS8{j3ylGBnF#>8RLCKnxhGdByD`-W_s>+hw6%TyDOTkl9=Uf32Gl3 znUivWDxu{P@wjS6O-BF^a!)x%=2DK?t)I<&J#9`yEiO_aad0YROD-%j z40Ola*T&hJE~<2|d|Rr8o31>s)b=kMIgAx}Pus3tQt^{SS;mw$MuciXS0|BGzl|r_ z_jlpfFkWQM90^wa4tA2#<)UKRZMD-b0ZK=c>~a!VxK{Zzgsjy5!1?BpXF##IsGi6f zXT!n(NpwAr!UXp1aju2!a(R2~N z>*O>_gGTFQKFMDB2515h@fuqNVcB|bGIR3AD)|5iHsq{*ka?{&6tiX9DI@iZS}QEc_$^ih_W@$T(P* zQ0_q`A$4wC6!mdK*1J9z0{UTdf{#C?_w~jQntF}2ZVrf0cS50dn?+vmOZnYb>BgSH zfh9qdiDNY_1=#=bzcYZ zV=R8)L7ztodjP_Z#_5}-dN?lf3*1S@1jYY(sOXZ^Q-dMpYO+CTaS`z7(fl9c-_rr} zE&v3qG$p^UZc8+pax#bphY)C^6JlIa;AH4J^~yex+k%I2&)$`K{TnH2!9d!>M*>;7 zd>TOnEQF7ZmV%@yk-F_n=bR1mNS!1ej+5)Q!YDG97EeYi?0V)v!XSf(0vGeSpI&h` z>KS{6w~$@>?%%9QopKW4n|rUNv|b0cJa0g*B2I_ku#5r9E$1-N+aOS%5o($TtAu9^ zngPG%yuJPfA2**l)s}Z(*J#as|7J*y%n9_#73vJm1&L`~YWEsQ+E{Oyj1_e(WheWe z;Umo?>9ya<`0Pu)fr9AP4pAN3-v}%IXXb3DW37dL1OUK{Bo_<-9^v zRBg6iorneEel+qJL|8VI2Z)e8uQwzB?!U5fbP%sZDo9yA>*neLD;Wvg9G<6%^$S*t9JIXZB-h0OSU=*@_stCMJ@wlc{7^e>rEOX zIwe~K-_rOZR}|sOt+mpDC%NH*T)gBzeAyg=}X^dBEI zaL$R|2GFDS2Y=YLPl`kyY#t74glI)*I|!sw1o2A>;B8g;3Az@iB*mwtjVl>;N{i~@bTvmngukX9e@7bRj;j|VtFGP zMS8M&^Lx?ywvZ~-a5JlN!|u}4$a zS)YQc+jz+pejT~s!22*|<;oCb_mV+;5<#k?<;?IjU<7la!v}mR-rRe!fUChL?cpeZ zt^W{o@oS{63H3*Wl8wz4?9zYkyERN3GhfCKwf4I_p%8-mKK;*yrx80(T)i`|hYbhD z0r8z#$1h0$3Xc8$KGCS-N3CdLvs>Ge1)6d{S;#8CvmvvONCg242VvHi0{h7K>x|Oa z8-n||W~U@XOG1r}#tQutyp|iqw|k2orNqYZ*eo3w)TiH6u{PjU+tw2Hedf2l9j-n- zpz@Df@vSXZ4VX9~BxD{yMT<9LT>JIb$`p&!wZl&`6`i_cGjgaJ$eqKnX4%%XKW5MG zgf2m%grDc$el5{Ek8H?hS%=o4d{FFyb(t&i-Saa~meue)XnCq6r;m}1uYl^Y8MJCI ze2n%e{{=`#PB*MXEVRh*Xg&B3WVjf1fw`EK!y5$8h=kyBL6*CE05jH@TDTPnaNNin zVfCh*Z{T4Mu+8y+5D%HNy3bn^G-oe3UDQP$%|+1^D4MAnPE*kl{*D6BQv_NKV|`8G zx2M0+NW5M9mUh=AFXyn%bc`%#QnkqPRN-j{@zu@qtNBPM|539;vXhV*xXJ3$nV)|- zaEmi3Xxp^e!r;@B&R8l61<%WhnV)rD&i$dtB<|L6V&FcoZA&-&1pFrORTsHz?C6tM zCS>Iy%#9(;UAnQR-*>RD_McC1QB-M}OxF2cFhOY|6})9!PYtdgb7Gu7zIO~tfDIme 
z*G;koHj%n4`na*kO?-1%MV8pk1f;&F6y##UX^8=Mp?RO-^B)TyYz^iM0q5FsvoWGj z+fBAx^IL`qg^4!39crm|HJK{73pgTo{CbbV*nXpAPoLgvZSYAX(;zS>S*!c~awu z6TMbLkF!xC$y||Ki!27siFO)UKy3%dtE!K4Ur2qPCxT{dwWN9<%znDVhc4`0U_l{+ z=`aEU`J&(UT?zV7o7S1dsQNl7Bd^YOE2fpjrOXXC-DT3SP?zhggJLU8y(WUezd@Z5 z6Kz9&dEI=0nyoCIPalw#Q)GIA!xlRY8)slpGz5ncSa1U4vx|BNc~j!C%|oW-Nf1g_ ziHy$i7_%gP*bXNpHjSK+KIjZ4&d6 zaQvqX4Gqp(ZZV=elz*!UmcJAMs%J9=W?5c7k~Wv~4B6q=}Ivgs`yD zLiOy@#&Zh*&ljx@dcE92UFO1rJcXmqPXxKX?bC8yIGpniD+ZRc-(6(ha+oEq^4$Il3Ze+dfqP%uxT~9ce8irMp*Ba(_-|&ecXkn(s3fW5d>AXx{ zS99i0U!66tJjD*mP;0lYicdoh33!z2O!-#g#lueHn!aKy`2 zS^6_2oyen%U((8+>dZlDoGs|2uX;l3i+xaWfh||U-XwERG)Il|0tSNSX9=M%-qk%^ zoE_TAFc24~X`HTd44}k;EH%ctzNq(UXf5DSdn&zhkm9~&+wK_et=1As`~%hw2_CQU zqhCKQrKff8a2Ww}N@)^4eQFolvbXpME$e_dN1ByI%I*UTz@&I<-T52uBXnr6{v19! zTH2aEQc`Cw=q-WBj1XlfosFwe+&Y1KXOX<~`P91CQZUl9a;PJDi-!65IecZv8=8g_ zCReAJ)kBRd^70>tE;l^SUGQcPT28o+wSxH1RMG`pdwdZB6GD!A=BEP&$T*Pb7V|D@ z-4%K2-XrD2PfI656T4iUw4e7&vr}N?iHz@M?r9~Hilh@MQU-s`bZjN1M_qv9Uv4hz zgt-rs5h@OJJGF5t>s_P$<@7)bu+cIo-FyM7WmJqCJazKXTg&p~%09O4W0GAQz?-QZ zNb?r^1m|B&;$S?ChnehtZlVQ!|5D+#uk1&1XgtI4LF<+7uc(uw znY6JrRm!nk9~NWo!-^MCU-ijOPYgV4I%;WPSDU4x&XSDYmpY%{6a+aQx$;IG zajO=442bZth3TDR>=NBYUxJ3l!zny+b?$#1t3FW^bnq>#+92p&0trDI3}GCI5`}pG z_>t36yqK5l3szi61RCQBjjbhtt#6ZiH_&?cILhJos?@%{6o=UQ@=*jFK>Vdtu9Mv4 z+53fNE6&yo#b$$N>~a!?;A_YM&;eO(^UNXFb|5~PT?eN%cSP91faCJa9|SL@w;mhk zHE^RSHAmyuG?e$!F0T<<3(c(dYg_oPPHaP4ZnxtlwN69J^QzQZt;bTFhII37>C{x^E><#fDOKZX7fFeczthtdg)~ z*%p|iQ6~{0iI0Z$b)>7~VVt+Udl7B(gIT2|=lhv!pRIKM8SOyf*6fq56 zk6Sr15drg0a#b2o7pzi`2km2|7g-P@HLfbe72OP0(-$`NN0DC`4g_@SJF7B~rCc^l z+L0FF9mK`RP(|-pdg|huX;2wk_~eFGh!aoM}7Xz)#e z4Bg{L`KfU;`PkDxCDAz%%k90eHqNv{I+;Je-5+l`mK+B zdN~^9*-(boQ*JfF--NDsn}7W2H}afoc}@73a#y3~y9IR4DSbM%*){Y6j=1ncJh%^2 z%M&osj7+3}#60Zx>Dyt}rh_AFY|6W)cWAn2NspTB&Ekf87eW}d^w;G73(n*JOp~0W zg6pf@lU^t1IwJw6Z?ZeiiHk|jZqWn(dftb$p8A6ORSQJZ1e3yD{aeG9%S7p#r%<-Jv3U;lx z;GrX142%P+dkDh^(C`xexHr^u;R#YGLXn>hz7}T)PVf|zMvU;N6mLk9Rg0$N@(roD 
zx%pZ~oT`ZVM3{Q|JyR6Suh(xhnD|Hu-pdIl^45EC1pM~^Po(j)yz^B{!`E%Mdbv=O zW~X`2^VcaLt{t4@?72(&)yWg14;1)%y~;)wagJ$K9!CW>4WCEiPd^RzPlT#X@O(65 z4}#?T9qc8HQ(DA2+R*3i6dD-_P+pa#Jq!HEbs~=}JpjrbTPI#6@#AJRIk$*<*nRc6 zW7W>t?lt~7z%?U{uA8o7+Ve9GruWG_PJ`~u$01jG3kP}0^*D=DGn~+Lg(Uka1%3d+ zC1JSC&9W&e>&ACg64l3I`K`@)-J(+xS2BQ;*5BG%8CBw+AKoCjZV}K?e{2SB_*M*a z!2iV*H2Ebih?dx$k-SiC)3m$`K9%b65lvcMHsWcMZ>bqsQ8GRKN<>Tynn@jIM9h%r zD_s*1D6=^2^;Jdl*qP@mLIRS3Re5Q$tn0HZ#C-_oiy0ES{7?RKIV6j8&s7Keo~(FK zN#$AzSg7rrM-z0NnS*`I7N1!9SB77)tD{-nC5-#MN4)@^v2+hBbwmvGDoGErh*+Ei zzx?a~p4SS~eLHV25>EgO@T89+1OD6^sPEZzQF3#$9_y6HHzx$t^2lv`_G-qJOSX07 z(~{s39rINVuK)Y0B0{!@38}RlRG9T#mPSj2DksqsD#KljiK<$zo0-Y1faS{+CRF0&8{g1qXm23auZd!B9n5cc z(mrIAq%*c+0`w!!g;+cWj2YgZ;t3{^))iH;lETk}eP+0e^0 zO#9Ek5CY{KAZf$oX62ok{nhgw8!}d~{$PH(-EM$@x80wwE0Q-{Or=*_74BRin%+1m ztw)(vskWYpsqpV6M=A|c5l{P?c_eoh0}@P#e}!c!otD#wQrAQvRK&VkWdJ?YHHUff zl%a${@%l_8^A}~pZ(bAK(e&pe_XpY?puz1(9vBw3XNXjHXm$#d?8^HTIv~XK3nBB` z^pQPVM#ox!-inGm>Fns`iY!UB?2J!Sh!XkEx1)GN3TpI3Wm0#}ZEq=rzf5s9^CpGZ zyf{^n-FB=K&ZV+S;p#z(?lbFir$A`Fe(_CLj>C36ZR?jY!o?RWf2M=pp_>o!?{*B& z7{7@#!~{0!RFeD{hff2od4JnF|8v6VP8dJJ{-@X7!#kQgG~kDJkmyhElmF!t(GMJV z;1%t+?;8I9A^*wsP8R=nB!;HWnOIdp)=9*nS0^G_bzJf3b;5O(9X;%M36sec1%1rgZ{CrdHHi}Xo}SboX(=>_Z2N-}paJtaP@XD$US(XH z-`*OWP)G8AR^uP@wtfQR^=U2=JrJomP8|7&nH_Lv`K~1gtJ8$VxTRQC8smS?h4R0z z!W9)1vCVqEy`6u)i@^e+-CXG+Cz+1|ZNHgomdU!YYtffM09l;!AgQGMa{0JI*25F; zG@9BWDDYz@w$1rIQ2$P(a$ePXUBVVcx5c})mWU5XqP#!q9w=$1vzUR`M!>lJw3Xqy z_1D}!(7o8!l14O6V)h$mG!;MNDv(>h&fkhA0j#mv@N8b@pzT_$!lx=c~mQ zg`r=cA6;T8{c$nOJ+pAwB<4OwR5Q28>@`$Xf^7@DVVS`Tc#}77`Y_6Yag3DVJjy4P zVtyoGK5kKWS6@0+0Len>wh;GKe#&p3Ntu~I#?ViBULd!yldOP-V~gj6YNLT&_eQZL z(F`x-ZDdiG?x>ZIkVsv;fzM{v}XKfzKgv z%;0h2R!){w$>``-%ZwnxGINeWboQL`CJp~C)U@EceRP^hgkE`ZDeBT8@lzxYwaSIXx zR}|_AN@x+UXOU?TeB!d{XBu)x@_BV_?y3Qv1-P`YfHwU7Fn+OWUV4^TP$-?PBlgL+ z9}U~ojAhy8zT7^+{YLz*VUC2Ma#^v!(1WJ|^ugEC&^WKkhUhY?82mIWu^u%d_p~3G z#c1M#=1a8&okVP(qcN)(1%u;tECX(XpWyAF+fmqNwmOWxJvo~quSH}lf;TG98OnEM 
z|J5hr9Sa~nU0BC!T#}n4kUuSO%M1M5Sz2jkpoZk@Lu1y8w{c%|U0mL+6J8o+0|kr~ ze9W7EUogx>d|oH$)a`W%(bEJJjHYBo{7RYB9ZBp950{$)qmikdF3-jX*DQH?B8kP9 zfA{HIyN-1rr4CwYXN`I@U(EOA$7RYKS#ec8D{-*}5fqmD*(GGfBBH(lljoN=(GJ`E z3ty_|Q|2K$r$Tj--@NC2MwUY-Pl{TLHp|rs=Tp_f>jX*j+wIw;W|F>Xhv8f?opHl! z#R788M7Tz=%{?vJMn$;KyL0Vg?EOEoh*`N9_rZ26X*TUCjtFy36&|IRX7 z@ZP>=;;LJB(r$TH)NXU}5;sd%^!Uf)wAc^7)}RE;Yu^qBYzy#Wx?(k&og^W>r`{;X zcj2Q?x34T`2k84EAq%TjbzIVjsVaY$+YZ!dm8(jv_c9kuIi>KV*-9DDWjTX6V>3qr zvsHQ^p+8h-vXfdIrZFQ!!4_}fP!z8qwXS$(VKz-R0V%#_*{z25OPe^8 zeA)ULGjDXsM^>#td?@f}XRpS53q`=YtHb5twbx2gCN0Lr&zox=b|aHEE^~?N*wZmH|2QcT3-w2 ztjR?eCw4@hHCc*O+?26}!enKB_1S!x<2_{j*a?2Mq)+NySs)1`?rEqe0B}xg@v3-k zKagiG6OZ?l6qsW7fr@)2r+pq?l4Mm|j4^RYYKQ<*8P!tUYFOC*d%z9MI4|bZS$!FPPQe1*OjYWCL^dC?UBPF#4Xws>L^CWWUHz%ivSnbqV^G`t{;EmUngX{Z4vB zWt#0Op*5{q!{Nd;OqyQAYJkf_6}pxEl=o-n`+nzy=i*lTuIB>LeHCr>aU9~11n`{#CBLI znyYKe@^41x%h=6J@zz24V3ayvm$ecJ0Hr4i8)H}PeATjA0I9Ex4h!hK?1Y(1k2hx= zFN_-$Vc<0{bXx>j8P12i#nC>z&NkFt3I(r&`45jBB(Z0im-UMA?B=wjm! zSN9a{+xr=Vups@$n-b3Rbel?5eNNe8nC;^T7}f?NgG5B5j+Gam%L)|o82b$f;EeZF zZu__rONZ~**Ko)S=iVUm8PoQY>d7?n%3Gp_E6G}{85Sa*0`?rRY&7}rIiqx|buceo zX<0sRC(rgtanBe+0&jq$U(pUzf>%%EJ&j(^=1sJF>1dH?VN4{Lu|~`Zh^g+Y)MtF#@lHtmXui!2d1+41_k`)^d8Vwq+wHxr=%$xLMsF5hy!lup;Ph0 zOT{`XQkJ&vA=d0jhnhWjBl83eLcCg&C4b(i*y?Q})&@E?MP27@u_6@&Bl99Se~1>R z1WwsVG&!?xM}Ho)IU$1vd8)}rNWz=!JAZmA{&8gJY>2J`^C7rlZ_jfVDs64^Zfvp0 z`jp%r8Pw-3?j1&mZ-^KOXe*hfJHOK|YE;weCI_z(w6R|gI96Mj<-b%mRY}FEQ?J2i zsBO=p^{cI~4EunX=25LT-8J+6ChGAeAYbn8bg39Ejffag{1SqH)N>#b`b}w@bQiGW zsew|z9TkXtjxi;(^r)j!OErB|@k)bgUOu)&GPSo?cRwn+TUasIVx?Sp+DZTOvq;*r zvi^!yArjjWa&n*M;DK7WE_e zsBRUQ$xC4AL$ur*uR%qK&2Fc5j;<{n#docf)mL%$EoIKlAtdc@`-d*vp0?enWfdH+ z=Pj-bVO{(jmU6l7J8Y6mhQ`Ujw~9Ls_oY?v2mX`w)LZ=AMDReIY8KV7aGT_~HpH94 z=*;%l#NYZ--A{RvzbEGzk)L0w#{sZC6Og9$^IVma!Fe1XMzElaI@mEf1X;8jnV-w# zI0M@$lokB^S4=XtuOx=^wCuHcNZniH5-=~9J11ID@&STvLCPF=Z} zFcd;=mbiYqWSvX+lA|GedZyl@U_~h)f%BxUzv88V!ktvnI(@-a4rxt7*)3TuM@f5L zav98$-x@EJ)9y96Kjl6HOKZb$q%d@`tW>6*DkRZ(7oS6xg%`3-C04Stmw*BKS6=kVI$!= 
z%A~K%8569mlrhOiM}*>+TP^~w0{PSNyl9ceQ^Ivx1Z_4JRmJ$jT<9(M#}Inq3xRfa zLdB-@GX!4`T=?(^dHg+6mXtvsLe8V}E5i6^J9gOyrXYzq3KFr>p$SYn*iLWic?g4C z`8!K#t=5?}ciesy?7Yl^4AYW3B;)BiC188MdGYQyO;gI5j-`k=aV5k>qTn%3F}_Y1IgDn8DWQ>cal&ZsxeSQtx6RhG9{EDKic z-&m2{i~83`lP%WFR)O?(HE2xN#V{mnuoGP3#eJrcXV)#gy3)_QHtD-Mp1Gw2YNq&# zV>;z~O9XrU#Vr@ooT9XtsOH);MQx}R)NmX~HpY7e;zqWm^lG{_^mfLj$%FytFK3z_ zjjIME!e5w1mH1nz66e*b6whhVe)uH~4Q8NstSflq!Ye;4^v}tW1n;C7sCv>cI7g;v zm5A@;Kh;2?fH~*(FR02f_oA@tjJ&72&?7pOnMYp-a?e3Ikcen#b-y~lvoL2C#06*z z^XCM?@N1|RRBKIP&kNGczE-^=`JRpZ6aH~s@fJ(V$klJ_1^OKPhYx)hos4X=9e1kb z1e02cm2G(osTtN2>U+qEwKjMwcA0fIu6PLM{a+}@`E8N}=P7e+14*idDWrd6$l>6+E77Q#AjClbRM>O@ zK-9x=@3P77c&TQ3Qj$rVV%dC%iCl3U$2+T`fIe zz>oz=Yr>Xo4(dBLyzJyEy3k%ZiO0Nc)#W%qYW&LpVvHh|Thvt9`};TUgwz!jKYWZc zz8?B2`@yY}A`LC$ucuHW>$Pu6E+&uLTloHoR(yncHYE?5;1m$hiF4cPoYah#8c^@z z)%W!mf*N)UrL50boKW4N;|z+irp%0ks0v;0q<|f?pVrOUpca`zGmE zNNRZ>=0lN8oe6#{J&vztkj8av#qK(|%G^o1>8CoAZ{APzU|9UTLbW1}WA~rfU+=lg z%l<+5_b8*x=`Q!3UFN!E;3OT!p4kD0v%3_O4}|!K_u#!vs{>MrbB1m1gUu=Sa1^Cv zat74mh(+vafN(<3RLp;elZnz486>??3&yA3U3=pYZQx&W*)P!t-B7>eDs|AA_cHnM z?B|oNT9dx{RT*?S&K6Q42yimNRm(1?@SKV9T4Ei+AC!vxyN7D{x zY&S!{>$tvtS|A9Q@I3|A6iH>rgY=ul^MfW!wHo7g|gB;c#4&k${JPLn=FI#}1PB`N1b*LnjC4*nljhrRM+G$a>7dh9V}i zH|eQl9tCYen$8+?nX&l~wi_m8sw*VhkYjc)eio%IH$|Trd;HSboBMh3a^XRcF))@m zYr`19)fI4g$hNiX{%-xp|K#%U*_jOVTx*QecCuTdE-6@+(DcS>K;Kg6_{Xv3q-E$cMM@=Gx(-U52$W$s(j)u<>TW+YWnEZs>D=^HkbhY4>mbdurPo zz&jx;6?v*cwe`x#fu_;fTntdFWww`E^i!uc^4(XO^$;slNt&+KUdnNJW^gi|)9G>% z>h9Hk6y4`*M?&k{J6@`Jnrr(Ie*G|G_KfP%%eSG06mq_9!d$Z*OL3PrdtjuBCKnoW zAth1s2K_;BF6YCIhz}bg@v*ri`4F2QUr)_72Ju?9b2xvIRSDvqw$1%8uzV$VO@*dh zJfIfKOWXhoQ3AL9+h<1y;t`URdfCsnPn(@Ngvk;@^wJ=ymEmfIutH(4D38>$N>P+@ z=gNRW0fAAG=pFO@PUrpzt{N-g)RqnsFefLN5Onx%p?Zaii)==(+G(A+D)Z{US^v=_ zB$)arpZ4$j$`E65Yf7#VPHNkXV>uz92Yt{s%;KgXVO{wk0RzgzP@eC$)^Wt|nRiE) zhKPxcQn;vS0y#Xo;gO3)xf7A*Q@8uylhg;d*f&TnTB_9hu_eK8yoIP501f}i-i+FjjGj$0j8IH6J6}!f)e5U!58MuJx z)`^eYL;Tv4X-G954ia5XjXRpI?(BP)gJmhdGNECZP0|$Wx-V)-72Y>qx$|izR*4)> 
zp;S99*3B4pcPXGu=Uzd2y1??^-oFHL5W3F4WE~bB+v#$lTZiVW9$fC>C6V^V(v9EGL)Z~)s>8J*NlaJT~l$5QprRflN(NvTVGp|Xz2HKQEt4G zY|eC>d_fu5luY*(te4w`MQkX)>ncwl-XJa=T=-bvbQv@0_t4{oE0MV$_9oy-n|}CX z0h(m8f3tsvOi7#PWU85ohuHgBVM#UQETmG&YBcNL_ptQLYRO9Rm6rF2iFkRxJM2E- zE`*h$%*&IB(5su(yH~>#V})>!sd9!R2}NgP1x@!U+1#Da|LR=ANDmYQsI7r<9+UV= z8`?G#EFLZF#XN>?G%nO7^N+u;6f!yW0zbJV?&#KvF1ySpzxVAOfPD$0b}*1vnEUM1~p z-Bg}}9%M4~{FI>h+t@b~<_8MhAwX@@NK!v%n4W3=6o5|py;4|iAaEbGGtG0`Dq{n= z4~u>->a=kSd3`7Ix%+arMG|0G5_L62>x11QMU`K2$i>O5h$^uWHf|*v5c1C4%&OnN zJqh}iwVugkJ*3>JK-qo!;bFUJ5X!eG@BZnAm}K4JJs-!DyJgDVZzMd3|HVkZ#!UX! zHzw|sd$WHS!+Q6tE);i)cURwm{`_CbW!XfA*8i7t`LHrW%8~v3=9z@{w`H?nm`tH- zJiRAb_Q177jkCL*l5P?t_@+X{jr>A0IXAf6U3-QS!TLS^{NcSlbpVhYH5R_9Jf8Yo zkZ+NZI&&4u)hn7lpY*ul+}T4^cHO*od>i(!ZxMUzxaGI>bs4snd;gI&SvnE&&1;Dp z+w(=ty>b1&|9LDjS#+_&cj(2@&8t~(4ixr)@pWtee_4U?5VB6)u7CHLh!4(RzjMLK z#P>QFg>SQCJ$!{d3)ZXyi)uUy!wc_~->RZDkAe<548WDp_<5BVX0 z0P=fHmA$K{mb~92=?t3>g{r+Dk-CUM@7N{{lIDCxG6h$bZHFu_zqCFzDXlI|FYEx0 zKbn{n8sj-&PZYW1gScOro4xZXWIgkaN#) z`3uUPA)hwf{&J5U4u5Wbh81`sgtnTb{CXYx*aM*nQShD0n8q>5Ur;OOaHsV}+TuDXC?SFl}H5&50!nyl!g@&Gkn-a`xty zn@NH;bRRVdD}>7GDu{+z*bSbGArG#WXA^UQMzvsACyu`_e+;G<3i#Y zF=3M@o8VR39SoLjS?PHd%Kl>g{?@0tzYgjRDcfSs3U!5e%=3E~T)CZDyz~rA{#230 zp5tV&2Pb+H%usPE{K2oE0f~ya|1Zn(Y!4abt{tW98z8;C6w!D-jssW7Je!GD)BMdF zr=GO>5Y~&QbDhBIRB@Re5in>4(plz>i{+i*>REaJ$MCayT298Gvi`O5#jyZIu|>u; z)<)c!xkg?f*#qR55hVV=!T*n{MODJMT}~FSR-Qe zA>U%y9dGh#)Y&#cClC21w~wy1z7*(|Dv+Y}EeDpGb1miT3-o);X0N>nRtvX)81L_~ zrz{_G>pM@_Cmt)>=6ea8a54D$U36@z)VXHHurk8t^A5rrBtmz6_KvV)A~)ZEqc!o) zjwVBq6Vax?cwsAC&J^aY3c~4QyoHRV8G$+>L{<5cne zPm7z=2b}RHygW;%nC}?`J=qIePzrNx?n&H+z}jr8$x-j~Hr)E<1w4CG{E< zikjZ!mZ<7VMgS9?2cm4VpL_SN^srPmgXwSdKG>2~^;#?yGRZAdHTv8TubF{8znVER z9<;MDfsA+3!(B-gvuxhzOdsuP*d~HX*uqpBW3VfC6ZiZI26Ux#cJjR?Zp>H7Xa+R? 
z+|2CBpTLahg|l!{<{gq;SNYHvFy5ub%tn)p&`(78KvVy*IyCbS+r%;I6Ev;POE^g4b9X*l7V?>*|iuhJ`GA>@8 z!j2JV#svL&0}CBB{(^KCyydvu^_|I+!J|sG{V+GI>mUhGSI;M#qU8!!Ze$UDyz>|9q*n$RQUVY);mWq2vh{G52a?L26P5^5+7!*_l zc~v&jVqLJA2--octoGU`IyM?2C4Ppq2AD_cncxmiQU)fKb<5Apteb4&5;?7$0y`03 z^Mszb(0wMx1qa6e0@wXJ=yc+<>TuE*XpR3_XH!9DCT07Cgz$#zCyvZos%P~&W1=zettW8x5i6pPreL)i|ct4xF>VG@ZYa$ zTv|(67EPG3ZzX1nP{DZVMZ16HztzGmT+<!)SENY$;qxaq4=1Nrw55MJ_-|0LAJw9R5Y4gG(ExB+K&eL99x6k<-oGLG(ovN3P2zyC<9O4? z7MHxx`7b!7c{uBUf?L)}Mp429Ta`kJJ(R$RiW-)`6Bln;QYqY0r>HW5g>M2DT|gg- zKdKJ94auwL*8w!su+y5j2lPVaHu-pkI=&Obb0-WL2BxcR-hpya)3u|Qf|xPSPl?Lc zBm)}zK|cu)-iS??zbRYn%Fdlco9_tj{jwr}B+uSbMt&OG3tG%5)7U7|Y28^<``y9v zM}tV*87W>)tsz-kH&>R@d-~L| zHM)i|HCs!c$y=!Auanx?2K`upF1M%&`Im9Eu4^@d?M5H$l#SFz>en=dXFgJL1$4|f zl~fUb_>w8Gxk}N)x$@SEf??rKsUL}}U4{(#(EV#kp@!3|#-M9W)hre>=6PJ;hZB_M zB$dOG8-D7ukCkYCQWvR<@U7@Hf0s5d)&Cn-Bh$6>np=Bi@syWPsOGiQ} zw>Gel);DwDqqUA@$9CLOcA|#9mwU|LQre>zRZGBaMGJSyImfJaJ)730x`Rfkde>or zl!EOqpR-QyaA6JRyLE8wr`x>9w+)e-U)DY99|#DN!@gjVO$2pwLzM(nEjQBScUmQT z>?~--iF)x6>I1#ML*HsYFE*Gb&$Ti$dHK`w`;NfY<;-#vOSKt|nska}5f?SlV6??{ z0Zu5n`f_tEuleNoG$7AnD2O28#Xc7vuM1kaLn{>`Iz}=Zl*b#O4fuUC9}~c1GQ&HD zZZS%=t@#3*kA-R3Q#h7g;tEa#(R}3tn*BcQ!PpTWy3FRItO+*ukRmAW4Q{Qn?}zm&!L|=Vmncg)RNbpO zeS6PwM8d=nUT9LmhR0QQvSoObkIFGJyGRcKfN4G zNM=dnkK2X)cxfTc|W^LSjqLbA?q=6Yc<^SH63fUcVPrrhU1|6lqf zj_U*N^u-~CYHnTu_~nf`5dsPH+IfBLFrwU5^!=41Iv4mW3ur>{f@N*G^+3EA=>!>0 zf52Ra#TWc3N*q^_iiDPuQ@Wkvv{1{NM_WOBie6T3r%6TVw53q9b9+9q-vIT+tg~k*54?zp04kqjAUQ~0}zS>4=tavDQc7ngyLCol(dnyw_~_SEcPUdL7^^5a0C zfGaaFhn{ZKO+1G~N^0XulV#MZtx(+;L~fjON)!i=;^oXy)*lDw(eB3ZTx9BYCphd0 z5Y{Lxy<_HH8C&}3l5%SCk+KzjB#WSXBMnrv(Z;pn!ygNJzKT&`5U-f^@yI)|aIesWa%iQ`o^>$&TMAjejrd75V9{KU|sZZ?${4Y#}4 zTsJK+Rh2Mya|?~I=1aMGl}<2{1EqZ==66>rm$AD0R8BGUj1~(mp;;05E~*m_czbQr ze6zvSa)H-Gt7&4T0Md>^;-@&3{=7mbu3kAAJ*s~t3PF?-E)?8cqn=b2Uj3mmUDIZ% zW_Jol^Qqp4y~6+kEM}!Zp)g6*p<^dr>1#4)y=Oh@EM*P8J8OEHdINK;d)=8bsjRem zE%Tqs&weZye@t-~La@3MjBAusEyC`y(K4cflNY}TRsKd89d(@nm&&_$DsUn-o 
zI9#VOFrMuqqP2nuzDq<*>leW%e)R>W`ow-p$dNGxsuNoI9;XFr_PmDKye-5VTm~Y%o4RF(BAT_#w zpiDetAiQy;$$%2L4DdNHr7>YeKsb!d_r$(7U^OpQI2DGje_!N#X_&bhQK|+TVvLG6 zJ3mO7{=D$I&AYXTG5XPikHtbs!>qLWLt62jgdromLn{oZ+~Oa^L{qe~`wej?Gkz5& zRI3nQnzPL14y6e! z_JV2neqCI28(E6qRQe01#oWWUa}4L=*v3wcBxY`n2o@d(LdG0g&GE4}Pq_{-Z8At} z*i(BRfFy8#1!ceb%PHP#lOA9HJlpDNAk$p6I-em|t_n@_=9q$giy0FM0o9Kr=JQY| zB;EV7d?i`LLTqx)T{+j6M9%b8cpcj+3kk$ffx9OO(kI;No1PF_lU(lPsqM2}jtrg> z9@T>j!z5~mw!~RfO&?c)38Tno{55Ta8=ZWd<5G6F4>fn==8;ZJz|l3Nju#z&7fTHp z^**ZTq6G!Opp#}+!-ThP4L!4ne)!HB4SV34ZZ=4$Ah)!@x|Nnah0iBpwuAFMKDDf1 zh0Z!}@IwH)C3^&Wb{D;CeTet8i_e+*1#>!4H@6u+#MI+)$cgEneUklhSeFDqU6`)* zj({x81&8BS%zvEU0l+E$yOAIN_LeQIwGsvh&2G@p3u81GZtC;1*1F1yOHAJV#KGYk zj_uE>ak{?e=Q?N*2av~JpMdJOtKTkxbQh)Dp`};if?c88k!1jED|fAZcm%uu={!TL zmT!?n2(Y0zzx42RkHX|S>i@Rndms*ElzdmY(K~P6qG@4cbkQJ$tybd@2J-|XTm#vz z3htW%0%@0P5B`~KFA=|$Nb|!&lCkcREaqZ6HHSFgR37;n(_f3nB@D2P!uZJ<=A_st zh-Owi3%T!)St-clN3dP}!4v+_d0j7`o&!f{d^F51hYlM6GNjxYSx^MgSzlc#|MgEi zje42XzrU2yyTn59 z(Cu@f;VF4Qlh$duHKmnen3-60d)FrwBwynngT@h}tSfy9ict)Er!+=BOAl2UFXk-0AtsuNwevnkF~LDY!<=T{JX35oSKx*!%FVwHk`cv z(TA4#5i;;lpdpl!ErcZ1A-I`AE%{8?mF=WKz$q+o>RSjoRTJZqQSk2O;32c})Qfo* zXiE1WypbYlEZy<~vUTk1x2f8P{2k6GxLB(`E3*Mj>{x74t73g?r#2I31mG5i{K?B# z)BsGECV1VXpP3fdD7kNb*LqW7YLX}@St!AeU%Vwq*x+3PWcyBe&N?B>*^RJ zx}F8JwO#|5Y!S}j7hdrB z!l8v%puG2p6>LH$>un~e1TZPl+rr7t^H(mhr+z#9QG;ZScKh^U$=5Qedn#C5_+aSN zc+pfAAz-G8!|q@NJ3tb5p=8Pq%LA02dxUJnZ=`b28n$E}X6QHR2({-SSO2v2By&2P z%i1Y;xDH7zc107vVm;lAtXc9ymL|0S$RcT+E-(8~opzC8Mm^g#2ccpK7jQ0}wQPhh z^UOPW3aOrO*Lg2YPXdA*cnU$WWjV8G*o%eXqc=lX;Dma1;YGI_CEA9zh2v$#(fqVb z92Kt6+;7gvnYUnbtt^PrB?FTp^I!U-TdEGnp?aE&oGMd7IsUZh5FpK<>)yz&Zz_p` zcLXrbKue0GB<&~cQchLKz{(1}7_w1SR{NN)5i`RJtkcW=RVNM*O{LO4Xoy-3C{Nae z86UPYr!gRfO+tL1pZqUhUgk>%1-~x82i4;cEs`az-alk7j%s48m7+3-%61`I_kGVv zQY!)iAb?rVfXA$tImba?#?~u`cQI=kF>v26iC2RCn0@n z^K_Nv-vFd~=<-3>+k+oZ?Y^4lc#*l2_g~#Jha?A7`d^uIp2u5Y+3Zt?M~O?xqji7y>C?m=V@qR2wC_jeC9?y6eZIV(FT8hNpgCzL%ybs 
z^e`EmC!n^D#XMi~)A4UD0VW3pEkVi1l+u55SOQoDxwIg?0Iw3-sMUAPD+w|?Rr_ikIFA{Yx5g6V{ z0rr+pibv{i>i4$CX60_{-4g?F z`o(KDDW@I&!g*9a12v^)Z_WAMN{$XStQl8SK9$ytvpyQ`V3$}KwuPf|m$|xrP>5cl z1`gqA?BqMh&e^dpuDzsZm`b=m)Y|$OHX+85g2(*vNeF1^@P|(#%L^J5Lr6R<8m9mp`||L&8-Qf4wOcXx|Bc>?Yp^CltK}ccKo6m$qX0sB)E`mnAe;Gqqw#hS!Bi zbW5UrSLSWs`l^M$$6hV<3}K2iE&3brnLIWJcL5wI5IbVgSpRr={jtGL#(Voo06_O(oJ!=N`Aio$@jEtT30BLA6)R&nN)AcDZ$^ zlyo)t{x67U+n@c<{$WuWpyJX;`{!R)s$i`%+3(t^_XLQ5O8FB-I2dMU^s{o|)MrDR zfJ;{K9F89=*|#3~V5HWpBlt^Z)B}A413dew{DK&AONd>v@NgTnHX$7ZWbQc}Rm4Hz z*>R2lcw3>PnnkjA@!m;9X1NKQg4|54U!)x+ywtnft>t-)jl0UUGhQEC27LjG>rGy61eWBo32+5D+~D$b6&-09`;z$KN`JLkx5v+c&D4Y7y_9cXPh3n8(`(=uPC1 z5RJh67t`DRt&{^E{-*-p{{LJ4gL?VUC-|SGE8u9hVupc7@EYQ93Dw z=(4U3{KNAF*@~gv_9sw0l z4{q@w@R9~6JFlQP#k!=iE1SzlNZisIUHVxm@0*XE1m z{NNU8I(wJG@h9>&ag&d@fok@wPEh{&^0xb-)w|s%%OJ+q;tHzX+bh6P{7oQUv<(Ry z2$SWs7s}*zm+tx%C$AsnETCAIHu2OCy& z=OrZ7lWs9U=s%TD%hxc($1RzA3T<>@hoXhh2?mAiS5ww?=SKb8w^}QWw2`-EYNa2 zQv;~ZN^BXD#Y=0MAt0G2CQu05fdOI%7w;t+>nR+VoSC=Rt_rlMs>I`?p7Hv=sB588eRKY|1}Zv=g<>C7V>?dtRG155L9h zWjCTc@^N{9W>R*mZ};l3-)w0`+EE2ffW&YEgnQE)eyeA}wTlNg$%X%!=uIaH!?CfveQZ++z+mfNShOm7JgOy9k97moU4&*ge8)a=!ohdAwd=vwU zkx}jNe%qXm17c=%I6IIpI4b0QD*N>3?{dB2#Z#kyvn7oNyxAxwVW&Mmtl*g>7*l-< zcWLK>var!E_aA9nasD9F966l+dKUQ+@EjWA46{wlr1_O>{_VYDtze$^V7j9JdCB7% zxGF2{oU8v!UXA#rEe8^o@t2&T;bkq|<1fG^(QZO5+XuHu=M(p~t8b(Kqxay^R?~ouM?y{N4Rb?wv8O_yvjtWH%JHv4{^X~JU zTExuiYTcNJB!4pEer#kTFcRRk*35oC(V1Chi{{NV*Hm_F%dR@f|MY5e*}Z9Y>k15B znC97W2@5?Rtq|a)Y)Vmad0Y1S^&Ef2K>!3wTH}`p_E8*)4;JVe^lv-a5MD|>K&Ckb z?rLdu4X%(8W9-(S#ai-peC&P!{e#^vdq0M_cF@36B5sW=_hgffjoB5Qj-V)z>McN7 zW7G@69k{^f7oFk?JB}nXH+e&61Mr^Kx`+(0&zaC~pk-aHM^#X)X@zU#WsIG)FPWG& zhg{Lrjr>`%{ENVs)+e73yL~JiOm5)AqlB^Z0qMIBCFgHrbDEX=IX}eXN+$>hPyKXE z;KA6RkJD$*%<$lrhuQI4E%ou>mL)}q%f-6{7)Iz_v%6D!=+1HJxj#6l|43!1?e)0F z8wF&cE7rN%oxA9opng16OK_7JQ*Ljk?JGb$zd$OYd)Lp>cd2sZD?G+Gr zz_wKVMFu_#jR(RUgqnndKQ$csPu?R^Q562@xD~zfDnc}l^0Ih%^H;ZfnadbX9Dd-` z^N1}6DAklpx2@RMA(f`TPK*eWnRX|VzAR0eN>nDROrkQMcOLYtO|d69+l1}9KIcHx 
zuhf>Jb0$e{g$l{hO9!7kM{uYM# zt?9oQo*=44hx@^u>f?u&ClY%zA4ihRIcE<-r@^_2A-R|k zEhzcEB}R@;k@{@WlHpi;g!XB93)73NDps93n?9kK*ud0S&heps8=-^cpK_|8J8yGZ zg%6T>&K-@aj4*f2z98EQ!J~(MJK+ki^n%uRRCS48%GF11<1{S=eD%`PH=uyygEQbO zK2vYEJ|7mv1S86vHz?6rIFl(xFLLe$vdr8Gpi$&vAnLB=^zhV}Ue-E}N!NR?Qcy7o zK3NFihk4Zae=HsurOJ?e&!o-Lj2SXJ+OK$DXZGdt7M*~FgbXU>P$>BqZR1tU#|lKa z#<%9GG_q3h52V)PcW}jx!{Zgka4z1fC;2oo6u-n|v6aFwB;%YXe4lB;xkXF1+zvGB zo39|g0Ulm+RUi2<)aUtxH`2%~x3Z*yq0yR&PkgQoJ+yKjDJsY|Gg>$^yF;?{=L*;+ zq*i1-Y^@EjAGNg}6!|xZ)*dfJ^*%gM3NS718nf*oDum_@T2*_|e>^(G>@_joEa>Wvn(03v{Qd+o3@(|zy$(SMLQluRxJL;pz7E&^X|&P*3Bcc z`=%<@sWauVoUf7nY-6>QSPi6cl3C7bz4*#gd@Z?~CQ&B%GOME1V&AfperJXe9$K$` zV!Qm&^D=mzXFBdVwsZLKxr12T$Pi9ucj|amO}D^3L#+?Ux$apyZ8c}saZ+T= z9AxZGxWYFY24Yw>L-IpW_*uO=U8Vgn&q7f@T&q&K4%?bE>4H0GC*vEhT&)>I+lJ{J z=^#4H0~Hy0+9wF-%Jqw#_S@G_2%0%mXP8I-pFn)6QZA_Nr#Z&nUwSjvW?~Qw8n+=MM z6Fkc5%tPbunM|P0%oF9aTkXQ)*Y-yV6LyH3Y~5ERWAfYp2vV`6s)G>%&|qnZ?t93J2kj$!GWHeOiyt(1W=Bj^o&>e2mkRH8OFFhl8ny8Z6@c{${w zI$F&7boq_1RqeAB45eqvUkX9^nP6~YpbPCvt4wv(nHbAk)l54WO^M(@)dy*oA3WOc8k=@G0 zc2(6r9jl`#0fRlkPxnlQmcDCWP3QekXosg1uf~@(v1VYpus8*iGnTZhIulrHkL$T4%|f*- zF#jZegtiw)VL!vDP{~&RU}IKQGgK8lwL3b1K}PUJTv#hRkf|wci=!V*MY7rz#p|Ob zvpFFfn=HM^Qa9StNFSP$o%Y}V)saL|Uc*u8eEAN2WiU{7f#Lkrz_zIpZ2KhQ`BQR#bBCCq1+g78+Dw!!&zS& z#%Fu2_ZR=Ewz$%aB4ntvG9V`JVJu>fBLPL$OnHK>RY+x)mTNQvvEqs1A1-oS4@=*ANEmv18R)0E#esO~~b=bmA<)g;}~ zIo<4*2g^fR+&PgZ;(<%Jg#U_OPJV(7? 
z8m>+`p%o#Yt`NP=%3?h~JlFu3_>)Y+mj&-#qL{1)H1nyMtjBXCLeP?njazYS3rjEL z_4Hp+kXjZ2!CxJ;CbPPYsS#HH_*oNjbtcD*tw=wUwIsby*1dya9u-C8L#~x?^y$E^ zDwG;ZZg(6rw;rEw7OJhVmt7vEn>*oGIxP3f4ajE zlE~09%NT=`TDy6|F;_aZ7Fhl@E|OE_wMN$#Y5swk?8)oEaZZd$Jf@!JGiG^T1CET< z7U-hHD^27kJvGh7@UXFcW~V`@vAaWIVvQga`=`IVzQbjEjb5dm!vURyo_=1FHAHnv zMUPsZDy?i{5K(BgJoMCbCr|0itD45U^+D^A{7ScGo|Ns?^m7lD zb1GS`exUvM#FTRIp6GsHohHViC>oBd5@ADy?zZm6y}|yP#S>6r^g1bPiZUSr@*7Vu zLmg!Q?p^%d?-A$BlGVDG>Ygs&8}S_YjrOTgrsH4R<1Uk6xxBO8aL-LpIx4#bvFZHKn^hAC>kDh1$h6Z8k zr3d=jQ^m`r30-wp&XebU>JA@0xwd9>Cf-Cmd0*!O{^#d7{7+%MNx+jdS)ln-OieWI z$6TFWMSN;z{rSuiyN1#M$-P73GZ0k1rSUScXJIWmqL=9=0cp0G!=#?13u{+S@78yr z>&#T|XQ=&1^)X(Jg}Jckuv7It%F?s@Roz?Px+C+C>PL+z=yo5<`Fy{c7(3hY`kr}W zdNuunxtTii3C+g+<&mAsKeHQ3U8kB6wJAz*C+{7AXsqj67Pz-7-gOe}i%_9F9;Z5P zUUIcRZ9K1S&u}<{ptu&PHcCI3-=I>nkvTn)_RDC{N-X7)vYTX?b2jBWhm$rN%693Y zDLa^0_B7A3?U;zzhFmQrCWm!@67$@g^46{HYw`*BN8o8qFw(%Gl{{os;KSxdYN`eS zerZk%(+Mn1NFc}7NogfAF~1#NbS}}m!9L2DCJZe7JL~JnwF_Vo^)#Jibw39q=4DQJ z`I5X;5tXmnSI5GM#?}X7iC`mviu3w?a=L-0?990Op*C2%kWRWsurhiU$i(wUu=ay+ z4fk+k*Li4XB8UDaAPCYeVV1AsrjzH#07yS$l56vXY8Yt5WHg>0*S?KWS8e~tGET$r zQOLQPn3$*M(0t0|dqHSQ&yku&fW2iHiT$x_eqDK?h-Wl&Li^KS7VPk;s>0 z!|*V}1xsAJzpYnJ$E$A?Ua3CJ!;|yfO61~xOvTQ?HZa>W$a`}8P2+6hGwOY-F1b_+$JFI~GiLu(Yy z!t0q9ub^!agDbM-i%8MQ0v?9}xuir)-gX0M{!EImhsZ+Jc`#zd?yk3eRUCcqy_juE zGzb6DY1wP#CMB}AFf5?8uzMOv(>eK>s}1iAc+UP1I{gMGJJ~=$8GL_s4aIX=tU4U&(Nxu@BU2T={;b|N^W4<)9x04W`cj@4tH|=Fe#tGO^_~gkNFX7 ziVN@`Ww-b}#eZ~!4HDcyEI0lTf{qTSW}MOPRnUwP1IdeI$BG;ZI4Qu_^cwjR2yGNR zs!z@m{RJO{XGlALa9KWPTO}uSHI<2_X_?%YkJu%c`I{^+^F6}xGLYdM9{^faL8^iL zs|Nh=vbmw;*9PRyB1u`&jh*DppOT}&Pw>tP4Z!jX{?(khQB7OlVjP_w}LVyIVYnoxF9y{*ibuELt$tR8}3j+|ExsZuh zy70`yT0$pZ8DEQCR|+oAOP!Sjv>^~?HiZ?Gz`@)HV!PIPx3p&8UCU&^>0>}Li#+dx z#f>Ke`BK3I+UiTHS#kBk`>XrEjH8B?`{2ZJLA~}>xn(g)4x2g&KL($uw4xY%O4n%V zuilWjUb5FQExnAp4x(`mMg9@Ud9T;_aa+0OWT>Ylu!&`fDJY@mCGFXWs21~)p&-H> zHiuD3HMf;nPX-}jEeUDY_j7vDuCMtN$T4Y|8}^$q&yMRcBtKviph623My`M1qHv2M zsosPEi01uA=QEoYJ6R9pk7fX)zMxt-&%W~-k>NO 
zo6h80i2nSXR~s+@LMoe?cg#ZTo)K&s-zRwlgUW))NSC7bi+BZl-awvFo?@-K5z$7A z0Q|d*2&MO*kA*+^#Clr3UeLRCr^vZZ+4<&8;{-JSKuX!5G$$}*F8s#3jh;17z`uF= zFq*8AD6~v1Xyt=i?r8Yp5hVGd#oKLhaT=3+t0JGnfrd%a|h=#IU|gVUgJWxxCLz-_U764-kuvzI1n0|7`SEFlK2t#)bTeBt?8I%Q&6pz-M>AC##aUm~KS+<7ZzUScgE}rVB2$An zH0pv8lVb-Hw7zgK{@;Ci|1WY}`FPBifNI3iHF^6qyX8YJex}x{C+#MW8gjw?a!3c~ zIgLgL=H7|g&3XBmwgwiRV+{4QGgu9auo@?^o_Miyb}y;ZUpeVBu$sPZA5bQ-IbJsx ziDw^mDqDhmk4z3m{4P)D%w29->IKjMl{$qQNq2zryWwd$+Tj#;$X3&ay*n5`6tzAo z#6>ii?Ya_soTBp0{eRH5P?9%qOZ$(Ff=;JZ3D`YzR+VV)%UF1U112`qa;%@tvD2c6 zQ!CW_@FVm%-{tQ)`Nz_jkR7{Cm$V3xKKYL8;ggq=NVPotxL_^LKkv{Y;#8pNWd=th z=Q=)DP7ZHLI9-CF3)2^09FTP^(^}rUq{(upE4c>Z#*e&SPr^W;dJvEIdS(g#=Onu6 zcr{X6_cVJcA0Uo&w&Q-}W7nBGw`QoWtFa(cm9P(j&^9BXa1S6x^pu?=j(nH_fhr@TaUOdc!sPnJ3AG}ITN*tpV`V|JM1hW5^C!1f3>JKJczg5Pm zjASj4{+;DyyL~TY%>~f8CVH;1k{Y?ckSDZr3etTf;Zj<$sr+L#ZG3}?iq{U6^7FXdA4IJwtWE7#(a59ugs;kRYg6k3X2U^U? znUsCyWnJWpkofYLCaJG&(}ng?o2sCbtTyQ>%t!R1S5oV|YRVTxt4TTs-Qt`1$E}dn zOQ#fGuV0}cSt~(u15N#~8yTYp+J@c+mOh6CxH2UgQoldI#uvuG6B= zG}(&e>5dm#@JX@2e-7hpGCG)@9|h96vW8$jjd&_$iuu|wskDPy`Ae8-zr zf;ZO;h3FtG(xX z<~hPd{__l`!^tv2XY>2Yv`kQsA_G$aWC_9caM;PkZS3327C(sAE%AVB2}Dynpry*} z0E8740yYOOfP)0c&;6TyzqLIB)+ftJQ7l_bdtOfsq{h1Z9h(gpxl`s}^8?P+?BxG%M0 zB5QYP50IUHe~->a_#e_mrB9^J$6fd|whq5)D4Y{Wc)gl_%J!@Kk-;wE8PUe{@U5X5|O z;giBo^Xw&eztmS|OWxrkAsS7K58J1uei91uOc=4X8$8ubs0Ccg&z#XakkB*J&Fhj9 z+2lF>3; z_^(j@3xs7Gl@^v_?)k9O7hn1@R*wEF3xWEx4?ft}Trz60h%~1!!ShRnX=o{(xeMT= zSDC@JAk zLSB@!5+uT&-_8_Vz_dQ4lRd#`w0s+bZDg2}!3&UaU^9#5UX|S!;ZyD|%7{B=KwRx8 zc!6@$iN89xro#uN%=IUy%j8MJ56bmE2O_8?ogF|_>D7Rt8O8Vv^(=NBd25e@js#pU zi{%D)pg@v3R)}jG5a&X^K*N6T5DxK=`bRY`3uAPD>{ z2h@3a-^7w$L8T+^vB@7N?`zHZ0;0_8x)=dxRnmPn0#xaOMWg6+^n2@BB>nxyyfnOu z-bDw)qBJMn*+VbIJkN;_!he(tJ}d$p(AxY^Wz z#BWmIocg8!W8v=zLFnl&IuD^32|HqaWMe6;>dnv>Rim-25BCsPc4lLR`q4^zUG!Mp zCZXf$s|7OF5g(wwklG&G2(XYVs!z3Ia@0+aQ6+y_p~VGoA(lNta2B@r1DWA1o1!V& z+9Fk;O`1@siuH~f;2>9YRlZaaS4yJ8`(nh3cx+T=yt|YIOBGu`wA?W5|J;uQI%xTg zND%L3T5L21;T_LmZx6l`k8>L#qY+k5i1CbYLQ46M0~`3KYs)s}IPRl#gRkg%arPJ1 
z8K5&MadevS>Yv~u0Em?K5F{wIak?miFfVGkFOSb6K*%(KOvvdR+gUjIP&G{FK?oHQ z-ey-{6i+yIPq>3@7abAoBY{e>8*+a0{Kej%aG>H^@ZwR2xN*zER>6s;{qmhahCH%f zlpu)dpFmS7Wd9g69;o zGZok%p*M5?h+vIp{D_4ZNI~nV)fb@p^Wa%P1V;>VsKEPmva9-qlb2NXq#%L=Z{DWJ zq&f+%HT32Q;J9#&0fsZn;f7-0D4JG3t|GETmsgvA4oq)rl0BWNvwLw)vVN`Hnt z+F;>w^&C3+-MWJ$H&@oQNZ4T0g_ETfN1Edr8lRwZ;IA_gHX0tm`YZH|D-@N>$p&6M zLq#~s7Ql7U3_2_LD|^y{Q@GGTaasmudW5#}Y_jnjzCBn~gbx9Z1P&mud@lXuhnv;H z2O$j-0^SNBby&c5=D{G3^GUW1rKX@Kt+_lv6kt{_<$MxQs(X$Si0r&`lD_w&ny(LD zr7hs&=1vdb5Minl!^2iHtR;Q~OC7=H8eEo~W0MNJmO&dUKrwo=hkA<1plXm`+V?kT zd#ojVZtA#&t?nZCvM><%7Nc*3*iXtW=A~3+h^q-kn~$^_ zwo$|LzQj12^9R9hvgtC5tC54$u)Y|JO|GyYV7SBYef6Zfd~uL@Xp%y?#`uA59Ux68 zoqv!t4`hP!^KN@TPlqc3u3lCjGSEfRRs?cN&(f@=J4rs#ZJ{QaRY|^m3N3*Q-gdTY zD+j>Xx>T5q&;AN753JZz_g#J;2Z7-eT=JDndg8N2CZykkvan0x2d8;svQKM2Wwhw2 zJanSksi6H7kV2X-FydAsw~I^9e)_#SmviegV;YZ+=Zstm z4tAx}wRN*u_OexP7M^*jsWjI;|T6;t^5b0SzvcY)z77rUda z%z}X{O!6})@Encx=?GNKp%c+`hf8}*IiuuFt0EJt<9Wy_2EpB{Ql7G&eGE2~bbSJ? zOToUrKcZGPj$h9Z@M2L-ew`vu1Hot|e{t7IpU7T1(s5vYD0Tze;DB%&u`|&Z<}e!8 zdOI78(EM~45XE$wM~fKO=<-qex;WJorakm9P+gE64_g@S_j|@;U^RbA;(A>8Z0AJ_ z-AWDNVsNaS*q1;itYWz z*kjCr8-Ol6{H{*0Hn2b5di}R;=G{#p#@$XKObFOyZr#$knfEevJ4awtS$Vk@^6lLM zBk=sY>3>Km(SQ8^#Qf{A0_X+%zY~A|U&>{1`7)15RDEz&YCCA^^LbZM@AL)whz@D5 zSwwfP>f?jEtF<=cXu1CgiEEUpN&E#>_DgkNlW(Fi3pN~9>b~ry5+|Jxb1iLSu2TO_ z+VUGs4&y4+C@xCr7miuxelsU6RRdhJw?19Z&f9S3ifaO^^7f61aE)EgaiJ4iioHTJ zwEmeAuLZQ7ONp4P@k6O-pawIjG#k(e{3+4*8n9KLTe;iaM~8L99)ie&;<>td)uTUJ zjraP9fgv#HNGw44c*ob(49@!D&B%_+yZ&e*-iYsl=q&Nk$DMf%C{oJ&hhZf^Totim zGLNoj}_rZdymx-nS|mol%&z!WHRF_UQN_ zOT2Qo{JArLK!XnMJ@<}c>fcAMcq}K@`qZalpEo!f`7V!3cS{%jVA&rU{C;pJtVS0r zHCT{XhHwTm2mrny5yb(rlf*$#b+EyYElH}TG$QAA#L@nSp|xEs#N7Kr5)UT+=@q#h z8k`1J{(7A4MhP#BzgW%8n>N{3crDe`JZj_y^&ATQ=-7~))mYAIp9u4CxMIdlDFO_{ zUnLbqzui@M0`~eRYKcGmU5u0zWz3(>v>7(^BT1_VC-RQ;-io zOj!HlXR5*{#q%?PV;Pd%T5;);vRh){n(S$_Ixq-Cg)pe#u$m_6d`zJA77D4p_3z6B zy(`t(`F3Ao8VHi;7^?Ew4e44^!6pI5-ih;j;DsvwmIwH%vcbtw z0;#_>GEB!-PHL4pF(bK3aVh{6-O{FE4$1?D&HJ)9&;GQ1Dq^9ijR82jTV>Z~^$R8x 
zU+0Q$c%6%K>YhubUxvc)9`5i`ezigyTqxP2Bu?63+ZJT0Y#?N0gH0(btS@M!M^ z2}F&lfX-&svFd}Ke+RSf!zL(QE`7?g@{pc~~?08@Ca8`|e zVR*K5!(@3)qXG~fIwPljncx6qhe=13>PS#-din`=oD03lP($X)s{9|MqyG09mGvLX@;-<$hrRi9O2-B3=*EW*SP4&vowNhjEc&%xZ@iz z(4vt`9HU9&YkRs1?<>fR=N~uz@NREAox&%buEXDMS=S1^{b==8eNLTxB;V#TL$)w? zW!Pg`?(?mFdW^{jaHBLLz_qabG-J;QK~D5J1Z2XnV|WrO4N(RH<429qi}>s&Hfztc zSYvpuM!hqvi;IrCg@l(NtAyc<>)a_A*A7FIe2XSq2Yn~*y(XBxhum;}H=~FD4>`Yo z&62-ST%!*Pm?i(e@)%4Ohae8Fj>{v6(J%hqEL#zYE`I9hQS_H9M$W_NXK&$&e)Yay zN?xv`72ng02=oM;|L(YrxB2GP8)hHkfw!PvGua8~&u<=AMFLZK&>zJ~ nYD)zy*!9P9jyG1CoL=4bn_)R|{6-^>J~DY}6{+Hv#(w_|dRkTd literal 0 HcmV?d00001 diff --git a/home/static/img/blog/committer/yuluo-yx/2.png b/home/static/img/blog/committer/yuluo-yx/2.png new file mode 100644 index 0000000000000000000000000000000000000000..59849af6c9f311900ca6f2ec54b4e824a66c3a9f GIT binary patch literal 64370 zcmY&<1z1$u_x72Ap;PG&6{JH!=|%+vgdwF%a_Ei$B&3xFX+apeI~5pdL^=ft>F)T( zEBF3?-|>0iahS9B+H1Y*U3Jbom6!7PI8-u93i$7aNS%MsQH#zFrule~H&U*i?qIM6riD84hU@O(S@soTm7zmr9}h*K0ilx8@H z$8}rxkschTJCp;ENR(s3iB-->!ps$%i@U!nOg9H!*YA)%o}RRl`FjoD6#%^#_4J#| zOxx3h5f&WHQy(wc zCCEOS_*i!?;JSCjC|!7SYDIbS`|bDr@dQAsri}@t`Z!&omymm!W3 z%STPMM=JrAHnz7yt--3_@ukmY2)@w_vA~8ONO8XnrPN^TYRB;|KA7LK>d|hzGX3IY z<8%A{mw1N6L1kMd9X`!;9vDNvjmS4okPHFhaul94MJ>IxXr5!C)&gSRl>I~hULcO_ zOjpi_Z0f`0AQIWvPrfc}`GeTrF)8%=c{oehPlZ}jrT_da`B(VlC=)~Cqw=Ng086%W zjBIRukZP1i?$5-X8yzx|Xuqq^*@0*8i2gBvYMu!YjvOq>mF=bsZo)A%?PKqL;@tq(vf^YgqayM# z-P!dapu%3qL*=hkHxBnG7BYa^+N!C*m@37 zL8D0~@P^cghU3sLVu-tNBcfQ4Ous`p{O`BgceVm$1 z#71n+E>ky&Q_C(A>w5Ft=dSqEzl!d;*VmREQGKwT?L<;XXop_fAoF-T{;C^49@ZiRLwI2!ueip%tAgR)~*0NXd;`7bzfl%_gU)$&%*@juCyNIm|F?3(X`?mTxay0 z`DEzO79xA8WrPVd+}@*WKT@O(UOQ@-X)fZuwXu3PKxP)Fr+^zpJiKgg(sump-auniyZ;C->C-#o9ibA?6#Pa%Tm_cFc}*;S z3;!MxJVRBvi5BM>lF%bUK>9-saXTGzgey&vm^hX_a$Jyvwof_XR#*_4)}w_j9g&tc zl?@2QRbe7kj-sh~-X>@`DsZ_LZnj|K+wfO_IBbfxY>@4=kL-mO-2nu+xWDfcXsQ^b zjqI`pWE_Mroml=kRkmUqK!&g7={1a{9MXbi=&jz{_=G@1y|lBz`-Y?Vs>YXRo5%Oo 
zhaL@~jEnYxi@Vp3_H8RTDWxhBi~8;NHH}1u!t?P$q3{fWzXrf}#xop-=inM`&Qxp| zsfr5R7hs(*d8--`NLm!tQu=O@#CP$poPU=b7`VcL0WpVt_uid?XWVD(`^Kol56%ki z{YCfy$b z?m{Y9Ap*Iyv%>`x^tdG06`^na_(23eKP&&;8jTXAO@>XaSC60!(V*RX9%@rS63`^E z8}ZPO(L{|`)Gj}E678?%-!P!O>bw4i-V!0vA5W=Q%?Rwqng(ppcqk;Ax|(o~r|#*c zAGXon0&s>0E^Da-TJ_};l8HES^&%ezin|Wq-gLLGbA4b4 z4=#{qxG1;yOANyjod8*Ge45+xa)H?;sf6nX!CW%745{VNq_GchKTqHHSiSPTN+-rU z41Xc#9|q!hLG20Kv!_V+111C+cgXj7r6%z@bBLOG5Mcq3e3~eaQ9qB#SkbqC)sKOq z9#+mZDT)libtWhYi-(#J`WTF%ZXkXtKva%m%O(gU&ienK_veA$(hcE*0ob&0Q*z&y zO6MR+7Y!h7tpBuQVe1O4&TD4*jp6vt?TPbICyqOrbXj_dX3q&!`9;2}Hud7Gk%sP& zwBM6Z*ev4q@r^0N?L1Ba>mQT+?@h;NF0z>5uD~_?i-?({+;TqEogG1fF+bS|H{hd1 zVYXIxQy6c|{T_hq)-_(E{JrPgUUD)W(KQd?yT2Z+MI%jkXz;*Jq3XG83FMHV8ybYK zXz5(25Cg?WQSLw2n+0Pa{M84px9`(N_Qi`S|}D=+mHEmKM{C+^1v@REPXAd&r zHKXE;>4kqQsQMp4&kjF#e$KQWI_AOHx$MY~UUki1xG`)GSbU+$7QvSK;tMx-mj0dp zq-^^mWe@o{8F}~`8W#OF2uN!D#ZyU3y;SwElnT)+a}Qzr?s-ID?THVH4!Ky0H$`tj zs)7RP!=-~Dvo9RdHw1>YteW4Zj)GX|qKSP!?WPsSV1hQcT$jJEVq!z1SaCep7ntAP z-f92mPD#*k+SUnL5>cf+rm=b#uDN;tDujj|qMwwSl{ymo$dQ}%zi z{29K{Wq|p9#I}g7IK%6;xA)7Epx>na*L)?yH2)X5h|VIK{~99nA1wZEIn^G1k8?TZ z3VD!MyAyLl3kzNNtyi|KR!)b$@Ekc1@{h~%NXaE2eEZV>ZYYEH$HCPzu0;Gk<#sXK zQ}9^bbnp}x{H>wW~64%p8*fpI9BLFBzG#S-7w%1?&BgX$@iti}+^HkCW8>YkpXarH>S+BP&4@ z;!g!vq}pIqsPnei7VeoxY+QQm=2G8m{w~{w@bEp1WCYzDUNR9UoQ#Mqrt zb}IbNR*p4Jw>#dB7t!YISs2FQ*B1WG&hHA4iARqHXXMeJs&ceAl#P2C77mK2W%g$K z64pjh-L&bAx31GSsr|lGy@mCv-CfKdUoCydrpJY=Q!gHnnfXi$UQB`)IOk z%TK-C$Uj*QiwX(egj7tm4r3?u)L?~QIU>y>3psmhN!oTlJq~@#9d1=4HR}1F5XdGQ z@QXNQa*~YfHmHho7XqFRk9)Q=7}FBeELqgHEY%zvVRIp6K-t9ILR+Nn?}vY8x|E@l zNZL=j#{11CHhuQ@dAr}qIXAk*vUQ=fpmt*fr&vbPn{mbj&3U7pzhsl`A7NssDrJ*m z@Mvo3s0dW1c~iyIN-rJdlbe-@I~Gf7K-ga`gcrR%`}vo`ktYlFmhGgKJHn9vf^B=r zFu7vXraPoXuVQqQNLrYuucGWTt6a{!`_4+8PwD(bo4(v%U7(Tjm$7a;lZj+dq+>$M zHHYxnTIZ!Lbk5Aoj`Rp5*GV=}q_Y&y$gFh3!2yjA4}PtF^KBqE=thQ*o)$eb!A@X0 zLb=%0=CRxJ#z{u)-QRQx4^0>*Xxp~0dqYB#eVPZ4s0Aw${}RkUVZC#fCFcb>*pnpl zCp{#B1i9|Z*dviJ87BRMh5nr;;hN{sK$a6#`lNrrQ>G_E#1~u%Hb>Pz?-ruz`q+Li 
zp*238DibC#0c}nmW!zAe_c!FbD`(R!rvC*nt0>B}4oBZEKQNP?^8;yURcP=xNdT!7 zW(9m<$@GkK{PRG&uy`WAwA^nHxQMnYXKO0$Lr8}xK-h8cZ=&|VhJ4Oz&8Jrn#vHE&&$dK!r zWE4m+I8c9u>uH}=x6sidrdF9{Vs+7*WC><*q5QQn?};KYZ_Bd?TXW&5Dq7+y=XN@d!A zasTuQE@Nk>{6V-Mc*|3rdukF*7R`X*F|$#IP{82p3AJ%{X4q@LzQl2|9oV}uXZ$EJ ziM6Ts<1khW81rptA27TdGc8u{eJNS?%T1K`o~*LS^J;c}wr7;;o)r}&KNE0OE!rHx z5-l|mMYs?Zg~I`V(k3wA5!oF1_l$g~9QPL@GLdX^C2;Qv$s`bfnNIXj;=bsO3-{;t zgbiJbjAatXqkL*BvD3?`#SxGnDwedL%LwhJ!d9JLMCVX>t-gyasVW?e~AfX(PwQ2h0MXS@mrDdP+{&@64zU!)HLV*8f&`@RRwi>GbE-msD2yA~+1w*E#UDFxN9toi`>cQmXZ^8(r&+ksuxt zEeu(W!)fA`;f|K+_ue?Y=eS<8Gm;2kq~)-g^)!5Xg4u7*n!W^=Qoz>|y+4t6CWhe@H9X@j+Cm z_fc3xY1`J?)a3%D_G~YP9+#2Q{2XIC%LLlHe9O~1#s$q{T9~#!O3$YCY=(8*~0s*89Jm`I1o`5vt)Xojagyu za!ve)PpAH~TZ9IKIKrOy2AOFB`{ELD{)~}CNH7y-g%tj&-Om&s@^SBm_3vREK`!JC zAF9CPzv+?RpGDsb*5yi(&}a(K4i@`L_wP!f(L}F)h&}N$&e9p92h;Cugb@aLk`Ohw zT8}e(NYZoSX%YBGAA;KHZoHP^jq7yi@T*FlQjiMS^?RAt)yqqIZoEobMtaIkUq&Q+ zN3wRDHe>T*G}HpaYF?+#-st#qhTXggV+AJEZW``pO!?iMYHep@R4-B1PXrd>m(8klR5Nec~vgP z6Lo87;^S?bC^80w54C`sipKMSG}`fZ{uyWz8UkZztga2u8-@B5I(c9iK(v29aO`O9 zM$Yy6jd%6gM)V9~(8Q1h&M1QWPuEP94;7|VEhnGsSrCcRu<3!=Wc&P!#99jtc3Bkj zpoWI|PR$w1bJZ>72atJN6+GM;T4GE+LQ-Uyxo|d~X^{8ML{i?*`sn6=dm5zOZP}qI zT&$_!wNh|5Vx3l(?NkdY*;NIW!pod{DyQ)Xt(bjxV6Aj)78{`NV}JURbbcJm#2Q50 zh1jQOQ9uh+W~`N#j=Uj#BBy~oiFzj{xPDU2x%uvvVGB@cmyvCfK^(L0E&#UyeE9N( zZpLHZL(uzQhdC;;OMK;i_YnC+JIqyK+1LZT5`rs7n4s6SI2XaPRXjhI1IJ2#Px>he zehw#!Wd#7PcBG<~ctt9y3}=YsDOsOMU!pa(7W;$3h{I%NA0` z_}%fK^!3oxRVBoxb?JCoaF;bwrloEvDm7(-;2%xI4WRap5xCCWw1-||`E*x-J1{Cd z$u@*vYQ*&AmiaTHl^#cB{$H9au&woiI z_))uEI~H=F^BdlhQH}#12hRG46AS(k>o%q#I_(JCz818-``iA_(}^eASzYCK>OE(& zgKJ7OVNltZr{xIrG!tvasq*R$4wkI$ZxxTXH5Q&|8+FZ2Ww%HYmI?mt&!W167-W#@ z$}dfnOtStXH!j!tzS>H|k;Kb}D;)#mbkvnaD-dA9`T4i!h$mT}uS28nh0VLZA)z$D zO0!&UJ@B#MszR+C8d0=;qpj62uLKpIp#GPpP$<5#5wZS0jI(iP4;O&onsy$uGHha0 zw1B+D!SEDqvq_u{dpGM*anw z49ZjKsRy{I7{xZkfICT+qz__(P?-dl=z>>hU+(rf9+JCj#^8n*h?wYazzN$MHdP6l zkD8MpBVrptoSY<~q!ooX-Sh=jT*6ZYeUWsx?YXZ2+0oCv@1qZgu?ZHt+w_ScVU1W9qu;^s0NBNY#L~ 
zea6Bc$BbT8gu09;U0Bb9;0crWNy~oCEKCzK#gLj_FpDpaqY|Pb{A@b6LO+JW)n;aq zlKg~Af6)&!#;lI3gRLtPx&}o0#f{4ex~>(Z|M?LU&=!?8rUc+@isG!1Bf&(DD^go} z_iPNG?Iz!U+A1^7#+m2v8IU{OT8m_zq*tW%G3WY0VXhN_iW^dcuPN$i2csKueM!Ac zdwSCRMg!Qot67g<{=i^AbR)VP7--l+kOFf{-7o*K?l)9ZFYD)zikwVGZCDDCjk$^o z_Z`o+lQ#dK#B4myO4n%CHQ=@o^0+i^1b_A{L8xPB*c=WEis&jPe9HsN#AbkI3 z)~`N&zHwut_4Q!%Rqu7|g<*dVpyH;wXAiwG0nNlxGnf~; zs#U!hdnnpoBzCdCo-bbPm)88@)?JDC%lKx7Xgx(qg0A>Ko{LbEFXh1flQpvVE6#sR z+Pzz?*mk4aJh&DQ-G+$=;-Gv$bUI7bOhq=OUghfeehP1~X95}uEN`^Wy^fJ_Cj}Cv zE;{czo)8RG-YsuuOA=-6JxEG(3BjlC+AIB=72M~6$F~c3!xj~_>X^LYY>EuH&37^7 z$SJ~}IQ=Y!5NeL1{n1_&Kj-WF*DzCSJhq!<E|D(_Z)`|diTB)GL8|2MDs9T81ro*%7TLczp#lF6Qa_gYng z;a5%&)lcH478eY!sxqfZ3?Yo<(da~3^hb2f2} z&ib&h6qbxt`S%o|p;Cqz8o^VHw5=c&iid#+mPzE!`=+)ymaj?mBsMEjK|1>*A1>Bw zK@ubN%komiJCMPu4<+ZLKXc&C{N%^IeIoNuW`|D z>9;GAP@!p2n)mJYiw7-A0DAr`zd0iw@;z*~rW=k!_7NiG-wcHEpbL(b!vXK8o^>m$Xt&5}B(wO!PO= zVdM#27OYWkE&ueV3O>^1T2IEUP0~2#Zk(T5Jv-OQkBIcNDbAe!DJZ8Ax`j_Mf~wvw zyDrve@gp7cWZ$NtAZu$ ztj&xIa9r-xvCQ3o0V+QphkIX7RbwWxZS6wPXA@h*<;69iHvHtl1||Hu{U{kXlW+O| zJue!&^rI$WO9GPlbq_`x3{vn&ZUCxmkJ?SvXy>c=WPXr7>2{)?dnyhIcGDOY7B+IG zH27J|yx13XWe$-^Qe?+YdmnT?J<)t2S<#hF>!dmBH^KimV1+W70PJW#XuTF#tNZ5B zLa$ZZ(kqDkISzgBc%S*`wPNC$7U&#&=G2=70g)C7N*bMKv#{w#+Qr&ts^w#h8|_97 zENY}wRWvvl+Z5sjyOGeape6fZcuac#ZT_6tP7h>UxZn$X#`|JRaK3NsA4F!G8UgHg z?F^e&?m#yQrYY6SP&Dfqn}0c{E1N)m<+D{NN}cb>3@8t6txe9A7;5CBVwoAnpN<7U zy<~TUv3yU_zD${6BWqZ}ifxkrq2w` z+InX1!Pna1k#x(o(q^G)i}uRJO9n0Y7a8u*b}og*lxSlxV^+9Q zCG4>uE8fPSHD35*xa||)F09@em+<*IwK7uVy8dHzhC=JC7L+0Q6AzUk%40P^TOq%N z=ji!}=b- zG*M%k`@S?B!l|GEhMWBs5M1x$+pV(9E-xH#6|8mp*%34rugP_qp6U{o(Jo2;BN8I+s z%ng}xTK^;KxZa?Ae#57cb+3nnq`AgvpN|Z>;qOOM*XBFj+~R{(2+t5-Q2tCTiH$CDiROKSC z7nA0-IBecXD0BOpei3(e8Q#Sc`9mAkGKmt60h$u4%)V3nbMCN@^20~ougCq25;txd zvo{`!-F&(qKPQh@z1qwkUh(2)rWwi5TZa*L<(ZDF{##8x^qkpF{-ot=MYH7k&NVNx z1>JzlJ$e4PMA)#e<)q@j4N;S0e2ELMS}yov+p|hA2CY*M%%&jIL0k^E_3ar=+z9SUuUwPf6!r1!+4sU*fL9jvIpfs8i& zi9_$ja+~*TQnXPST16F56ejFwW}&G!w_@U(g0e8ZqnQ@&IvctxT=QN}uKT%`S~)v& 
zsJ|y}-We4gAbUDM2n>Agdqi9?K(O0re=aP0GW%>CaHCcgk9_t+)xX7;0 zL6v{_r!sgdkbz@z}qf``m5l`Gtq3h1Ql*VL4*tNp|W=u>(Pmg_rpkY4IhZ+5w+-9I0E*IIJDq zNH>(zz3-67yOyjb7L!;rC@`Z}oR3kuV33|emQkh8qt_Q;K!4!spn+vV_|}O&qDZ#? zp<%x9dvI{QLLMThcoy=WzxWeN63I_hiNj+4HOK2@MKWIj+oN>?U`V@CMCD5Q<|qKY z!gz)NpA6|3CmG}`S+6o_NF@|gttP`|v2z>S9Wl|gEbWwjOF99HJ2>l`E7Lzc#B1b1 zjn-jIG0P`jL(87NiSbMNh;y$Kg$><}L4RZ8wXHRAU-c?~4s=}8AZix&JPERZ=-A~G zr(KDJOS?@pYr+3uS04x}dUX8q+hH?s73n9@Ifcrx%OLPbJT?X!26m(;)vsY8NXZ5P z{b(OsFIi`4_H@*cf6)XTWV3`JOhSWX$u1WpO>cm)kx;ar&m1x8@|1BL0`wAXn&gY%S zN{1l`vCB`+CXysL&_YRI4vIF@x1->j}Zazz%1t|@6|>g>-agRFAo zabiUECM@5!PX^kVpaPbOw1~L*f2-4@R?jcXw-jSg1JKR&&ma|j0?7op2Cii2*rq{P zr-$e%a4b}Qhl{s$c)pvu4NWzFIY}Ese?*I=M=OehW#P|(YF6PK%^6JUh zhy9=Mf0kd_x};>5{pE)SmiD1D+rVkUdtB==c8q_0h$=Vp!P;49(#=ci1>l6W{v{1{ z+x3zLW`)u~V`cOv&4rpNzkljH)q&Uf?dDwS?-;2$?4N?Rd%c>@A^-Yf_NgHX+NNGb zsnBw-*7DMTh;P5FyCr${WZN7i6#a|Vz|#9>1Qm9h*S50`q!+qvmqQ=~s@9yICiYyB z`YPgNbmICP&!6LLg6C~C!M_257$y3z*=$RG=w6^a+usyeJVaRaa zwO}+KO}f1QHC8)!ck|HDSaYcRk)Vp{wAVded&SZ>^pGD~f13%d5*p#d+s(oMWs=u# zm1554Xnc_APoK6N0morScjE`<-OP3@?fkPYF1T5;JpFHWvW*gA*T3I1iOT}GfK^n= zYjZ3%kh|0v{jcT@SV&i0vyc9ICHF8Vd3g%~X`1%ocs1-l-|nR6FZ(Xx`m%e6SZNn$ zE%EVayvO7GC2A9&xALICabgbU!p6Su87`QmLc+0o)Dj}IlP{uBo_*HrC#Ufv8jLZS zm`SPqvxZk|jN%syp8j7=5Z@=@FMBnUk{#Hi*hvNGhh*%Rd=&LD=xAHZ%xF3MjmOCU zL5RPC#64%T)tX?(xBfaGjYW6p#4CXyU(uu|GfV*0PJTbZm+yI;3%hz>*>zZ+%`$!s z|4%UvN&fMR9G$fGcbdQrm7y3eHU~DS8X-QIbtCn?*+bvoH)MJ1Qm6re7SxEZqQgdh zw;W;ov7TqMPB+Vf3pE}XLol{{44-7b<>ou#lv)pMT!tkr@)sS*#q*ChMT-W%nnYqx zp=A#uJnfH>?$JW%VP=6@dYv52=4WEeQ4t& z<@Nn(Pyo`-pQ3D9*H%9N8VFurqZ&c5$3RXwsZE99b20_jy)IkN#(fXHVA$W?sjQy1 zj*%Yu?IOGPO$a#}Ef%MD48C6}Z2eSY=U8h5K6$xzWKxqjVUQA?B;%sW0xHo28iP~s zWzYZGQ){msIv+3LM~k5uHi4^&E?;uYS7y;szAiIt2Z`ZYNIKBMn?WMzM|u@+hO-6U zVsol|Xx4mmMb!IUF#Fu*9wITIbiLiLQZ8vejD}(z`+D!YV$_fSq}*3huH_!D|`RBQn}&egzFsvTO^#?r*W!Kk)GJziUC!sPX=@J`b~p-SypoD2?fJ%AF1Hg=e# zQI!~P<{6SoPo@70qE}~Nh-`3K6t*UFZdLa>{ejc_-K{a$j_^A8C7L##jzFrkDuj@*HJY!wjp*?Rk57ORCDZXQ{6dM7y8q}Z&a+cmHLn4g#98A 
z6dxYDZ}9kb{QhimGFz*t+uXoBx8u|q05J=GCkQ(IF*LGWcQi)ox}J<`?|G=qAYufM z=bXNlqmXWc`2^R6cyH)v#8)TBQsHK8 z$DEsWT?o2C$9Hpzd<>PX@}|cWVJ>(BpA(Gcj0UBIT4*;s_x)VcJdKAp-Bko{%?b9dRTi)N2PJS)k8x_Ff0oAOSW0d3duldY<4 z3?9MJV|I3n3vhXbZ@2Oj{EJ@+w)5a!Pmy_(_5g^NYtktuoTmElt*~9|5k8P4?&ljd zz9GtRTJ3c)eod({OE&A;OM6hF6LTLS=yxmqN&RE3;;zr4 z+*Ygf^N;LxUdYStW7I?Lt#*vZZ8J-VK8 zqb7X=?Q~fRA*ZKgb3eLnSKCJr3hfF+4_1N@D};4-^Iq z`?x*2Y#z71H4-6=w8NRG6GvYvjyQGKPm$UK9sbVG}mi-<0$4Aqo(Ll&MWUDJ3d_?a^<-juE?~VFF3(R_^;SE0Me)AZDfEp&gD11vuV4= zE&d>`fW@-H(iTq~G_8l3Xf5+-6vWdyC6Ed{r;t6fP~iJ9ff@|oU9+>a+Pdlh_25Yd zM*byOTcVFr-M`nRI^@N~GI~BS^4)#z;-|U02F?6zU~pAW=h1y?L0_QPlc0?Q z(1*4fF_PdfqXHDAmn{Z=gok~4q$7m-!*@kunNq>ZzJ&G5Md9SxyQ|@CX=5IMtJA9x z94?cZ)i$+w15X(<-@QO#r5zTWX3Q9v(ZZ#q4#N`?wv%r$$I*p2FSbkAI0_a%zm$nP zS9iFBp0QIsZdWSPIFKzHKDAx96iq+xFx^imy?arT$kLa5u;g=Z?ccXffWK2nRk7fJ3m>QP|tDYb6Wh%J1 z;z7phWyjwKGBQj``(L|sc77psj9OxM?o^Er%Dp?89Tyz@@RuWVua*H+#rp~o5HLG& z&!H|{Gi^!g@r4C!?pn0Q{9G3mK%wylA}DhH#VK*a9ODGSqg-ed1&8$-8bQr`ziq^y z-Djy}AtKJrgW!_UFi1S)@Nqz83&&l`)nzRg@}-(+eAlqXtr|8U6`<*toQdtb(@dQ9 zd*mgzTjO>0%VVT250{nEN2T(k#cy26EG?DIJYk&9-$;RU!@|7J|Kt=ugfICfo0jC^5d%M5i57T(oBSSb4@`s~E3D)utAuYJCrA`t-}9BZsR3m&rfqWr$zS?jnApUD$f-={RR~T9&Kn^(CYm%U*AMXNOzQ^UL@Z*(-0HcHh3X z7aue&1`c)TYp!{JOsK<0BwdXMkawICZTN%P7$&R`9*utGxnnRUbCsieM%Xe#!uD}n zcx-P6kYEzB7WL6m^a?%BNy`+bgooP3?Hkkgjj?B>paH@s8{)P}g!CA8G*?J(Y+gBCZS_T!S?JmQz7|`1 zSfa14!$;n>Y%}ZpB#HqFxo?qN+(Hdde#v3|6v&})ts8G}PJZNkY2zsU<$(Y;vFg4S zlfG(DR;YU^N3e(pY7`;dXt!6%K!MHBSXf6pg$nT}0>E2P(LKszqm53LIJvr*-OjSV?hA9sHkyS!6#J&C7K~Wex1#crEGI<2K%bsX7Z}+luQJH7TDUlxE@_$04`{7i zHQ$X4Vk{wS;tKP?ndwaRp<=)YM;4Dla=W30s~!7!)gO1602#|pn=Im zS6s!j^%Bj1R=12UoEbUnP^zy&a*0?ilkZ~>z+NzOPnhB!E2L$^w_m40p!C?80j&!#ZgQOXQ9i?XHmi5T_09_v6|dD7dj2uFv39~9_E)fsV-A+Wr5vMmqg`(_`e9hfct+z@ z%|d%&iS8k|ZQ?6;kLl~_Rd+H8GxffW_Top8JWHkpd!~OXfNg{EmnBbdc+7*T3K4@z z8e3(%Pf3n;e{vS;&GAhC!k;Ht;yj?vX|Sz4`BGEblzz-zacq0>U3cm_zBbz@{6uA5 zFrB%dw#(TmF|Pe91=Rh~!qsJsAQJWH5jD$NlE$prcj}=q$1$1e-P0^sb9HR9Aiqa* 
z5*h>7$Z(^`9KLFp_wY~POPPKl7wgk^k;G{qOLdfk7U8EqxDwe%JYT(Z@%>%iqT$2| zXK!1GA5oZ=#_vq&Y5HC`IsPv6j`eUhH8scvT(l$8B=hJ<+-=77;O;*^ytbY$zF^gG z))T(=MCE*eQ84#Ws3s|{3ZPZG+soIk5m`Dx-BvxYjk`#OJpa_(?-YH5o40o3DoRj? z%SMmLrXri;n~uCmvhA`qFWl4UcFmbPi99(t=1ea-E>8EEu5}NaZat=1oHnO1w-Y3& zbp6I?-Wd0#gW}Cw+^RAV+wa+xXZ}U@fyfJ40#uv)(wDq?+hV7G$fPEl2{$Hu>6C1~ zkb`XzXnsV5uhyvsJ}!54 z8Cbypw~r%Kvfa*xfGPW{UYEq-Qwf~Xrv0A$Zan=u`z$l; z=6ShsDVdIH%C{F0?A)O7bVRboZTNGvABEVh_w(uKJ-M20+3H%0f<@478BhmKwF-1O zdv862-E=vH)=Nh{a_b>BZ=}f<}Q((cvK z1+4tUJg*34K{Ot=DA-TZUN3Cj6bx3vOgHtF#LJIL#8WBR*k*7Vx^Y3?ApfrQfbP)K9Cc=RUdend@J4y>B45q%(S_bX+jE&Xq(sQfxW~ojnO}L4 z>uQ>xmFoNL7UxQa$=2= z6{0y-v{+NmVQ=@?^^yk!Tiydzf-c2tJdH==;>}WvS7_Jz8eRmu0x;=4SWw9(att~$ zR(iwt>pQ(%iT!KoFM>b|Xb4!?5=*p^bD2Bcqvb(Ad4zK-!ScR%+V)X{AB@Aa?D?Sa7%uj|lHh zUZO24+xcu=aIip)x3M}I*|v4f`85u*_KsId(9R4Foza1+H2Ha$Tn&CL9u^D@&Clqpo%zlKn;R7; zl1}#@-kTZKH1O#tuAMvo_NR~-I(y>z%qQ_rC+-{StzJW=i;_9*l&2UP*)5`sf}KC+ zxbW>sp^w=F<3@|tl0EFr<|NgCk3{wl@7STn%kw{)c^iow1-{Nzh?$s9eyktjGbR2L zLB(M`+Y(Y0Obrbgbo$Aq$)>~|@erXa{K;Ij;Fr;};FS8B7lu~gO>%BH() zaSv0+{M6^vy>>pWoZnTVee{ZP744Mp4ynqkGfNl!Z_Pbk^i0`x4XD0>0UY@n^dKdn z=%XO4y-I5=NphJpA%~}WAUA?{0 z1;)UgW4~`E3yD~i#V>{>G`=R|8dZ06c$9CH34JEu6@V*mL`>lrKjM`!zeQYI>4lVOZ7R<^&u%1Q2Z0;>3Q=Oz{9?E*S-JOfG}6Ox78pO7s5xzQ*;Go$cEBwHKlKgbT|x zdXphsEoUom%;_^Mqo&e_z>?nDQQCa!0DItFHM;Be~D^8*A?X4`SLUg{)t_F+N48i5kf5h%l z_Ww{|4Xoy(I<3)3#Vho{Ymgi4TrTYQm6w|S9PwFyeL`qlW`R*DUUaQ@0C9&{q=-+14eZt9-xlsIXddER{jV|`aPi$D@6bJayU3U+hC`n zNi*LV1ov4yx#FrU+*i;UR(B)yVk1_)mM%AkP?bAVsL zi%P2VrzB4?Oi42DlAIE#pQyLX6ZKthE-Cr>F1}+q_o?NDlUfw+n|}nY`RfUG>WxPY z?3AQwd`i2yKyV5R8}jJwx%-w-DVV1pI8?k&k>y?ULZ`i8Lp@53@RwDwsR>@s*=$d? 
zwDtW~UY0?+7m)8jD3eiDzO8~LJc!xWZvOCo+~^M*OrF+?veV~Ujv2j^_b_b}KgvFv zYa4qGRZ|fdQ$H|GHE`m@xolgb>E>9m@l^2EG_`8%)^(9YJ@R`>y(ru@;u&;p{7UU= z9;1hTssSVDMtT|fyta2Cik;!y325wGYDTZC;6t&_ua$<3%I-VForAWuv&SI#L8(^R zLUb^pxOzJ_P!hoU>Ec`dKxz3FQBzTv#kAX&`^?Sz_7;k^u$<+jW!19SW`rnx2YhHsYFe z4_DtOH~Xg4B--f2+3Yz%latb_yPjVW2ghZlk%wPtOgK>pqq59yCF1hK3d)}pCY|1e zE^Z9y+7*Cyae-D;+SP6drL|1y@5y)v8QnufIwYk-snLw?kR08k zrMo-dXP@uy{Rg)Dx#Prju5+DD5Jw>kTRp{#12isX0taV#z$V_DY&4MUwadzeI>Fg% z_s~b?&y5BIL%h>$nOORXmLI#o7At~SqCXU5&nzFE+_cuU_6UET>dz(WV0sgt=6y)6 ze_H2neSb?0Beh}}=h;NPfYEt%({lSQZU6`@;Nc(foZTy7A#{e+Nn0&%sv-p1D}z@6*0%-(-h5?fuDXz~!O$ zSa%rfZkP-dYuSF}VGQ7)EY-QPZ~B|~SFh-#`WAMvfMVjb`tsZ9Hp#?7ae&hSyhRyRRRVN-b2(#V>J8$j4j+qK9A@oX zWt1+o20QPUJiY!Ip1*zy#Zmaw8-Jl5vQJZ;`|i`lnJTN+75Wvdxy*{a80f%AY(^ya z->sO+oRNcMRz_5*xLuF_^hNGIr1#m0mS$($shQoonl58YO1RYDG*6s~pKol91|m{J zL(~AWgOiFnzw@s!!_6bp#b$s~6=r`v+~SpQeq#t3h}x}jF=T&DkEXkT*6WH{NmVp+ zWr*hOWI1bLnT}#?lv*S)l}<7`w{8(S*dU7O$SgsmU;%hT2i2|WgN5SVOv7{`=Qct+ zl{jsFjJEcRmKvAAwGG!pbp75?t0%UWc^=;p83YpWd-RhWRPV-nSMM^cQvmRaRcS1WWe=PLAV4zd*#WU_|kD4+705+bDo~>_&#SO&e+a+76ltylC zcvkPQN>RAjOy*1(V{(sY6CpU0YRws9g3innep=|oUH2R{6gEcR7d9I)lHRVbFmNbSD)t@0-FP5FR+CfWI&r|Tr^r446LHUij^ zP~I$;wl#XrF75TRYaM9Hy2G(DS{t+AOg zlL}{p+t=jNQt5XaQZ}ZG9~yuNoa$%Yja@zhM&BL)L#irMQZD);tDQ-16iJInP!0@| z^cp1eeuXf(azoa6GJpD}qk4b8vX_lT(=E}>DAv0-I{G}d4svs^WJj}TbBB*D!LY23 z`bNwQwwFsq)`}Ah&lhpCIv2U~e42vQ7ON(UL_T)9GwcyCP<>fS&)e)lt1fA(wBfgn zPIz;vbnD@2Qy$x}abrdDrNaFtHavNsJhi`RrHH0(WKuyc)7pXJX5TM5NBwW)7r zRJ*4sxqOn$6p}c0O72qLe2%T7U^)IPX`(C7Vc`wQ?Kas{fagrSJmFb|i-oV>GP|B*3R6d`_ zp#zVoesq1v$=|^2emfgAp7^sTfWe{GqaR>YD3|rW@)y-h3}NB}w$^>kj?dy4=;)Q`mr6GJ_fnT-aBlc-7Kx~mXGt{}6*Y?URvbPnZQbrm7Qp7> zkfA1^&C zk*=SK$s=mFPiThcM2VCQ^tc{qE|!m)DtDgpor@Z)y?LS`zu#G<*R(jwhCay|*l0{=mzjISF)0!6YiKa-CZK)bC zxHfiD+s7u+K}UvR@g$>@-jfH$2O!MbN&_mJASUH*zPCPTUDBjb7N$Y=`vb|ZNyvoz zsnF2I4)uh-$?oL&3%UZZE$PLj@fxdxXXG{FDyrqBc&=7k{sHEp)>n>o7 z;nj#|xhG!u^1&BsPF4rqM>+^*YO1G}g~`8`z4rSOaOdPUfIwRm5x!#mT94X_}H=YZJUva3o*3d 
z^%=8Lx~T%AX|j&{Pz%U3H4QX0ur;xCiCk4BO4V;v7qnIG7Azfx*_oNdqB6;H1CiB* z_E;l9aX4!fB{um}hbSsB9b=L3%2zwhoIjj{!rBf5&4o-YjoPRv*Gz@xV0bYL2GG)> z2`MWe6s>|;EC^h~ps%1_ED$^8UA0lfq?}xkttMR#@r|lv1h!#ULHf!yPp|NVe*;&!^ttte9+>qcva2-(0w-<6PT zsMezX?OTXf{xp*=Ra(Y@9XFfzmM+LP%8U^XPo466F-KZ_DGsXn_L{{@2quQ6NkW{7 zuUTuEd)sg3p)6kRIjB`*d-u^ET1FZ|v6QT9ZFwKVt{@vPOx6Z|uQWW2(id7s8T-6f zZ4y#ZrbRL!s2|X2@PsP&cp%7hyBdqX1r`9fCRYnNePicyw}hx)FiHl#$renx$Yu_X zo>n6j;iyc?Tr?C74N{R3d0MNy$DnWb$+BG|jiv_qyvy{T=I-Xfint0U)19O~?23sEo4B?4^4umCz^LPg}n1$IzC*5Z)iMo%gBlE_fB$ z@cclMMa_mKFzrYvS;wd7F{ux}_xzwwLSo6c#_91jku-U6#xeU6RUfVOFL5B}{Mr)s zN>Y7S9}ioRMMBNV663|&z~+|B9W=*3Yc+4RelOhk^psYtZ&Ffg2mk6J@~SC60m!ec2#0Jn z>*<3*1?AJ6u7iIP8*2AjE&~Q_YNU;wQydUtRwEa%t0!s=Pk%C|85L~RjyWx9M^d-e zN?IcBFz2Bqh-ks9DRq>@S%U=?{lsJ6=_-5BvTHWdTk29`D{ao#ZtDMS3EW ziZWvG2T%0{6W|+cW&mZrLO`b}l@0Fv3Suj;>%tpj@x=aVYxN>W3ioFS?<H%rW7FL}7#XcE6CIumN~cub|pok>lT8(t&!qbXvB`ri{#Lt^>7)t1ex z2t^QHWp(B9aGXTZ?7y}9=KU*PhylOeeGU?NwzO_70V<47em!4YKo#|4h%;--qCt0t zqNJ79cTm#IzCBCB-x9lHf(vo98W>`TcxN=PlBBeJx*vSP7>w$hK ziL+-!sxJ++E1#G)F`V}&u#mlARv{S%I^L3TMf_V9BxKBGMvsN=AH#Nr%%Yj8daX&f zlE+_ai+30*E-0KReEM-;9OB=QK!O4?h3nV*a!BWw6HmavKUs!Rjxgc-dWvnL zWTHz1WUPLvzE_%yy;cUz_7xJpL?#A`ZHKEdav9qq_gLHIGqN+5PPaa>#{r1hf1!!q zw-6>@0DTEWUS?@D&W%{0P*E)Kg{Ek-_X6W?-3?a1t`E8mYASy9E1=jR3k;T9ZS=P) z%WdO0G*)n#?6P-=Lb0j1A8b!shXhlzs5IaoC(hc>gHP zQlpo5ZUnn~F<5wihDtaWLT!XrxrvB=-U85v)NYnZl9bEgGHE(9QSWDdyZar^`+iy3 z-(>!@7-+f|tWZ*T8j*F|g*$G(lKH2hj|PE~b3OmAU+DVNz+N*5|fMe}rW$kT#vza^6*ahG}sY`kFf%bg_G zs+ss<+p&C+sJe5S=VL&w;OFppb*JneH_@Ix`%0Wz$qi2U;FcN_9iM3SW>}dTPcYVWQxfqerQ2(pRpr*u{sjN*HVk z9X$Ow7~g$f*5iuCjulu>F$EdAIP;1OM==*^{W$u#+79$y6HDM3%l#|4>-?ntrH~?k z<*!v*@n*h2Ya_#QAj!Sc;q8z4EFJq-%UU$>T&Kz!rnQ~C?)lewe8mJ;@P|eWtXD9o z@r^==IEoxIs9<@2n?hT>YcVZZ{lz48 zbyYTI?0z1t^Fxf4>U^z3!6qlDr2=syU&frG!cTL{K%8kaIp&H58))Vezu zoIxMe_t>rWBNcWUt_$Ofp*LMv2Cz~f@KG7qd2Erg;`r!61D~3^s3fkFuN*iYr&y}r z#}hR{O*e~g))QPUcE`1BLhF@4`G^~0)ON$&^QZSCP+pY>{W5ny52e^$3c2J@thFs5 
z30b%#UtDC6SYuRT9z^Tmt;_|3iWtglB{b8lozLwMLO1W5YAcHKEE~gS{HQytI4D94 zU<|b$46?We1v{7Ny!Jo?hiVfSgruF~I{1N5Ht*b5jEG!&6upvIN2qywVfxO9chZ>z zeUWSTJArwZ=yD@3H)dr<1*C91Rm*TpY;HxQe*9FMg!0U0Vejkro!<~;!V?C%8Fc1b>})`{r2G+kHdAqDx0c>i1i$(K zTz-Q=5f=6iud4Lx%es*u>y+J>yHuM2$n)Q{A?KI!fOfm4qa-e)q_#MT?w1u)`|Nmq1Pl=xvHig zhOtWs&3sa<#Y2dg5N`>beSv!9a8Cc;#%}OIF{j2grpE`hjkHGVMZD(vC&9E#!Q7+C zcQv@xRrxEW&BD%kq$7&^r4F7E^r5p;KnartbU35$>ru2WH$M5#ow@livACo1JpZS@O!JM}-O5IF zgIkUhBgO*8Db9t=T!2*p)NMNu6}Y@s`{;STM(z&zO5HQ32QpL9J;X`hB`BiiI`^_a z{)a2ylLg~Z`U?rvWPzAUv092i<&g6f=9&C2WG%rS@qm_J7Xa0naDG_pk+Qe`jbq;B z+;*8EnfOxRCc3s93K(Ew?b$_xKLe1%UG^?9hAK3!M01N|J$Bs37h9Yt10gj_(3OO+ zNLFfd`Js$mYFliPBjm-o~Zexy9jUgAm!Vr>me0&Pe7+{(7%dxRaPv?Pq zyn%X^0gEWA!>km4DZZB)j8?iY;604a)qUJW7E9IplJgrJ9bAK|KCxIwXNNh!D(L9F zFPZlr!`m5?v37yddznOo7LB;`P^=9({45w+=^* z!wcZ8E46vhK*5ozb}`cAx)ssfgm)gFPXT0YEuzJu#y5_hESXJ57rDcn%okK-rNVo?AOE3sGbOBkQq0+7i>C+ zwgC+@&s^|38MY43S2};ltmm}t4*apwzfN)p!a|7_D53Q39mSy}8ypC#Ffx<>RnPUK zaTgH7xf-a-OK`7bv?@)huy_ud@f}!S1tM?pxMDkpIp>);ka$J?&4X@gZDt3RWZ^rA zeh(?z4kBLR7HK*4X*Xx|n>$Z*FBxp+;ClQMs5cFRlg+Vq`mqPTYI4!;*+!4efJG9C zcBBK?9{}$g8#d@n1X&2g(7&^;Jb(z>_%FfmPK$HlU9U8)7(k%o4Hv|7W7$WYU%$tWuX z2((<*f1(+nw6g^bzm0(GwazKv1Nl{KJ2E0AT_{y63Btr?s0 z#S|g`Kr8B1$Hva>rtRXxy~}hSs3`(XOiGU*eASM5awD1$0$!G zq5amXbYK3kYJS+cmUzPe;+zvFW=n==NEYU>s#{fdTEA5PnmM)cf*Qj(XYdf}imbgV zU5H26eEFh2Gkh`OGIUWjkjyc0vVqJSPp|E3`@a8 zf;Zb_hYFF3TEVANVs_2o-otVr23tWzH|#xp2r!*{h$gSQ;>KXwpXCKhrH@df?SU(V zZq!)*#H%^+wxke;@}~m1_QN{V<346%dg;j(NNE5g=K0`Fd%r6Pz!E*|bDV$>v~a~S(AX~C~!pWWNjy{PXn!_mNArm8d#1pgMuOKd`3-d*6G!crev1e)pJt>-Kl} zOFT1Fb)Wyo@EXqgb79g3Stbjfv@KfF(M!d-9T&<-(zk6y#o~SJQoPOR1bQvp_gYPa zw59+i!oOXF5D7uxEz~0%_6ljV|9Ph=jr3VvZ7{tNX^>2SoqVyR+SB7`v$I+bg;3~L zI9gnt%Pgq85s#T{^}XR!n9zPSxSc8vGb5b4+SuG{MhEE{u7ZJJ(}M;BM#oj3ZSowl zX{&9FLtT>l2il+N_!QzxsdcIh9gzH0ZPBw0dWX>W?fTQ*u%mGsfi$7CZJBo3G(4Oo zm167~f~7L$De?k<*1eLZHgiW53YSse2x*shm6zZc_Q?cv!cruMkAfYdpCx`Q0 zC^?xc$cn>O4RyMXMd1poM|oR}OD%dhXATVYo{# 
z3waxhJET-}Nb~o)vOE&oEMrsxoB$T7e7<5daRT$uZ9q%Ue-Hl2G@C7|agTQACyWTy zV|X2oVH6XbR!6wR>3kZ6)Hufr4sEUqyv3F0PU zfMGJLu~2)`Jxhx6c20j`70nVBtir=&9I_=eWQNgwT39Y&bK3Li#WGiFk^k6vfuRD6 zP4P8|j&rW8T_Mx9bA(BBhAs+qtf>%{j*-xVE4d>*o`SkB7Wh_UB zjM#mCs$HCo&81N6|$&Tm@>B$GZt zyL_b$_>vEeEdytm<;DgCA>Ro!Tn$RmnWODC+Gi{o_L#MY*&oWvUTSj7(!!p{Ij#Fh zdWfLT8O_mvx6d}*ITWv34b|0{%ro{Wh`M53wzU5P0zGD>UOREQD-=L0R&hbDRD#vT z`D&}Ch5M*8gg&Bt7UMV3YWKV%Xk40(t$~LJiYX^2n7Jq7ZXv`%WpX02^)r@1&rM z*Z3UVqd<-8a8n6kKr2yR-!Ocpd--Q@NSJ)!m0&Tn{LeSBAJ4kY?ua4Ny7UdMj)~_A zmF{vVM597AoK?jWejgD=iOhh-i>^`-DH1^VP)H`=%QiJXOE0QNR2xVCmd+vz=gE^RxwWB&DF19lCJ zZmrLyH*8C5@qeYCr8?aUhqh4D3d^%AG!T%Tg-cRVPLGTv?%nIA8iCm$rm6CVcDjo9 zVJ_Eu3{?|;sJ+e?5^q23+kS!{|FV4wD4-&*`?2NOVv&>Xr3Lu#@xD_46+QmUfWdDa zQga0s^ABFy(stOOc4E_jd+lzu`Yn5ZXH62PZFpF3ew;QIcww;qa;N*Xt>-U7i{D`b zVs+d5{+O)3jmF*oegruaTKieqvqXeTe-QOSDb&)pyk(FbXJpOU)tOM(5<|WIJ!ss9 z;QI~t`&?6K$?Hwn758(!o=TVjc%X8Nbo+R1?64LzjVvA ze(I4+ekiHg3r>(j`XXFWqMZa^MXkrZ_TzVo*4_!H z4ibn+UKM&3!)lT(Ob>8-IYr>b_Y?OP^>ZS66CHoEpMURDIzLaYSt6%s zd)BLu-^+({$Lyh{RkiNMQwqr8&arVHoG&dt5>s+3vr{hlvtOd>0>06SwHDo@ZIkz5 z35eoudL2Ah7v^OMe&}qbw&|)wY;o|)QmrzF(?n5e*7NQIktPza)zmcXTcYK+l7Tq} zq1JMPJ{}(dtWZNBi4eh9>2_Xo&+iaVP81?VjV|$h>gn;>%xrcrjTNH#Wd3#Ap`D@8 zeYvFzqPpw3n6t%FZK^7Xtw~qO>m&9krk5y(5ntGo} z?qih8;sw)-3J%+G_xyeNA%yc5SuKo$>mxfexN-!OaC-KMn@ z>Y0Tf;8Xqq*pxoNJn1|Av&D|l)JGsQyZPYhK@#Zh-nX7E@puc7+da~h9d!t`kn1Q*bk;p6wtnnbsO zTj4LXHlrjsFx+hlUBCPNx(D%-&C$IbtTso z3aG?w_;#g2!u$2lR%CzH9$)h(G_u`yzBqR~cx@jq{7U-L9kZnK0r*7yP-XRS`vIZ+ zzw*{a8CRsnz{P-DRm~UHXrOB=XRWUel-LG{hL216^6;1>tvoL1gt4J5l6IaCj=<<0=Gx zK1ad|uTuDIg&;Ee(36D^k*>@{SKOjg2^Z5-7D&$fwtq7^iE(luOH|oQ5%EdPok4I_ z4Lu-I2{^^mm+WzF7lWh0cr~(CU(4ZDA^*k7ZajG!d^3=I?Jmw$7Qgjoxa{-Zuu^@o zhO}T?%c24Hbev@xAZoJ4`K;-e)mM;@?qM4>(D>JXE z*js|+;*;1u38MX|MyHDMLp3S7thNi){qL)`m zOpk!pskCcDSst$E8zoZ4t z?Ug!wRUoMR$PDFay=t_+Uc}<+?6PJ**GW{T^cXC5w*B41ZUVc9mzrzjTS z@rFlK9h*kf_Jl&1z|LD-fmAEh|65!lmKUwC)tO>sL=i-7_1G-s;a<0mqt&D$dd4aKKLcG#AUNFEZHax(ytKq_-@9(4VXfUnu zxT}F!+R4e0E}*hZdJB1K;--)A)zP 
zd!Hxlr&6mn+hkeFf!hA&ohN`6t6xb!ZT1mbHsSnjk8Hm!I#t{#8{{l}zJ&(52<@En z8Og}CO8@7mVke-R=Uy7Pn-k3F3n=boU4(v(j5Lxl@n%!N82b3rc6gA$yDvjj1{g#` zA1(0QO^t0;ItUo(_Bd7=&Wf+M*eJVX(UG9owyswWR%OEh7rJ{cHH>aDhN=Tw*vAY6 zO*Cyw`6Wj8^n{cDkyQ#9qN?k?iasL<@vodI%|oRLD{DFa<4?$&Ex`8dl8f?++2*XP zqtJ^Ft83v1kX!ibT=h%SbaA`;n|PtxRw1u5V-$|^#+J{O=2@3SK^3yb249@dDY;>s z=#DaAcWD6`$~Vu#_S#_>xkp`q_UpM2zihvLs#ps1b=Kd*t$_}r6 zE7qOV`S3S0HyGQ!UsV9heq8={zg7eg@v%MHT4}kpEw7cm7-CaS)?3ZON$4%jNuEp{J!K*~`akvDR%cYutyTw{dtoZ4RM>e}-V6!&J z8l6xBqjafNbXGFS|ARF;^sR^IX?vtYYX?ky!^`<>tNm%3!>#F1d~WiHGE#vA2J~ce zf;JGvD|LJr^s7j=+wc_yXv_eR5L`vt6?^g(X(5tEckQn=8;7MtJmO%QX2?(Jv z^ICCfc+^7@?R$^zgvZG5!*T0+FFjy>y~_q8r#Gd9e_!_E7%b{NVG66b}Yo`qZF$ zH}JqrLQWpwg3P4fsh=)B35W9NH~6x><@fzhXbNOptiS;@-fk7%7=1^FiFiv#Pe11 zUK~TJx{q&x-r2i~BqXScMwOFX)KI>r0uYeMOs^g?JQ-aW(LTTrx?`eXGpD^4da}pq zNbb;wu1N2)f-lsde1!hO8FQk=yyv@;M#^Fix?ccle%S{0F);a@Ax$-f+_8ow@;2GE zfv5BkKuGe>$wNRXO+X%B5C6li>!bD~GC5Px_~X{Cd3Dko0NsLXCIr@63G$_z%k_?Z z><`=G?;Wq8Y$J7>t~DMs8-7j77ivKT(8+4)GzCV?fZ-8by&K6J2(;AQ6yANA^#9e~ z_cQ06q$(@L+^g}IK>0Gy$v$_;AYnowOk2?II``76+?Y;2d*r_bh)dBV&>*zyL_KI& zG_za@;TK#YqUF5qN!jg}V0pBym&p+po4 zMH>(Pl5X>D(l1HPqllu#6AnE$w>9(#>f@cOWN(>>ag@3f57Q$_^*`Xh`UjTXnr(|A z5A{oX5%H=`d7C^SA^t;-jN}oGx!6LxO>R;5_I9Q&WuB!F(B8Lx0L6A?R9>t*KGj$p ziESE-Ljl+<%_gSm3#~loGK*6!tEwP9N{ycbrvt*lyGf^9NK_lSBsHm+3jNr4r*&=n zb{)5qp5dySR_huocd-s*2gxhr^^kgfQw~D??qHFZ+WM|ZNFm>ZquZEFRZ|~ z33kO_RoZdpNU)X5hB5_X#eUiryx5BPFjy_{rwgw#=OU1aE}Yjhab89MP#0M?!P5L_ zsHfwj6DxlCGFbap7m_l({0}I7#(SXgK+gAOp5ua&UjS%G@A?pL=}o*I)T{O}7Pn$V zWs6?5bg4}AiszWw-ORFs302qO)p5K3^zq1mel7-mK6Z?a^ZAcZ0L&k{dfvEh&gE{d zxAki<pvb8xVQr8@lKH$4<=#b70OmWCGm@)PRSfRyA)B~r?uQa}xTA7}nx zTxu2QYkZ-E6mznRBAWEXzL<{xGQ>L~-_Jj3J9I|a+GHV9BDq*@5U_l9*-ad+aOWOz zU3#hjOojv^Xr9u4m}`blhVU+O?j^O1>lc!rbn(16Zw*Og9TQpYpFCj?6mb*p?&NXS z+>BxkT4qWuIy}3Hz(Ryc0_8ao;!6?1&?rRXHY^SiOWeV)S`UAn;RFz%N`-;!W6EXr z3fYH`?O3}1yECS72BSd;0K#+)IXM*0UVOSS_I*t`Fg5E*^gC_u8}Q1WV9p!OWDbf| zXwa@heq4=PyT|nVd0G|T+`rKD+^?W+x(a)YeWY^s$;LRHY1zFgqrQSZHlA~gw(8$M 
zIJ7oB@*J}svL)mi@S>~!Q0wF+<%iLMYz1J&jYTIyQX1Me2fKT)@>0;zo4KEN#lii) z>ri9=HFoK-MsMVQ-9UG6dv@Up5)E)@K=I;Z~qS=h+YM{ejZUAMhNxc5@j{ zhH&L|H3L;_GQ4)&y;Rc8Oz{r^z=J?@q}QYSZTrnS0hQBRY;m)nf9fsXpL7695mcCB0(o3z1P0s03!C7Yp-VX zrw5ROYMHn(C)mE{c;!4E&hj}MGwrMZuD8FT-$=Y=yUXh_Ht9t97fO587ht5+@@G#v zOAb5$Rv~``H)}r4Oa9uXZlWdIpP@~AI`PnUBg@=L+)OO_LXVC<9l%b$$kHv1sQ3?;?AKw`w=BA?L8tGMFILEU)keGS6Sa23T6X0L%!Sww?Zf|~ zSH6|?d=y^eRg7poQW?m&gJ@Od?v{dcg)sqX&~EN2dHHM3<;g={q?%e(J*ddhJ$)vMurbzwrpVY($2- zMDZgam(1wObT)#tYb*J{EQ5G5GzLg6l=0k9lksIyb)=#P z&>e@(9EWA~t+TVB`Tl z^vJvwrP`Ds!|r(kNI@#hxL-o*o?KM<>e0&Nc>0VOdY%PvwSorK86CVXUO(m<4oUnY z!|2A)lK}SjqTj&du3V;RgO2K3vLDQpo0U!}WM!;0LMW%olzd8*t1X73MO#in7WF%`uM83c!Yor&n1i?w5@rTKdn{2ZFx- zaw5^IMkK%hMU>@iDI2%1N9W z;~;HUD(d}f!d5(qYn>?{E>^n1p;}7$kTFI1MtzVmx*lDL^4p}T_yh~ao&5oVT~&~H zFoo|}!`+y^p9gT&kJpthUtP%`xaUinyA%ZPHUz4ylvC1n>Mq#auoqV9+}NrF-*BME zyc)a#hGL)Dw~CAq7tc$Cto@ny$GuCc(6$Ff58E25_H=SOm#{F+;N#@$64|lTsc87x zjxu5ANv(eluBKE4ge`#nveFcMg+gVN!WRQiPLp!Bc2t``qf^7=ehZIApf9_4K<<4+ ziq%m>6GPZDD!<;#1A;c3OpcX-)}rDS8>n?rz5xA~$q%_XD?++iegOrg@F_pi${|!; zZuAtljjKwkEpMWG)HOQUTEE_tAvF;IdV`}S{e(0-k@fP(MJ^qWz-^JGV-~WFqj%e( zK-XAwCExE;pp9^t0RGaA8`7MHfw2Y#IRpAyJ5U@3AH9i`9r3&{9iksbr33(sL~n5}u_9iPc-`Z!H_?CcWCX5{6A~r} zu${BrfxmU%%2N^{satYtEXsgEvy265m~tP^16bo^=lc>Pjw#EAh6i!Pns{nmtaEBS zoe%stC69h~9@aDuYAsFfp&$=AZVIKC@wBB@gl1e?|Gg}>Mi3{~-rz^3n8ur>qdxlV zX+q!aNOa7~XK!hmwoNWHyLVGc3jdn$X|7Z3E}GkPdvsi09bg&@6Tcr)_slrMR| zHXpMrX|=7sQJFM282eG^Ck_I6S$~j~(tsRlGQO2Kt?@^k%Jm%njyZhy99yw9qDTjx zfY4s+w^hm+XsP_&lx^}Fl9aymyH9i-bQj-I+0?xbj9Aw?l|=SN)L7aAczqS~5d>E= zT^#^Oqh;-CYdgJ3A+HFE5Q9nV5*D1m1m>{lDgxueZ28*-;vS(@7O)Ua_-%{nbwoI5 zO}5?-koe1Ae==BiVk3GSy5-=}Yf+Q(t+E0lJQy}3-fJV?F;(Tci2kBcrKfJ^+t_bJ zN_^~Qa%H7$(2$#7B(361%0()90;qi_8B}H_wQ^EOfaxD1$$)XeV8AUhOuz5Wz6Rmqe@Z$l&a^nL(>^19(Y+4bBs+?tRUO1DOtaq z{rsgNb{%j$kBvP{=K5e%&4Bd^2}3511T>J_OT(o@&pKdRfUhRfU%SGTu^`tQZ^x%K z4sG+KfTrE~8GfMBi&u+UzvH&h{Abo5H8+wV-yA-*!N@V^j8to4N^De_l>1N04i})g z&L`BM6cdaKV;&8o;!yX3fxIJz^lMC(z$Db`!zH`f2~Vll9D@*A2W}R4Tos>{|JV_J 
z_-h++V|E?YarJ?NWk);3geuEH3=N6ybcni zZZZ(`Xr#z1F?ytjwAMSCd42$-8EAl)JK|}Xu_{b_iKIc9*$9goH74vo5o7*|$XI8m zu$I&B>qt>mQvs8BOYa~oyf#t1qC#2#4Zo9NXh$qg4GyrX_i&Kb=8RgmYBq0a8Ya|~ zl~k%V)p|V1w?Y~$Z7>xyyrU2;si}e1L2;AX60N1SQP{_tirO}6B#dzduA96NsL4}g z^zTeCUKXE=NE@F?iN{$SPOWT|_FFE0si>J$$+z%Nt0o^sKV@Y1Bm9OBl!;4P+*&zA0IEJsM6jo}y#PG$vG*^R`7OrNR*62;i*WyLO^tOU{7Hw0 z*-cleX}tGC-H0&CtuTv?c8`S18oAb!8+MtCWj1AWphdao5+&zyT|nT_)TY5~rTa#J z#$)&zKgAKCCp@A0y)O;$AkM9PoxqB|&-KP%0q6HJcE)!2!{F{NdMP?jaGTkD5w-{h zAN`qV#i*P6o4?Ym2jj9{wR4T8~=HX2SRU3`N|XEb0F(|^PDk%sG>*xkK7bF z@O_CklxbLnR=)t=K8xkS3((!*JbL=NY_EZ=?P>62lZ&z0K4wi`{IZ&Btp<`wDDVcD-vfe! zWaIjhFqWTH`%ti)D%uzIvCc)azXyZ^$2cI>O}1J!EG;C!BHyo@Hd1aqd2_$}x0`YL zw)MehwAh#UX2Asq2pel3J+iA(Oz*x2lQPrnbj;A>5G+y=A~70hn~d@bjEG))7NfjL#1R|4vw?&EpJ)t8z3AC$AlQ8=>7&tY z7YOXN8?yO3;2^P)PdQQ2-`nRjd{DHhpaCeo2o8mf5bmb#WSZG@Z?_ngG)hw~j#BKw zyD@B)6n-G<*{GC<E^Gz@-fyNMNi6-Dz9&U_8J z%M3)O@wq`!q=*y%noEc>%2_&amkPWBMj|FXVS4nCeMs~_(|tz_Ry{hW?^q2z82tF9 zc0LznNx;QJtoCSx4g|5NKkn^Ovy}Tg{Ac6bRi$a?s$0#1!(o7l99ffYTP=8$eiT3N zyopAn90Sjk5#5Sj7%)492NsfvjjBngjM|$Ccd=r0PFk88=GPk3C|vW$N^!4;`w*nfF+Q0`~vQYPQ_& ziJ~(tIBncpJ(KXVf^XHwkc5Os2dvf)kFh<#U2B0lJ1A6*|2?+qa!ZTKv=(twM{V|{B zs`pwT%n1WnOX1|E7;ak&!7y+2`k-qWuv&PD?I+w{we!dBind5{4&6wRe&YpyM9=MMROe4Uiw#ZUkSB!QU*rb&M9U+A&Foqr#N`JWiKc@aN zDz4}00)+<;5FiA12ni&(4Q@e$LvVKp4DPPMJ-AzNcemgkg8Sg^?)UKj-RHhT%CekLg2d=Tn z8-8c8tpRR8<_ZJuyjE&Dg6ZvnYX;*+0Zqf^I|bGDSy0wIPmfD<89>eTFvICQ5B2~m z+QkR|s0_%5*G7ICTs{a{+}tsI|0WCg%U?X?t@ugq(vuwm8^Dy9{znN;i&Xr_WM=qO zVlmn?z+#LHD)823@7YN1p7||O_+J$|Xu7sI`HD%Cm6?6#_yyZXCX&|9)T4K>zH}0zYsxRp&rhe9`fqyGKHWK}K%nV-d7HgH zY%xA&>SwWn&~^}n{u8Ds!`8S!i@tZ;*Qs&+C65V;3Et3xOQRn>xJL(uC14K+FxSMb7!r)yvLE6d=KoUnjnN<&Rq*THpN%)7(5W)F z-dt<XkJQ6X4Rk|{e#CFSJ=>!G)8tL&#UqZ@*-lbqN<@yyVD`pAX=S$Lyf3jb zXigE}#DC7e>DawMGB!>8aDVVx#JAA?kTVs-AN4MDCptqa0YHQxBpg5r*!UDqUhdDU z)w{FoQl5Y!G%ZrvxHP^cd1+1ArSFDjhOq|tN)7DwxB{EJb=za4miCWdy+A{+2z8pc zk;?ngp`NPO9~M!BB&sgIn0Nf+!TMyL#dzA+#c@gC$pIN#VZL^t#rghv6NhTgt+gsH zYj0L{r9+MrV#T=@?|5q!U_5<)|4SDZXkY 
z_zUTOW$TmEYX^w?!{057(M=m#Ks({1<@LrO8d|E}ni9y;YPzU%<-<(q{^LLUd8wS5 zS8=VXOEBo6MeS&tZead@uxwO7T_?eTu*b7`70)BHe|`x-+I}O4KvcvvJ1i)bH`b>{ z8$%a;PDv8hgI2A*%xN(w_UnmH1#OEiVt?5pX|lRX0?ekbr@^Qk2Bs>2oZT>P9;a4g z<7ra$lEBf%bQv%;Tfo7S|BogtJHsz+UgR!CaiLZ26fHznIL7#toQ+YK zg5F8WpaLn{pZ9P8DB#8waEhy}xw53N@gZxh<;@I17Dxa}WZ0kD`%${H_gJ?-Lxrj{ z{2MDqJ`mcOE?)BT7dAtGC+w_jt_j2VMggo|C?_qfu4b8fR24YZ!?37%F2LMQq=vx& z@+-ACEEENYb4DLroCDmB2DgCV^3AW5BQ8LGjWqaq;%buV67Th{ue>8iJh<|{u)NX5 z1%Sj+0{TQg{nLR+<(W%bFCR3M0qv1IQ4nM&OJ7kLigxQ3qM1eG(j<6%GV6lKfUYAB z)Y|5TeP8pr4Luvw^}OEH7$himcx$&*LcU1@7?N6?3#dhe3xJl{XC9iMi&XnXj^!XA7Cw`{B@7{kU4z@J6xc(O`C?| z*7sls4?4e)->h2|59s5lYQR@c^@Nu*{_kMKbzD4cQ_tKoHUNPWJNZtJb*GE73Dw)D zPb8jw@tIfaV8$(dVb5vo1RNO7g|n(-=7egnqW_zvfj@+rX}TUCG-H?k=S3(uSI=C< ze)=~QycuS~ZIYK^?@64pkb{|G?XkzqdWjUidiZ*^^$OqsQV`uN*W9_KbS*R$n;3zF zv{2qfo}>oug}@9~M7{F?K>A+W46~C39w(cW?6Aop7~s3{gq`d)$Sjt{;>`6&2;!`B zL){&G%RiftF0EBm5N;34hgv2_dcz;w+n&bl2OT3Ubf zUE!G7O59f2j(?Q#O$7BExNjOVZu0!EwMA|WAe@lr0fL;(HMt-DI>h4Xsrj-r;vZu| z70&*5+S){@i?U$qg;FniVk_5_1O#pL!qfj25MILh_6_50M{LUT850H`3e_tS*oGhO zZ$aO?6Ge+r><7PhBLzjdr2pa~2e5e0AxN;=4p9o*%_JI27RbqvC z@vS$n(&tMVI0k#2fF(^8#mTWgX?o)bFd4{YETH8lDvh`-;SlbpMeP9x{d}$oQ41Mc}ZF%noSq{Cp-<#@%*S zrR-br6{H1fJazeCke>Xme%KOy7l7d2Za)U7{Wz1))_Xm-#Gys#B~>29H^3IYBrdh$ zU7>6lj=mhwo`j6P6sYm!07wqxI**8AH=K2%Mn9q1!UAHZ zDBDF3xS+g)Hoey0=TnNz08Ml$&`gUw4w5~!`wqdR{TCnNAUrH0YWC@;u(IS7z^f}w z#|>TczOo2O+|l`zeUPwDorDn@zrQsDe1L0Jb=GF>YaYMoK3NqPC!&0e@y=G+OX5@3 z3ycjn$okQm?ld&dKPu%_AcgllIQXC3KE`_a0!gPI#)`08P33xwEslJ9jZ9nADmU6v$o+U~~zV{|p9T9Yx$^sY3K&>X=`oK{$ZRSC)CL zO5eW2+wDKby zPFZa4Q9#Fk1L*x=3^|vKlnT&Od5xyy5%LmMw-QU=O)lniMu4b39X?2J)>oWg^Bli6 zNOA1}8l+HL(^xP43YDtS=;(t#YKs|KFnx#2B zl>?r!LRG0arf2&fo>l=$)zATRewc4^@jVHsH%H=5&FL$1Lu=9X_>a=9E+E0s_nq6P z&i(@W-R0w=!08l&dXQ7+VjP05*R1GQ=qI7sUxb^}?Opj$-@>u?%%#RpC+*>6u-y1C zbpS~lfdOmPj>xxIZ;K~;+qq-$d}NWA?Q_~|Z9Qj8aYr4K#yNe>V+kv?=JkY^4bi$1 z`=OGo^rk-pq5x!a06oyKo<`IW&T1``Nv>Vo6{}*nfSrvgm$Mf+Yr6A0f|Z?`g1Yka 
z&+ZAJ_fS!N6i^g@X{K`iBPM)_9E?lV7~mYV{s4Q&4+2sq7BHvfG%7^rFN-Y~x1Wa| zFrb+yBUEeceDb{Vn@YTu8ju6^vYBFeG}1arjND&`CLHACF5!MCZ|03Svhp;FW>8x- zjy%;KO6sTGl(r&$Lf5~-u8mZ&uC9_v3>?O|JKLYScIecam^eq8W+c02obU&g*JmG~ z@j$w7qgGc}7U-?54H$Au>oms~ zGPOhI7Hw4w?i;KXy{h$RhTk^mo6Q?AJNVtm(#w05+su_IAI_t69*L|W=3`k=^|J6r zYCIf@WhPg-(xc^F3X~SX7r9PWRNWiQUa)!$ig76y&kFq!3SCV{%aQs6!oY_p)XVr_ zc`^=RhO#Bm#-!ae9FntNYS?-Vcs%V)Nv+Gy;Jzzd>r(5ubvW>YW($6Se z#v_f)@ML7|gOaSi8^)L2yVdiAeB9;K=xdCDGM5DZLMIYx&st!kOWgKKTOW_Wp(^fV z2XcXt>!3HXw>C!yz#7*!M^BD@){{TB=C;2aSH=W6&s^|2ZuEs>v6r_ADZ_%e7mc{u zDBS4aS3x$IS@S-!h|!8StMecRZ0Ja{icuT4<~>TK47_UiYOT{y5fMS_hY$O*+;`;1 zEcMytL^ulPN=Z)w5pK$#tWExy8*xVUlZND8`2%Vrg15gbtZ${IOsPaD4c~`OmpNvLVnHaNFEKqANZDy^5=a~G(Phb_E8AV@3rk)(GK{JJOpRRn<37N+W z*?GyR@_OSTZGv8WO%BFWY6P@>#-^}tmg4ZmIsV2+0J%^<3xYJ&e)>;_(e9n4d9Pyk zfUvu!=o^*HMq56li(>;P)e~1jLG}0S@xxQU^F^*jz20PbyVqD?L_K{}tbs^qK;*tA z{y5?~-cT+nLAv4>MsS8JG(rDS;|S}q^HT;=LEPXvy(0|>@mp!&9F-#B60VJYhkmx? z{cNc6ZFT(Mxg|zW+2Q8T?bWy1XFS`7g@fe6&adgYy~_V)tR3NZq#t)@4>i8h*E>xW zwxgKcU~i$Q|4TLr&PllIOQ3(iR$DoC%UfT1A>r9}$^Q(_5Z_GrXt8m&At~j|0&XGi z{rAzr_=nit@o>wZy^Q-AO)#~U7G(ZIKO8hF6j0GZaK2%qq|T@`qo0fnE=)bBg(;

{{sm#^a9~xiT{KTV#dD?Q$L@`{TsP zB0iH9+>*Z>vpi0YT*ep6qGL#y-fBAd-%Nu?8-ZiW!eYV^k^ zs4z3G{!ZdU^tcLfc%l$KXQ*v)0xd^9Tcc8pAF{ozNlF0AaGaUren6;;( zdA5APtshp>ti7j88R)u=lbZ1oJfEq-i2N0)3(Ep$!ZBT5`gl{e@Ql*#~cIt;Oa6kjT z10${esgDiQ;t>QWyFJ&56r3*ZiHun(F+tJcGZ_nDP4|rZc}@3zYn_@bZs92ZADBL# zNG=I;lr$I%p)u4}8qQC*QM1;Y+Cm}@pacwVO9TlAZ1TFU#C$mQ>h43`l>}VXiG!=@ zL9*v!HY+j>9@OtVrtB6@o{y#)?+47++(hA{)v*NJ7p*Qdc0<>;M^8 zoUK@8_u6O{>DExtAU1~5`}zg+$~vG#I;so|-c zUK9`Kmi8T9AgT1JR2JbN_sJD}--q{)nQSW3S>&hc7*{`Kr`)2h$6wvItF2AHzARjQ zDylQh^mOXOXmQg>`>t_6{kjR0lik=vP(Y;zeTnUeoz~ebW=HvT;u(#$W#H|}I^$MK zKnq_Rh7OVzpIj{z25oGlQM4d0p|GM{9QESK;g2mGnpP{J>$S>N9lfya&vFxfyuo~V zVlRz)k(Qk4Fki@sEx+BpQ%#yK;^QS<3U{gx1UI*!URG1{I?2|c)1)#$br`P@ldWyb< zPY3(!?Cjx_#W7Xf6Q&w}Jx})rOH_`XybXFA52ZO9ixF7yoa%8^K_W18l^nx&iQeaz zrX$-^q%Y}A6mZd*wLJ1LyhAFO%l%*S@^rt(gj;D~y>J-xFy((P;v9Tq@(k>F2s;zF zCCI4$+T&zSVaKVac49+7<7Nn%XjaD%5+$t6j8NPGC%+sZn=6J;@y4daEU7p36KbtnbvLhG#Y)58PiSgv$4eL*U*$O- z4?>4nAALng``6ymCMd0)@^jRs2M}0JL8^Ba4<|ty;8-Nd2O(LXSnX@L*T3{*Xf|6y z#$y2qrH!6Zh&PwlMYEKiDRHqMghCBHi}g8J9VK4(Gh0gm0fF6mU)@f6rP)!AEm=8@ zu@&`)91~jfqh-Nq{kH9^TZ@w$?Cngv@YbsxF%0^~t2R;hg&LWDr$}~@Ydmqx3*9r! 
zgoRJ+Ba_7uBgzD_SDX@+q<=*g)x597JML$}wyA9SJtqo8T!FiW5iin;2cL>}m#?o- zd`sg}QZ(CN_E5`6sma;?qy~5zui`LFi;Y8aA|RhfVmX-^cuh-`~J+5kqv?)p&Be@<_tR!hAI^(GtQ@_C&8(_3E)@!zttstQ@q{30s*6cBFjL5k3 zg-hh-Mdm%rSt~DtcK0L0{^wJ@s3QxQ=!vBptP{$%0~jUzoE2L3GLKgJy{DRPJ2&O) z!?QHBNzGIJ=nfx%OMvMpOe7Y*+raUQStSdo8kxNjqp@N05e5-24WT&0cP4`PaiI2s z0ty3!IEFHTWhMPywI;NRX#m1^Fg845M56=heIdT$RsrjbYEuf-_xxfe@lJ+!?F;IL z%sq8IdKIn|Qu{QBnr543IStY@42{Z$Y>NBqvU=kv^Pfbta^}YBe2jkQ_bS`XmO!~C zLtHB69z9Y~kh^l9;AmuaK7q*oYlb}KNusLJj8*5K_rf!AuiV{-RvnQ76ByDSsqaGU z=tjuoj=z3rPaba)(KIe5lV@ghNYdguohV_(1qdun31t{r{?J7e^h|ffAY&USi}e841{WTbWb*%XYI3yLp_ab1Df_FQ?R>Q@1phHO+ z5s~!o|L!|iFK4O=F`F{jkir(6xt47FVKDlH7Km zSPf5Zdj}F(N3`~~yS>^t@UyX9wLBW}Khn-T<;B7D4J_s*$X�rQxP}fxMsahGD@K zjHzI24QPOSeXGE56q6w7vE6PCcu|H@+^`@ZmTWvab=gkt)Vc)nbSc!WmZcjr#q5&H zd5}X2<~edAELs_&mHGb?pQbh*70cQpHEXb3=sh-Hmq!VVH(yl{1~OB)UxYp$NM`=H zfI_-99%nl{X+h`+r(665JFqJ#050iuOsPv$bNm3T?=Xv7Im~4xyt_LCL~b((LDTPn z@adV8w#M}8cu1t5>Mw4F^wAEYn9~!ZREey#OHSyiBN^YeJ)_%Jnmuo(CBFfxIZ}1_ zg33lneCt(+KrW~f`9n?l!sS$`lXj{-o!krk;toubdIw{`<41XoOVt0rDSYxI)GlFb zo%EHSOqvk`fqHS2+xWb-+3Xgg``TA)%ul*#qD3Y$@->X6%cHDf>begtsSdf{bi%JU zV|d-4#o(DKoud3`?p3mzrM{IW&>L9@BQ;zkYDI~BOFa29Nm2pFV*k<(Z>*0j>6=OuKM};D*j=a|tg%25hH-#mOKc}9gEWB?7 zn4>Dh^7^M?QL1PT>dzt`zkMRCr zS+GY7jT#iF{_*oAisqgbl@3g}e6#yE&4h7T1YS4@&*%%|v*UwIO&w>d< zG5b|7&3aSeyH(Z1#0#h59^_J~Z;g5@9Q4u}XGiQB>WjrV_SPL_%OaND?!`ErzGyBy+O}x^IrM_wz09Vxd4+6Nb<_gFw{he-yo%2ZjmMO z-zy*95XTU`N6I0*Uah~98Rh+a5{%DrNWA2H3r@|{`kRrAa8OSlCXQ_c!Bv6d*7a>q z(K8V)Tyi)F3TrpI2KXPZBd44^52iHv0QdO&z)~NafDbI?C(3ip1EvzyWf+o8joEMH zB@Fl-%X#j<&kee7?O>A6}*pHfC!BovA+MKG5BytMcR2d!w$PF1(~G0Py}g3QJCZyCH#t zt{&CHa&m~sxqm*Ur{-KqD6_tWCx&^3>B4%bX34yh;OVa~HYKsV+D&b;KQ|Ed z1yL89=kCcj>|W5x_4HfF{deR=v_pOyQs0m`AKWN~ClV&G5#A2MTt0vpekF~wdqWA^ z7`kTN4%+8k^Q4ZQ88v+^YR(IFQYr9fTnv7&hUA^~=LHL%5{hIx67$}4M|stO)%Tig zddm7k^;bWKW`gkl9iGiR7r|=j$P;>m%xCh zRN8N|TAj3?sEIUKo4}>wBi`VZ4<+yUyhz+F+=FhoSLF#3Ze0LKgCDLc5zZ}-h*zfB z;ll-RX`%fUABZY8f^YI?EO2)k<$>)-4%?4Sp!@QsuOC{#Tb=(lJf=e8hQw^7NH`BM 
z=Sy56r&4$4wqyF!ea`i&*Kxhf`Bj5;N~0b=^>2Nwf&EL+eP`s$!L$dCVBb) zUN!A6hhvMC!sH4NClcs)mBKO>V&r=ghl6HZ6gKDW`(CMKh!=Pr>FMxWp9+lIC;s<` z?9UCoK)@~uG!6d_-Sg6fvU&KUeQAIbx9mesZ3 zM-(FoEcz_@hWu;$vL7+ZmP7>OJyxcf*U3L}OycPY;xUEO-tcA+GP{ZP9G>741aT9t zexP$(bc+e?lt+*zQf}jI^xiC6gaKL{@cF2Ks^zlWNkkKZXAp*JGX)WanS17go?-31 zy~WGI)FpB+*!waJJ@BMv?`aeUGsd7e|IG^1QUBL5$rusef%50EP&8if^oNorILms` zg#DTtcDkWRo`e|IRb1$1{UNJcTm1)LaMu~Q4$rdJR>X2dr5DU9iK@zy3P|C+4h2<{ z-|j*CdR9l?$T>oS-)CuP88j4I}I z78%V5eDCYpV5N2ms$u_c#h8wGB=0-zc==QX&qSYAWe}=;wqe!(8Ihyr^OCkau1v6w z%R`f9jw99J4sxI-RnF_W&gKPdG?|~33Wroh^Jw5Tb8Is(_ZYh z@)9bm^-j;4wqeR%e~X{MHRIbYS9Pq>0VY6#q3~>yfC!h-C;_1R{m-@@_w5#vEE3#D z3mu9aV22=nll#l^Mb?2_N@N4k`epuS+FJxF>IJf#YrLBF5J6lGl7U|)V_audY<9%% zN|)6Hc^Hp=^U42bCvwqI_0&snJ>CuskB09|U2>|+AXO9dymw`B8r^Y6Z+_DmJ#!3s zx&MfEyHcNwni~b@he6`l4u(9fud>qq-*#{|cGw&fb$F3>T^NCG(xVtPwbOL2TD?@B z#x{Bu+iNfSnUs;7^2Pho!T9L%sAp6xt7zP!*D(Lzvo+0#T)OLO8-Bj+J?>hBdF#Mf zfx+;JaN6AaMsW6JhH<)C6c$9d>iL&Y{|c^{!E(@R{RXE)Ad4dSlWi(c43#&mk8vUM zY22o;IOqI8WA{+%h(U+V#*@JN|NeqP;%qj3r4^tc8A0PUId{+AFaUe4WP0gFam{0( zq79Q}3c21`QPo6Bl3N)GsMcBY*r<0z#rrS3e%(h%D7oEHg2u+{FeBPzW|rhqTk4Sb zI-ah6@4pTW?LdAd;+S#Yf$;T(wnwnyiIKlQzqFBUvV2ksoO}ATKlG-t z8Mlu#?cjp`KnL*3`b#d03wPFFC%OyqabB4E3~mD2SthCf?~P2Z{kL8;C7t4@lI&>( zO-&=1+sno)qGwX)90MKHPK!PMr3bSEx{!xijs@DvBA8WUYV1488S8tE*sc;x*8VPA z(#3{?S=N!U|BjN#6p1T$uR_vo8+Rkcgx+46RZO0X1&hi8a47Y(@RoCmjURs4=_(v7 zFMj*`VYsd_8D+W_V*1~n{;(g(o4!GQ zNd~^k{RLT9e*v;lu%O8}YQtGgLlZKLX)LU9{G>?11b3NSR=i>mt%dl`lEQsw(2sL` z@{6ccTP*mmpCzN_EARA8jQ{T|%eevg#8}Ehy@sKQ_nY1-8<~pPcTh9i=)F-c$ngWY zRnZVe3I=)aNp8qfElV^`+Y@~MStv}h>*76vHG86py|@gSSI1B{Xbdik=9JppRxD0( zvOw;njqyE!^&F>|Xkf4;2Bs+oxv`H&Obo>dqfV=frl|l%$EtwfH17(od+=ITMX`Rd zulD*bJ9#xXF}nA3a^&p6m2M6d%da+VMMpzh+cWng9(L=5lh)v>V)+pzeSdscWzE^T(YoHpG^UwU^Hf49_0|PN1~Y zOuyG!&7X15gBpIb9~migwz@bLkL_)z7LXkd+KpLFCR&f)>yn&HKT7Xcy9s^@&Ijt?M0vs@SS##;l|yu2;fH`sU6~ zRD*t``CL$|?)PdJOI43?cka=c!bh^Kelf;SG`0|0L2Lm~b1-klXGKtB^YzPmDzB)u z0RP{q6`0ixfBj>@ z7j4^vrTw6=i(L~wa1+fuYPHv{;ahsaSf! 
z(|TALrL9{zwh^_HuZTNuM0y8aS$m=$Ra*c)Gz3^8VFhEQhEGQb{x}Bl+>PUKb=uft zsr;E1ZE=SUD*N`i>bXbseC3{O%+ZJzIiyJ-y7tIARZ1j{v{B>;AXU zwY$31`V;Ht!_G!oC2L3R26YgAr}?U)$cvn4{x_|;f<3|5k~XED%N{(n6MWA_brfRl z2Q-OyD&})u>&Iae?h-&}l}H%RmHMrA9x1%mNu^&)JRa8+zXTTqgIyjikj zK}#O2M9Ew0GPj_uI6W>YZ9$ zv~*pCc{5lAn-LugiulG#-i3d37=!LOMxQ~m_QwS2c6j-?txq9<-a;S2;uZh?UHt5qbb!h(0W(9gX-*+^w)<(0sMRNQ=`ZLYHE~V2?TBRiDLx>5$>}^*8g@(q zKA3T(G^NkvaNAxSs1;C*56W)DcI;@LCJD>+%N)^au)(Wg%KH~izHBH1EtWn5Hh4 z^Ai`ovqYRNH6&@XM2+t{Ly%Rj>MWaL5-Tn)+~5+6T~uf<`F~iR7MHmy`)0<+pQxoL z?g&(1@oDDYdH4>y=508p;Hq8BjL^10F4LAl_ft5R2Mju~hT=^L1W~6 zQwTO){-c`Y`6Zri_&hFNl@}HIM~wd*2fIY%6vu}U=Q0S;8?P!nu+Hcd#C$?DhcoyR z9~&DRGhL&KAGtKu)6ee*r3aDG$TLKKeoOL3JSuSD%eSR;i$d7(2M=FyM6`W@Iz#WW z7;19ahZYO2+m36i545z9llJ2M%(`3DP*xaQFjpc6#n>W!dHNOW^``oP#G8 zVv~#}aL%vLt-RyB_uAa^jr}1O`Pt2g88QY~dhnMCVz* zG5reI4s&&{PSDgd5h{@TiR;F88QekDB0xd#PT#%=7QBoxvXq|Q{k}0>+gCe5r;@CgC0@oH$W8zzbZCf{VRUq~ zt(a0nknEiZ>ELkcv#i8mj?6ODqWvE(f43+bR ziWO4y*luTS--+Yq&gLdIy=HwWRd0VK@n0``51lB?#=dk|cEQ?S`U)9Efg^^7#ml`( zcU9%ZVHmA15s9&q6fQ)5GB!pngfxLef`U=Xda(O$roCFxhP|D-t#`hnncnqV_3^Yk z9^qyR(21obGipZl>RU?wAB&is=aHu-S7(=d_Ad($u}|-nU}mO`i*XTBwX|RAF+vZ+ ziGrv06zq|GU?Px`-_>|7mJ8ChlnQjZzL#T^@Plj2Y;(w zx#Z?ZO7rGx1ObtV?@Qx}Z2)f81wy4$!cRYrx@#%Lf)%$yvxQg+H%09&3-5Ue--5Q4 z&iOT)(+IW6%!|3X zbas)VHQ&@mQIinGUGU4N6ZI3xeCo`y;7RLIGu7 zI#&$wWu2Qz-1#uQM}Qfh8cTnwklRkuw_Llpi&TZe&W0Ug1*iBBtEi%L z^3s;f#ur*5C$p@rL&Fou?8(JpR6$ZJZ}5;X8gIVptb}(L7ejOqZS4G}b-yZnFc82& z31VlfOJ5SyOT+A{dnYY=6s3aJE7)5WgigZlc7M4y>)=Nrj2ODd3moT*vN$BAmS`1@ zFX)MZB_h0uBLw#S*4AX6I}tPAAm9F>^nHNThUh8gF2BIjdgg`Ps}czaYA3dD|9O&e z)Gp<#?`vTcBhyuU_)+Vw?`QduHvtTm9Vhx-^XT?Hk<9L!DN&UuT12!)Cz9KCl>`gnDl40i zYQLvRS=Ks50SRUQ4Yc8OkyFX%A`Wc<_3)dv8z&`qq;fl&lWnJzf5Aqvln0t2Xl`VA z%G87RA5visZOc;UGGxTpTQJ&_%7r4vIV@p8&Z^O)q{7rNHp*g~sdsmG1;FcURi4UpccWMDN^R zFAW$0p$Q)Cj>Gr}UT?GLwn_N^FkMvuUn|ZsulTh?MMsDEMUd}dYZ$0yao&FajQ!}# zz5OkiH$E#PcDX{gw}QJ}sze>?miBf+2dhu(K^NR@lbd{b-Q1D6d~baoG$o2ApCjNl=2B$i6|GT=UQBn+sYRR(nk@`6Er~4ybj814_KqT%@;7--x 
zq$w&Y3K7bNVr1Pm8}3>bd5xJGU%BSgyK79zT!m1KaYzaUG}IeYRf&e-9w7($-v~x1 zf_k3aJX4Zm>ii7D9o7hclNY0A{8;SoI)8XFgdQ7EyNLM+H1gYNS&=i9lQ0g291B=7 zXGo}WbJ^tgx4sJt3k|J@JvW+SE?PU0y)&GzHU@v{?Yet+XFGCm$5t`xjc`J4Uc#H{ z%^*@9rQWJ3TK+c3PnDjY?(uisqE?e#p7whbeho#-$bgtlkK86%JjZ%Wa$rxQvhFDA# z90D(NCTC+^)j+;`MpyWqrkBP^Qj+|8n&uYzvb@*$ACm!njH%}i(TAS7gmLI_;$bI! zbDqV2!TkWpux)tA1f%*6mvY4ZcT?IQb8ydgJgqB|g68J- zPUffRIWos~7PKc73EMUMqvdB|4Ep;sT z8YlF{A?Fd8%~a+v-J2v%LkXtF;o0gtF{9BkuqBNtqepn;SQuPTs&}5x)?0>2`=*|yLUnIE*{W7+c6n0Jm z_`%k&LU5~&bwYOEd6L~NgRiYEOaFmJy@mQH^y^c;p!x7#xoLltN4ww2xn%7{(U09> z*q;Wt7_}s>ebuGX`SM2?-iU``APZ*!F0_8vkMG`GO7B!{0+;FcA~J`$k%m8so@>KE zjV=2+35gt*Y!wFmL*wIQjM0mR`=3ry19jfNMI4T;%l!2#Ak$U4=}o6N^VY*Bj#b_# zs6&RmN(PeW#2htwK@wVA|88*Ncy=c2zi37SyI4_93T{D!GeRwKI2#?NSnSoTY;i zu*0yO!H}{;p|Jdh6&yw0^wH+uFwkmm@ zK{p(#bTs_<)gn(?bf@OP{XtpfxCw`lgSp?#c#(FS+0i^eC>vwyFIj&pWn`sg)2Ftt zzWf{jOr1M$H9n3`=YV?*P2N!sg(`5(#T9`kp%{~g=YW*@13(qPIr{eX?pe{%#@JIj z_n9vK8*{+h4sITvIDjIYl z^_Yu7L$*`XXy0;7w^;*r!Gj3^VsjJnGgi=7@1%e{d5UiSL3(t2T;OB+4k>z2i3L_H z%puUEY2?}s4v%!;?n6;W>r%_my{QrS$2&pyao3sq-o@M9oX9Nm)(pKh&vY;+mhGvq$P4#{@_1>uXwk~a zaXOq(FzMxl4e^HCc2+qx{#&b`>z1=QbeM7n&2u7A9oKi!6mZusG?{xbc+iwBF^%~M zCfbty77&Z}sy92yGEz-VjZDC{XyRS^y4XtE+U};Hm$SuwdCp=ES;Wu~THPZvMaGHH z8oJwpPx3yp>^pR%7i^BgmH}*?m}^?VIH`%+t*^Z4%x!9#j5I(w8sE2n$G5mhVQkFv zj|DdYa8hrNkyWr?BOl!jKRi)UZ=@bR6LZGySb$fc-ET7$o}TSE2_%vep z8fHoB2~`Uo9upD$^I(_o-CdYYU0wrb^eK;+%lz&c&6$;lnUxKDVm?$sEydky#A+=V zvVJEvCnn`3ug9Z)Ash4c1o=S>Bd~>V^v`5eXPEPDp4ssi06U}A+mGuQXVZ8}0(uEk z=j~D0sB3+EJaK47@f}iTZkY8)#ba2C-+yAzwNj>WP#Ak{t^t9d^)&S!&dKGW&*0$oWVe6vMvB?vepZ5U$o84W#cD(=F#a+VoOyGTf z=0X(Q?Vq~c!t?Xc6XzDtPsx))1k%HeWnp@+F;G=}*6OS4E+}BfpIzU~5hP~{R?T}d zM_Nhz#2ki1n1A6QD=UkrSXdsG0gYk~qb{I6kor3Ez(1#kBmc=ni#5P{2)hKSLeI_E z)HKycHnNFRG{IQ8z-<+X?Rdytc=NZtEc=%@x^R4WiDRiTaC5f@7udK@f0v#-JRNMe zCsg%u%J^i`oT@*fwp#j@HYW;T-|x2_oIJ4|Kd_faKKa$Vq(%HLGxw@K6RbOXA+)U4bzundX?=Hk7k*sU=xGrXvF1~H^Z>S-h!{2hYlg~+$OKG7l z`BE@vNNZKHm7A4Hk^RCSkn9@?aHIj<_LtK0#cpUTn7&8BY0vT7i-;3!hl@!Z9-^&x 
z^F$E1af4euF4TqT+28_ii!EwsX|*zFKj;#^P3V)wv|Qwj=PEq-@SI~VEdtAb?zm7Fj6E^L*y#W!8@UPKYRr7)x zN+%fkrx!g?n=o-6q#|Lq7Er}>k5&<7zHNK_a&@IgCunyS%8>x(y+W=SZ1`e&0@Zq5M0TY zf6R{d!(+%r^6Q;WT1D?>{B&<`ukLTiOQ!epU@GXkk!&_U+0jn?cklU@N*;_mi;p(5 z>z!-`$LSFqz4dQn3lvC=GO}KXW8_s$@wjcWJ|~xa8q#ds($&{ek)yP0B=_3qK?-G) zggcAFonw5@eq?l}atfC?7xzL81F*$-8^`Pm`}ObQlluZ0kG?jv5cTigI_E4Ozzu47 zs12vgqUqH_x&HxT{()|Js@Z0xpsxa3kl}sv(a0fU0yEuE*Y`L z+_1B_=RLTt{TbaEXCl)0crQmuMaV&@yDnZr@Dls73JAY&eZsV?gYmEslK!ln;#O2Z zJA7vFh8WVA4aHWsn_EIdhLTc;A(XB}R$gK$0fE)X*0q10&oj0nNU z{PV`js*9?yHVs4W6-A-)zl>cyeT`~c7m3jkxKwnJY={jTTl8ySGxKYNR8&)6$lwzj ztt{FretjnPf_Aq)`Vk>Ce`wAK5r_5d9PH9NorL4^3=IuscfWwcDG&Mjk+tUj8|b9K zz+0!n8Q+I?Day9nhScKN-N|ugoR>h^9&}mMqAqvp2DcN`-POUASn&VU^_5{!b=}`* zhIBweN=ibJZjo+K2?eAZNdf7OAteMskwzL37`nSex|NVFrD5nA;ypaijnDo6@SZPo zU9-9#r27!fJGb z$qCMTkjH7sVcO%u`4xCYRu?m2fcq?kMWY_`(Yt${U8<{m&+c|1yeglPea!%5c7-?e zt)F@~N-rk4S>;2uZ+pG4q3u1zp)!fAyqx7fvCkYR`XX==I=F%gLEFCvDSES5>wukJ z8);<=>cL6ve2i)#D=Xl%qRH7i!R?Z8nVVxV{30HEke|!wmx1>4CCKZ_ak8Pub`rm{ z?Hv{>`~DQM=5@dbQI}^=>!9(vaV}1(?5Q$51Dda!gXOKfk0r--moPBa(X_NtN9w*< zE#(nS{Or9;aI^w*xLtLu>mt7cIJ{wliT4#}5S=DLKq z&t9pIBa1Ag4%gz;4rsjXFI~Y27w@-Rw;c-rm?b}sE5ZiIo}Bd_WA4P(e=M|Qnd5&* z3=`V|m9uK|Pq)wFx4MWXCML+JmuF(UMr_g9n+OYM+x<_d9Een_P=jq zc{bM3u&9`Q>##UcRi#`L3w?rOSthAC%h`pp$uIaFI(nJYjNrSaeqrsNP3zjdMWq2|VmJ_YNT*1vGgxBFNM(T|q25aYqv%bV2 zh0sB0J>JRps03T(Pe~#ICLQmaxORT#p4+{aQ)otVR}%PbA$s*U4_{FBPNx)kVK)lZ zc^6Y$G{n5vX_&4pzf$|IRN=Bcl<~ArNO|vxJepQ@M0v-+J(GT9H1~z;oO}Fk+8XaIqp2( z9Ot55P_J!V^`$>q!G>Ix?h4qcNJwSeHZa>-<_)d;tju%(@xuqEqvQGEHKDxPvUUWZ z#P`7>G?Qy^1*r<^JwH#oh1 zSFYv-61r%B_<{U}&6I00T`<+Ur>e-fcWUekiBJ*8{=r-o991hZJ+gS1Xq~xHJScwU z-7BsNzpP@f1}5tlk`D&KO%7RUPRY>T?t5?sMMCZxR_9i~GQA*f2^jF+V(;Y{|l@BJ$y-gu?CGvXSFUV4?LB2DND0*k4K&ZubP5%2}?$qAMkKbTwcjGr7 zJl=y7KHD)@1|QL>{Ycf+l1poSAH!Bn#)a>((fn>h136=S4BPo8ph)WPA2_W&RMP(i z3#076(!?Kl)~>emXQp`!x*|Cp^l_yW&y5s?c(ENpC-*Skur60Os#eL)805^M_fw(H zf;NRX;-X_u@c4j_T)ZKaPE8Ox9E+~^x>VEwdIQWhDH-E8RV!s2VDHZ|o4z}L55FuP 
z!z_Eyx)}(L>f~@fZRmuR0#tk3tC(9NYV1r@47pqOx0-RniMXKxR;Wadpyp^A~}I zGotgZFv0Z-iX?w?k*n8y^_xLf76j{rGH*l1+&(|N9Pv(V$03+5y%Iuj6W)Sq`84~! z-!h6}b;@Z#rJHFdf=23EZo$G_8~)&6lD+-l<2+CHUf5%P0&f`A<{?Ip?#qWPti`LJ zqiUN^ek5q#MvMN*O=S9JOuOvMN8Wo?pDb6&!OJ|-HQ3~x49~a+4`L*?9uL&Bi1VZj zI!LxXAgM&M-d{tqs;IcNt3EIj6*I2haIQKhgJEd8H~QO2+h=WP`;^?}Q!=-(Nb^mP z2qD2n4}Q9ttDzF5Uw`%xPI@GbC%h=1TgP}hao(Pj)-*D?1?tN+8=G1vffxC@F@JV{ z@uQWTI6vRTQA%<7n5%+d{Ni+_`c+F5y!T0`|5{_7t7fw!HUW-497h?aGQX z)BjY}T_Gg#(twG#Lpx`SGJIU-kRO_SN4=29Exta^wKIHMXH}=!WhP0z_`6S?RRR1f zESRqPC;3>`y)^42wA4} z&wa)}k*|GQ_~u6ZYt{2gv5bv%8@?&f@qN|vYxLfBASl2V%N`UbQ-UVZg9Wy}0?slvng!2N2pI_@*E_=@NLVlL|= zVxdVYIQF=5XQ&=Y=X{P>;@jQ@cZn+vReOb(T;?kCo0a}0K@4p}rXoLw8iV;xH1>1@ z^Pk}BX1ttUHp;MK5}H~Yk)UnimQn3<==-&udLNw3!)ueib4?@`be;vmjg~J3Ns8 zF77Q8kzeUDWmzSw(dQupyn*gCLG37l8?@E659cr_$?T_r%qMKOF%=pOklF-;X~R3c zBmgb}HzS(6069wg8dXuuxkCe>uqGcy?_wy`l)XZi3*ZwK zC)Ld8^oice=yzjdd7m%il^ngL6itvh!>6N%w8^hm?23Cnt4MqcDEjA8A4j009V9aFpMyggp7~Kwvp0%ZJVNY z{nJq(5}|Q%w$f>3uKX;una5prq`_XGfyn9{o4fyv#8Jbsx1^LeT7ICPUrAhGbYJjCX=R-|yf(d{w|p*Ae9R#(?i+*R{mM?H@2hdN8nufeC3~-C7}~?n&EBxL zXHNBSyQSDj7EzxB?m9R_T;}Q*J|BRcE!T752j*TmzfUh#zBCWK-C{q@Y02^PAyp;! 
z6(fdSmb`6cmG3~w6ABY|)Ykf3k+1!#RHc+~6%RGgsj*)u`l899R^1-c9W6#w3L1=I z;NW>4@>IEuZQO$eJ;we>q=`>-j@Z_#69x6?$pG1tIs84vA2cVCFBFuOMY5=orR7T- zi9Dhzy_WKO(l`#%Sbqi+xbOc7tW{!X5|HxC&kBc5GWQ-U1hlF{`#)%JI*fQ&O;%V+ z*O@fEcEKq~G(nQ*Xzg>+8e&mUcb($^R~oVWqeCu{M)0&5Ejt_Ax_7x4y`k^JsLUu+ zSBO1%GOHVXE)lg3yie79BKK(pYKqjy?*Y-zWgg3@ok9GPxQw1^rt;#d2?dw)mhqQG z`!i*Zz(UB12lg>$iRK@SQ?4bLZnsrgr#|lMJIzfwq5{rLO@-W`T?&6{C#t6L{Ohe{ zDnl;>iap(!1=i$)Hu32vDtV#XG&Q=ijN87q5a$_uB(0#a_&gsp)w8bGZ;V#$Bn>Mo zhv{wmR8nwhd|h~t^Ml(tS>Pq`;7arOp*J}mhOVCEfoG$(-wOLP{cPRCBv4)?lMlnu z+$%~yq7bRMx{8g}EI^~^sOF_&t6#&1q~30JGO#?YM;gBWE}t@0JiRHzd@p z)G6og7lEqB2#rb%N(N7x*&XV&t6Rf#cMrcb0&=p?beF;Fj&!{B>&@ z?O9yDgw`wU)UAfWxXcjn%+dyYR&_)Q5FMusen>j_5;R9P{C+1Xn@LAPZM9H~?jfk_ z5C2-|cPZt+@qNUEte|OQ2qPbvRAb+-4?-EtMC(`4HX4@$G+j{kBKMRq{Fbt4+yR+X z`GnHjdRr70`TTx{YM-@JPpwy)_5PORM1wccp)hZ`#WsXg;{@;6?xwCX5abZAJpmBRtY?&PP% zcHbB6m-jqh561&zGNMZAMK6}csEiy(*02q>VB@~y13tB(G&Bd(8PXpTMpO3TsdM6l z+~bO}%MA6#Hd6_hEs6>gW1*JA&zRk5#CPauKeUg~nXjy@q@~vFDbG9RD!%iWCw4t5 z2f2O zAjp^8z|J54d4KDul@P7DExZKh z{L2p;=0Ipu%qIMKicqD}_c$_wnVA8?Sn@ph*Dk{>mX~1xYd>?2UvyMCq4_)9q5d6$ zX!=1?baAZ*GwC%2>N9^TAiI0_)5fXyy+HH&3#xb2D_f16VyNGFrrV652=-%@e1f)x zPtNN@UPqPbsVit|aTgnYNmW3A+D<^VFXfgRTS>MO=5^)6L@3~Pi>kCQM<#w+4rj`G z;r;@R7U3Ru2aiu-O~WaBGUfW%IT5ueDB%isE){Im%D?E5BAiV~*^FV!k4n0j(CCV4 z%q4q`f4MNRn(nAvl(&bIl(x=T4;6G@aoz(NI#j#x(vZXso*CB*TFiwc!r@IVM);lz-BMrRWfOlS5>rZMDLJy6X z@IzDcZo|4+nYB;Ios^I117e3RPBlk%^e~MqM>YcknnwmW#)Q<>y$rh7hwB@fjG0-8 zAbtHkh%*k_MATe(_*gQS9(ZARC)lMR1Gy6cDIArMtG==N~b05fA-$swJ@wD>n< zSI=I@}W zy@)oifgDC6mNK&6jeiaayC_82@WBAZ1yy#Ju<&s4fnI!cnJmnH@gMJ3uUXjMy`Gdz ziT$3}s`q%i0g-v)a1BEy`N0ALyXFr`(P_^i9Uo6jb{PgP&C=)E5I26Q_6MLt0??veWYp(mGjUXA z)@zl1R+g{fA#X5yun=miU1D|(@sj@|Xl^=T0Ep^^8?b}$0NzB@T^^$rJK$odGS|YT z(#gr9>8Ll3eTtRK_I>STyDFMLVFPb&Bd7-*li+hr?wAc0YVHM@I4LYq0bVwuW7RJx z{Ps;W8pU2KHw*u;_d!Y#Oz|9sfIYI=V$L!>TD@?RXd>7DJ^t6XvSJc93 zLP|p+%&n*yw{ooXcA>$5pFC9ffxWeW-bwR=ctc*2Bue#1fgJKjf`VKD!>E?ZL-ulJ z7x}Qj4-f9XcwJGrmu6a_*SC*iq`)&k<6>JrCH^j$X|j7wuCpspdIf~?-2VGw`d 
zE%<^)-0h@DEESsDQ1NKZ31y4`vkS9xC6&Zjlnr}=92u^M`?XQRIv!TGHgNiew4oaWZl z=eFseqNDv!PH4t6%c-%%(Us?5#Yvo|dD_@K%BAYgGh z0jkS2fy$#k?!<17k!ZNCUugdjKL`dIKxX7KtJjP?TwSZW>jNhZhnc{j4aoP}n}o8j zv_T9WyvdSCr@sH%9N)ixiY%e_@Vb&gzpde=sAr|E;UYAkG=i7z8o;1QD1qERg?{h? zMaDve(Q>~)0f-nMyz^5Qx8-cTr!l;K@emnhxeV2!F0bho*L8HldEyE8xA-1KmcB}( zYVJu(#evJp0d>KhTHK*t&>6vt#l1&)X)qA;@FETL&iJ)w1wtPkjD?6W zNNSa-5{)=7)kss>dmY(&>oVS;B715i@2VG2%hL7 zl|*qV^*Inw6IKNbs&7v@U0koq1g|1cMA> zhLX>|`ygs+-1YAxolH6-sh93Y?wUyUD-nXg!TJoxL$HfSuI<*%uZJ^5YyDeH+d3!# zPnSR5$!Tf6p||&;4#{sqeeic99;ngoOSK2(C5pZnp}CkHTU3kfS0e~pA9z?}27)<{ zT`D#F^dro)M?q6tnwC0=M_HA@BFEQ-Cs9co;0>9PDk=w`;@5F3x(hK-IrEX1*9$(y z^;>7ERUP12OcKZ*F)j*5h?3v7SoRBud=ZVHCQ`Nrh zP^N}Znl5+)QwM7@3N%6@8k;1TiAe~U(BT;Z6hloB5my`^>!law_t)IJAtzDelv=l2 z8@V}ia50L4x++0!m%4%BJQ|2HFKGj)NY?O27l01dic)Ac_*%wddXVj7K6VDC0;_&l zs72I<&)s=MsOWgA4OU<)93(sjir&vW_G9l(H5MuA;#`kimGb3MSFPm)LB{dplT0{C1JT2d!hw9)I>Z=bE_p#bC1ZrPnpNpP8cjUV(y>hs@i9y4x`i2j zl#QAOMIhflhoraxuT>Nijnk!&FZs6j&d$>$BaUS&`XH|1R=#%~l|if1J3rjG?~GMb zgi9y|5ZJTUGwuVT)L1yh=flb8a_-N$XEiiQ^mZB%;rcCNKXnL|fXHcGPG!*ZhlJy# z$+Z)%fIe%$dh+T3WpGs80WsqqYv_sr5Hhmzr-uTC2`HRCQYgGWVM^JoUxL6LRC*1` zu>TJ>3Ewxlmo=c6mo`r3Zs;FDuOsXb6A0n@t;H0HHIxPZxkl7(Tj`+6j>g($HbWSeQp!H-geeM65(( z7(xBIp}(2zdDV&~ZEz}{1ZbW?i0453GZGr8iS^_U57jKRy_u-M(a56V*Y>H-)ScL9_7^NHogjlyt~te~W0A!xwzuOMFnuhwHOzH- z--LxnpDGbE?&2ZY1`{(aL@_%uT_K|lwndi(R1r5Q zKpYn{5GAxd4U<-Gyd!ZxS|9Y~xrl_0!#<)#<);haOPH7)A1Vv7Y6s4^xyzQj8i#fY zcP|{E?QD@=sV1+(et$QU00xtSBiZFtKx{AW(;Qi(+C6p_mdU7ZJdz~n;^955Fki3E z!mPMbw%%K0SQfW_>%tyAmQ+;&a&Fo4so4hfJa-w4RfRly?A+R%(ZS4+QAE?=D5@I= z+c9K&JBBqY>s+mm(%4(UFMjw4I3iO{R9CM(u2E(>*%(my%H%ow$eD^* z9BXLo6?&BvS0lo4uzSINL3n0u(!wmM`jHAb(h5PlN38Y&FC<_ByzJ|$R^A77I3hBf z&g_mpGais_@LoRCaAwl=3tSBVQdi=#8_ zDV|`aTJn~X;&(Au)N5Q z$?Yu%$0sB_6|{_52vxnm)3pU$!i?O0Vvsd_RaT%jc_TpF@a7tCnze)vIviw!OnuZj zbPLu<6IT71S!NtKNs}U#bnq0Q5~0)6JJB+MujhFYF_zsa<()hV-H+An<5hn=&3 zl{u6HZ+~ecBp~pBU{$+FwrX-Y(306blt`F&L>7deFA00y(`s0&t(0_D)B28~Kc`Xw`rge|;RdHaET_sy> 
zRGq6h0DalG!}~(%;DggIyjUJ_#3YTk=WT#;C(xsq$Mg6eGXje|@&L<7AW{R^8~sRk zF`jMx^((1($?at0siRA68B9E?1MTsCl_)RtvMbFWNzaOHQchpDE$qaB&NrQ==0Zi< zdXFrdSu7Gx==7Eh)zv+Y=!0gkSXo808@)7X>V19b`4n)-;Ga@^Ja#{Oc>Mg(-Bog| zDMU}mXgTgXm@1lfus!2-RjaN^DM$H}^?EkY5}f3CCBAwluTuy=Tgx#7y#k$0S&D-~ zH)Z%uC_Ian1W=u#R9r?wPxTM=)>#(!A$`?sy-b{(;u@nOBivt_7X(Cs3COcV8%8vHp*#r6XIfc~IWLmOp@)z8r@$#GjXbA*RbI5S17!8b1`PbV(9L4e|#T>yK-NC?v z;_hEfhjEpfbmEZjkRdNSrBhN);hXphE(Zicx0N%iBF0Z~2)Q5+iv?Y~cFOtC>Yv z(r8*RJre$QFi*)c(UU9Q2~?f$)+SHTbYaV_hRuH>6UgX44Z$cWE7webnxa92yo&7M z=6`y+U0IH5Jq~lTfgT8nf#=_EX0$=Io)%5m-}95UxM(;vasnzkldrxwHsff_Tn;vEs0=9FMEM!vbAAT&f-wc5 z3Fx#4RJueex{PX*uNEGs=wB8M(LoAQx*D)QN^YCMT7pH|6r^sNZ%aKIw6i{GuJQ61 zpTJ0e7$Jz<`}9v3DZ(h-Ni;1%q5=b0BeA_34RLPQ&jzX+N@CV6NP(DossiM|fm0R8 zEDMMa{AUD1jk87l{GxR{*L`Yy9g!iG2S~N(~i}DQ-uUV2dfc?W< zdie)7kb=CW0x_Tot;T`|UFMM70Q08MN^ugZmDp8B{4v!JaDii!*neLt_-zKJ2>j2J z+402&m+h%NhF|bPvDMW^TIK~Ah)h&asenjuOa2n8W3P)ldMffvlBCI!A0p)obh7EI{4h1lG<$s^&avlJy zMZl;+#vI0aJw9k4$Hl<1yBYpW^K20?fM%i#_Z?>%;?7scl7Bm+v0hIQrkA&?V92h+ zYrz_wcEi7<=pVpXOjhAN0&lX9Aif6TN&_Zt{cjiJ(N;zw#$bi-}8a z{~F|7`zJz7XfA^Y^Gw}pENpx_5DIkUw<8D70_1MynGAbW8L8GRVFp{v={>*I?Lfq) z7?`{GzYlv{84!0gc}(51N>+(mazD6rJMgA##F!b!Xo73qoQ8N@{QM&0*!JR6_h0t0 z$9@7d2HvKbg<9zyO9uU$j)nP_QKGsqCpyAD*1fZ^H)cHl#9tv)(Pd!v z%g*G|E5{$hDvSA7NAyLye|3H6;1ufj$IZX3yOjo4eq{kN^>3v|&*TAxk;LB;wDh3l z2u0gVODpWTN{~~$VbNAp44{E7q>@e$J2?-=E~WYxD_pJJfQ=*~B>9TK08`$E2$OOy zn5yny)z5(G4Ss}jcN69mp^Wr#*ygtkeYp0eA9DU>hh{yRR^;jf3|iP-WJR!m&VIyXm1ZUQ0_;2Y-Ig2T%rBNs zK<{H#Myd8C@Z)}m&DtjL7So$UCDYW>3+Nqsmzhgh(P(?l8dz9-(<-LgN#YhEi{#PH zvBs5pOxqBED?J$w=Gp%5^9f+$8=T^>Ozv@!BI;kgl<@k5alvN@y zG#;1rhpw6(&R|kAQ85{@{%c9eSAfN9t8{b3E_ipaD}0V5yJpwLMtxts{QCu#3KqZY zb;Xyoo=1`nK38M9y+=IEH|vXuM2Dm$tfjV3LVq6S!d^oBuWg0-=OQq+IxL*Gw?80HieK*A?e|s29id7=x@~OOeRDI?uykV~ug>P%Y^S5e5g#dVjMgtNVxzt!V z1pnSe<(ab>XefBidmMm{x_D-_{89HrxxYJF-bf}8H`zhrfP-W?k(zd4yL8x1MA74qjGV7{S-Y*koM=_js9r*8^8jp~qh2k+=eU4N!HHwb=55FSI4Z%9L3Z!7R5oG|RyY*R!>69-=;U}ow zFk*x-v!3ALsu&Z3ljkOQS!ixn4OA6|S2KNT* 
z1Cy02?tc;8ExMBDLecs(TaETuJ-pem30;Vf?`rKr*!VPfZdOHT?tOBA%lP~4PUv0A za@(27+ieD>H?x{h3#LD{}D*3yOb?wQUDAaRqQH$6(u``nB;2TB@p-E%JLw@0PZO+0) zEsC_u->^mp{Vl}7rxMA=^r^6&xnG~~_g*+MaW3wuJ|vH5{-FQ>0E4rvmb2+AXEPxa zM>FsT;NjwV#LmUf&c&z3B`CzhE%fLSD;Jj#7ZH23)b6Q~HTZGi-U Noa_skA}OPQ{|6btv@8Gs literal 0 HcmV?d00001 diff --git a/home/static/img/blog/committer/yuluo-yx/3.jpg b/home/static/img/blog/committer/yuluo-yx/3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4875015dc602621ecfbb617b2627d887c57f412 GIT binary patch literal 46212 zcmeFZc~p}5|2N!BHBR3a=c`lBOd3<6R923ayTVLmg&UPAxzc3jt_g0aNHsN%T3V2r z8%|bQE|8icF3_fghGvGSsH7sOxTFZk{#?%TyYJ_m=bz`#`<(ka$D>^8b#Z;(`|I`k zka61M3125Kv z{Nn!0nl-gK8`ZzA2VQTua>@s@W{pAI>em;emqAHu);#|ne*BknNrAJPIcji-j|j)t zy^ksOYe3wy^InI+ z&TChlcSe=nIQsth`wM?~gCATEb>6Y>o1=y6GB51kaA-l}*N_$gQVlRu&!TT)JH#wZA?pxpfj78%yf}3itoQeiYi@0WqyPEwMd<(8r>&=%Fzw>9 z&FbT{Yz7|d#ZNF>PO*J_vU1HDZ_U(6SU6p$S+L1*lAV`uC}|A}5ne0mJ%G|FxkWKJ zv7fn!E3y-*|na1xlttBeq+)-r;o$Z&v5}oo>jp) zX;{pLDo;5GYB+H>$Q|{Et-~w{m)2~T#w?gfT{2F-*sP3P=CqVgv{u>K1vCUX#za>=#>X|If4i&+Oihuj#F=zQw_GkQ zrB~1nf-h5)0-s`Xi>`dyHYU8eRO2UJcJVi=hG&Z=2iabU`)z9QOWCePPBc_Ea{D$^ zL~?#DTk?=R4MT4qZJ>t;HwwuL33}wiianQTuXk0EmR8oTM$QbO6^CAwJ)Cv7tE1zsF58cALae%tM-W7jO+ zwMAB(q%WTOo@kUMK1Hw?tlcgzR=lQ~@5IiKg)ap97z?8#=?wdk#%`AIb65909_E>O zE_4%oY5i_E{l|*Jhe*=*O#1eiaB|_Tr($ev*W3%RVsUpX@(z4#RaQ4^D;- z@SYGs^gkOVAsLg)edRCuI2Q38e-yAGQT#*Y8Mww2_rF zl4SOKNWtJV>l-(P))H>WqE@4{$#&|LKA|pgI>*pWs;Hdumx-GT?jP*wlQU@=?=NeUn(&%}b z>>cO6%NkPLmlK^2r3@NBain0*M1H_Y^L9~4>KQp4;61dT@}*Ny=qthUQzCoOr$~N; z2`boaF8j^>V~M?HYUcjX4CQxrRRRYFOIk!#oFQcka^d(klk=WAA3R{8!0itSCrcJ8cPO*)jY@g4-4;U zPqT}1m5`8FP=tr0<;me9XgM-wYs39FqaE4A5dxV1F?QFOB2nGjJYUxBnFS|Uh_JBy zzMOP!?(Ne0Y^T)lVD`s5U_~KjvUBM@OBg5O@UxswaCbyVN{p)G)kUXG(;MEbOLQY3 z$by509P_S10oLV)xN>Q5>!&&R5?r;9Iv>8oF&}^ge`PjfQxB}^nvk`ocXLy-sr$m} zOW^2s>Rl~Afo%l@bBt79uo3DXa2H!m3q$kyVFox^aA~|jY65I&*UQ+ME~*W;H7;58 zn^hM{XqP`cWNa}ccE9hKhG>$19t!TkSVV0Z;S!!2aoPlSVn!UI3o#qB1xKE-H2s!j 
z>n+pGW+6K!9=+XtQXH#$=9P&;*xAa+ppK0&|QG@!;d`{UJm99Z;Tc;A*lT*%&^h6 zM7Q(>_<1`I>okj%&Bme#`kO1iW#!1zw^;R*R*P2-y~(A??J6}4%m-|8_%SAYh|A=c zlJL|1t#2jwA`impy%KL-dZOX9BEl6GQq#J_UFL-(s6S>1Wr?%q$^ajkV+Czl!$j`d zn4B}w5Hzd$`HAFw%Cr@G4n;Ogn`B6%VpUv-FQ}M^XbYpT^${JC#*XI`qJU<|;VdiH!+N_QJ5)>4g#G)<3Zx z=kYIpd7M#p2dw)v*Jp9qZ<;q@7h?r~Cd%X$5p#y28uZItXrSfog{?a+8KOcVY1h-H zUPbs}6hVCnv)p-Sb|s^Yt+hdMjhn}Qu7}-ep2<2%s&*=U^<$b8;#f$_`xRLTCzTYqMvn}cp!5{u=8XDnx zwI|#Bz~mzRVz91w5K@)F-VB2LiUMgr)2A1H-O$5vUho`DBBUM`)ikh%yz6OUt_1$j zaN5KuYaxZyLb1E@T8Zf&-7bm9ERN1LDTY#>CmrOzq1REMX-%}ar4$ze42{IRKvjPq z*t3Mexbrsx*-5?P6%mL@XKbT%tKvbDh~Olr%_4Eg9PK#`mp!qkityUEAHqV${8pD7 zFYi1=8+YK(AJ35F)_EjR%=_Y7wjNN>MSZf)$5kcM=!Gb@l%aYmbUnsJ7y;gO!6s$q zX@yya=stMCulP~_LM0>jqVAaOFKLV7z>|bjSKF6ohx1eOe%5DwPvH@D=Sd^YlM42O z^m4J_E(g{j4@}`IEwssMvd|volxGlGvw`Tdgf-1J-$G&7e_mz@r#K;mtv$*8^aj+E z@5J=+av`ZW11mg-iOjpi&gkTT6_@N5UNqn6dfGxM2sCM~7<`x}n~m5qX>NUNlPdes zo0n$7Dx@nRgflH1IKq1P=-o93k80Nyl6Iz-bHh!XZ`L@CIKh@FE)Dk0ro=RVc@`Po zebJ3Ml-VZ_uqo2H!djtPPZV_6-^$&W1D74FqJoAnQI!rY`&KH2Q-*!>>(vE5%0WxNE^8`tFoTu*DFSJFyne$e!eAm2F;itQGvN8BsCySHas zwC$*|l}@j%=N4^@J`c-pc1f|?Kn{)q@{o%^Q8b5bS6q6$V0cpP_*C2XSod9SV~|tY z`8N5?xQoV$(ylmGz`>U8H(%rvNi7v1Xsa?g0DAKGr>pK`%7P{F1c8E+u$6L->Q=;F zdNBr_OEnnilh)Q8c6BARwu+iM8%#MW!dyrmUR}g1Tx*2-=pQ@L>vL2M(g`)}Ye>|` z*2Pa1g?fl;%EQ%eYoI1gB;-d0UK!m8feUUQtfV9~b~?h*khd1J**alS4%7wkuVSE^ zy}WcXoNl3*@tV+snrN2-6c3Y@^fZz!RuS~__hRmFz7N&;zS*_7U}@Z(Jz|(+d|$n@ zRhF}2|7+b)c|R3`@*c-4To^9J?C277GfiZ2Y-ZoI&0I>)H6eKrG;ax~Ck9-X-8z4{ zXNb1(GV+hW*yqN)6MuWvW!cSpJu&1K%EJ+maRj-@Y(Yj(Bw17yTfj$2Zvx$SkTsN> zxzfH%XL0SqkH^x>GfJczsyq z^lPS@fs9>PMGOg=AEheGMxA<+6Y3Y(R2xWfbyh6Z%n@OAvH~^RBFhv_?&y<7?TD+& zDJpz8d0>b4{wZ73aY8FJ#zXO2z6~gP3qsgfxE+)zrbbQKe&0mNZ$O0f-~-$>bsO#T z{E#TTbsNkmAB@*-juMsRLd#jBuCN{Q&KX*D+kEEdm*KoQZ;$6A6{(LoK{nO87phxq zFsrAvL)NiF1+$>X`~hFewQTU;OHXzi{&uFj0w_^FcvD53pGzjCWsU{z)szbx^_%^h zf-}38O{v_GoKzx`5J8i>m8l82=Run`EHzfWZ2*kErsXb??O{tf7*iZ%hmq2@HBh(y zZpJG4kukoR60<`k3nIpYS=1W_J3_Rl1Bh25TXZY4+O7!7cu7F~ju;zDRsOsbslH4v 
zJEZ+D%8Vrnt+y-DMwXh|>3!VH%Hlih3CVGbK`hqg z2`--cncOmDpR&?FEm^p(Fi@Chk6bCw(SZDJUrcD-G(c5^)&G!~Scj?a;chnR^Q_oG zud-8TyS}bD{(P!)IHmt~;u)smC_!*QpzAKV8I1XDtmznCk4S|@b-+A-cmjrgZBXT`DG?qF8BT7K)M{&&cn zDh?JUFAeLRZF~Dwhm#l35k5C#alvgpUmoz0`)>p+^KAQw@|wOI;q7+Xf7J5~b(gTSiX2W#8qW|OF4eqy zWU^{1@T#enDs8O!itfbn2sk08U=XAjs}v44$7iH-U^1lPEh%WId0+g8j(VAuUR=({ z?8#>H@{H^|SE~up_11m{xdFnO7U&|wzFU;&|4Tr7o5$TNgR`m9aQISj?^5a7sG(>| zUsyI6+vD|{*9V~>-NLqh9-D8?-HO?kzqZf(M~8)-^b<|tby56ubKfqz0yds$WYt}r zcB)?1*|)XFan!gaD0q_*WfbKrH{#-M>GN@AcFA)E@l}|^B0BqWENaL&%IVJ(5~a(& zygtOk3NjD<=s%QTAzZ&hv=L4ar`osO>=~NqRN<4WZ7oZnNkwdI?hTU4*2nVGcL73Z z$!$?E*Xkq;=p+#0algv_7x4%8z()^vbYuGONKPZ_Ru)w;F7(N?%&nJ*Dyg^+2HS$Y;YG9cAjUIv^0C|lW zTsTYm*g~hJp%F`m=Eha^Z?=jVkxr(Ye*2Zw6YPIkD)dEvczDc}Ai8xeqpSei<%%zPpnBpm z^sb*>zsV{4a?jlmE7hn!WSuk^qtf9+I`1J*By{8Z(bY8y0-Y4R?ez(n*H1sqhw~3; z?niH3SZfk<`+3tuswI$J`YITkthIXAbEt#q_oB^5XF?h@@hCOz!NjHZtGKpzIQwM< zBmBQA%!Ta-F!gsZ-eSvd-x;Xwd{XOZ5efVSy6Hmtugt0PAXF_~{Q`?Z11(voDU%xr zr>CX(!~Cq{Ole#sYIx8KN9y9%>!{~FrR*tTj^Flm0ELA{iScBP!>F) zt~-_kw|H$eX;zez>(tp1?ZuzJM0>?nmjIZ?5!KSmwd^^4cgFGFv@3OXTGlbP zcG`&8U2-e@@IQ+=|2)}BJA-fAKX1E$FxcXNs!n^`luVa;H7_~KX9QQstL zOTes3Ceqjo!HR+^M#-aY_|l*eCOkvFJE!XeL0}%=Jv<&<3ykp_ma>Z0g8qZrF>_pD z1&W%V$Z`PChb!Ay@TDaD;?6Fe8U?d@O7$cNJnn#+lrzHVic%6eAPcP6wxkKmbt??m zJDLzP`8mBo>prxhBvm>)%U`ltx#{P%^y}fM?wKuWu8z#B*$<#BgNhkW!5ZGLRKLNW zUxmpQbP@DT@&EN6CykOXF645zBJVIKUo7XDw>x$V@pN8IQwfLXKO3VVIQYP9s#FOh zHGaQ;cgHr@8HVDOKrs;+4obh-!YuX1K&?3nw7FBK?#OFO4+8G@BKlik_;YlC zlS^?~odL@;8e+1;Ubwa&h4+)Z25GM}Uv>hbH#F}GZTt!*sqoo!^MY(vUyxXRk1_71 zh}SK_R1*n5?~)cw_x$MweYZGSRDpPGzQ<^#y`<|DAtr&IECs(v_-K?E{o~l9#myRn zR;(~4!Qs}RT}z zn9H*p&&vJ#h9$pEcb?S*XE@AcIaFNE2x(vDOt;N(=S0Ll)p*1~Br}iry2#Gst2wbL zsp5C2?xAy9tSDaYj@)_~R9H?Mti*UL+Dq0NzOB-2Aj-uZL_wB%yxW8b*42n1@acHg zXBOJ*sR>b(g&@SJU6yYDy+#5sHt^n!c~1^+9!yiOFkvLEH{cezm+^ljPs6XS=nI@u z%JBjuk~uR|8}{n+IIl=19`>-NqG~I!vMx&47TEKlQzfZiN0Abr*o_w6u{z$96EaL6+DA69c4zsA5+(LTr$-bb@TchMTBE)ryN=E3X*cwIH6sT 
z=`(IVLJqaonRiLZ5!nr9Nfk_TXIrY2VRsqRw^;fOWT7c;NZv^>@J)e;)|?^^wr-el1ZHARKUcA87e%!bUj-_TP+`o&e{Kn*0LYS zNd?Rq;|cw*NqEVFX}nD}Y$5vZz9W(JgHnwO>QiB);maca@Sz}B6VPuf`QVN2gmis# z**O$F+?=`exL#WY#xljy!e#(ka?;~v=QR#!9^AgTU~fZ;&K~*6{e##lrP-RkWA+AE zasKZ*boQ*nLHg0uP=fy?X1m`5bNFaFSdr)J{;gY{)5wO}6{hAA?TM+6fq_>Ked!9u z5fv+Y2h4-BuFQV3t$sxPu^g=v#rC6dR}?8arXIT-`Xt2@gXj)-lq6w#UgG-JeIN-x zIE+8ZD*>ZwGZZt@mCk{RsD+e;#jvvzahUJ~LtZMLCzbyfDxKsZ8Q*+v%^h%{2-rJf zvORR-Pd*l~kH)3eY03hE-pvf%!eIp8Y2rAi{tGZ(8KhLg`dQ223Tk8 z`b(u8QO68^XZ3139NgndiJ0&F%`v{rF7@#OsC9KFd>?Y^t3c(Lq1YfC0>fS?{JQjD zgiyHLS$q?xViCnNrOmq2^*uee??2<6Bc!$)WKZ80jHF2C*cowul9KlrSM>O0q_8g- zVT$Dt)@!}};jc>^wMlnRZwiLEx*olfu*ofd=C>lYF=VY=TG#D)tW*~PG>oJqUARUJ zEedC1IxhJ**c3rSaI!*9kYXqovKR9}^yjdVuT-=`Z3n zX-;YHMX%RZfk?k&>YjIKuR_l`)Yl~Mm`P1g&Dx%~D;;5!a}EGKQTVNWcJG#Y*hq$b z&-Sl@M0G(lbc&Gba0kqH_STi8PU_G1zv-zAvLTyoX^v*2XXqT*queHJw<{%_Oj9%q zU$h$?=_v>pYlPbM=BB>iN-=sj)69-2tO{(TVrA8oHug0H1u;^Z7HaiLPP;GJKe4(k8cV6iL&;QD#75gv1<_d{O^~HN(o=>DP zqvHHZ2e(!SzH*GR9s@&Q+sFi5e|=G#5h#@0#q1!t7QNONEsrHvQDyf? 
zf8GBDqpo_Er-`AHY4dDV(Z+HqB&zkIsqlUzFA}iwc+PY#%{J+mfu3;5b>a2=fcLYv zH?%vc%&2BQF%l*gPe?WG9IQzmKie}gn}_s$h5x3yLH7EcveBu?)&m%hkozYbt$_At z)J!faG;c5EII3FJ#lyVeIA%@XrMV&6pxREn+^Kj4dPm-$LhAGX6Vu{*(L)}!s1RMSL4L!eeGk=~ITxoG=i0mQ=_{x52CqR+pGk2s!+Pkh+sCgk6OSfy)>z>&N^1NYris4&C9wG9J4gGm{NEw-_7ugutCX(Tx-Vg-D2$c`IHe+ z;E=J>pEJ=9Wi2J(0!S-*;^8XWc4P>C0w71*@-=#Z#l-r?h{g)j+- z*zhf|_tq~f3MUpAN5DJVX2T99@p^@V!#E7462qToC`j?hx`Vsm(Apb8%Eca&@PnH- zXgR^b&@@FqNz+Q3NIgaH_eeV=?UmikAG1Cbpc_wdHi3Lq>S z+(}gL4zzo0_x;wqP~-YlEBSo1b7i7&(9aPC#7;4>y<$gGL+Xwv^MYg7NSbD?|MNzF z)NBQa-bNEnnlOJ+__kWX(F(1cfilZr(nAA*rN3H#jVL&lKj%W|YOPJ)2~XQ*j{_JZ zHAa3!?v^*fW)0Rrt;h6oY#{TAMa)7&@qBIXuZQEQFU|^GJxaqpN7syHi!A;R&DSthe7b7+J0vy8IQ^xQAlyCkx2xP%z#m|{tMz*SlHl($ z{(rs%?zwtgcz%lTFTH*u3vBoI+5da`|8q?e+z4ubs*jau7Vr%0Y%Q}eCUs&vdv}iG zA)jTH4MV|Z5x7;=Gf!5VwZ9I@kPJ*C4D+=zDx@osE-Bf3fJ|xp_qX4L^MFL78kHA9 zN>l3;`FKVC&4b~dQUBchh4%^&vEao-pHa2rXog}%Nvm*?;oDL$X)hi{5)_BZ9 z8*y%5dPztZQGjrgo|C*WMBLjj?|g9P7;e?CfMdXfl%@on|EVXjwB=g#T1^deM+9(1 zQIY;|f}bQ0)zVB8)VUUEDA;&OrN+q0s4W?fVNK>+#n6CZWi3ek3G72&8ipo^4Ay~Z z39SZP*vd=&&PTFC+oX@7c2gab=`i)L7^X?$5r)Kp(f5~glUJcmAZdEYb95^?Y|(-$ zRf5nhBl@fyxD1_&VZ+w|&;9yj_K|%51RqIRDK=J;nH9)UcQh@%i8<6sIIs_&? 
zFK!5!JQpZE2kNT8YPFsSE52qv5dy2i@(ExCL&IYnnC#zo%t%`E-LpKXNXE^eG~Dawrd_e2gj_E=(9r_C@nsI9$4{L70e_CsGBfS-MtAK?wa} zM!zsKWC6~&sa-EybDlFA7M2cVz~rHgrN|>7%>c+MCr#|fmV4l05*|EDo2l)OXz6^~I`b52BtMvM zFrEX`CSm$s%8%p+#9}dhDbaLgPdE?iE9uQc`A1GN}egplAm?dww zNUNnym1UV_2wFqi8W;K@d;d-wKR@!bQEL};X_8JPy>5_!$4h#1(&9$i3=^X#Wk$AR+cHF|FGj-}1Xy#qd9;4XVfm)%Ig4clIx(*w9?9zZ; zCVg(}NaHa}^#`PgpdmQhn@aO#%@%B zW76jfn7Ib72mJ_wdL^mLZ}bqY(Atb|O#mY`3{?vwB2YEN^~fv!ma{%I-yaRfh)NPY z#@}u$08_?^WH7Bi%nx~pdKxf&-8tUJ5bt5ln3<3fiStce_$TaSa4fq z<3dQOu2q<6(@IyUa&ewciH${QL^HK1x}?nfb?!=kY?&P=>fDlL8Pk_Cx36NUvLYp4 z94K@!MLk`o2!W~wg(VDZ6ozA=`yj9@CbC*?kHZpl8uoTdbLF6_&mw%ZX=(W5!~s`= z_{Hc3`iWk$yNHS?<7$-DMFtwAe&+OBrp!2-k6K(wDO+!Mp^zYJ=hfgb6% z6_v@nPGqI&((xWkS1SJO~;JasyJC`o*~`1!ly;#=^k zmIZnW+PR*5x*LARG=KE-?>27QjtG?sT@Oby&zT-?c;b&Fv_|-qs^(iLG+|+c8D8gi ziv#i)P8QchkdJeUbwd=*5IAIKbDgXbz`C_7Il82?^YXL>xfvs++wnxh2(_)ub`;37 zl)+J>7Ok0l>&#tk9%HG~9KzepE|oEIZr=y@@Ty%1V#a@;lZWyHhYY4AE8U6o!*<$S zy+Qar25qrC(Z;Av$YJ-iE}OE=1Ern9Po2e%TV;J75A=tZO^chEP0uGDY1{V*0DpGp8Yt=jAc!g^)^vOMR&&_sl&<&w`VgUPYE)brQ-vHH<`1s>)^!4P5{h3jubyJszlO}ExFV#RFq>ODu z*(w4y_?w>D+9lP>NW2so!isJ?C7APl9vF9# zzA~N#(TzVk-gGeusMt0%v>HgOivmN>wy#?Oab9U8(rzT#lzx83)3@QS>b-r`;T#o^ zIaQY&YpV@DTiR9Q#;SNhc251uCd!a-tOn5SU4ojLv7}F*iAJ)3JA9zku=#2>zqn!W z#9Zo?$w&8txWzN;`L*VDI+&KJ3k_T%~XYw80U|JY!PowVg(XMXtO(2|W~0AAj%u zxes1J@1gI@L=T{jiXkq;nwF`qVQ3qLHlvDemgXTUZOp=>-~1XGO@62huW0@TZ9cVc ziK-r=SW#N1yof2cBRm#XfZTd8X|Tsi)HbCSb|Z%E6h9qZ{@*-?lRd;cSa{4ttY2+R zZ0_{63``k`@qlhx@<(>*mtrsTg!tWYty9H)dCMplg8zoopyv+p8StTCblv$ZgWhJn zy#Bqv7sb%AdEamtsA*L8J%3JfK7IKyQI(K!QL1^Jdy13cn{gR(S$eVTQjJGarAP|o z-ciA*0Q%pVb$`_)((nmq#0j}hErQ|Qo!TM$DhBSo-D+}5`N%7H8BQY~Lh@#giQ}$& zw-_*MEW;vycnJIE%Dbf(YxUha9;2IW3ZTJ^>GC48;8IDk$c@f9Kkm2(P$-={G#smY z2~xiltUqYRYS;rOYprz2)pqaw1=ddGFijtC2INx3yO4hW^Os!$y)(L71x?G;Cc{!MtVQ$sTiWh; zf`8U{b*J!gGtRBlVZJGdkjg2>E4ol5^$0JoxCcIA?J~MozpRR3j^lBVTFNz-TLG+z zeS`abK~yiMGjxaJLU+$8=x8g#3xa<#hKq_~mhtYKhj5LYWO`fu;6}kS+x8`n;1VUZ zIt`;pKW(Z}DA#>z1N0m5$1v@=F`-(~MlDiujRzeVRw+`7{G!`}msZ$%Y4K>U`oB6U 
z`p}sa-q=Lq=%7DZ5lrL}rd;iUNnqL&(u_FI9K^s#t)2bc?OaUXqNoPUjWUuA4G{h4 zKI=m;)f<1-p(oY3`6*C<1sfkTjDUs?v+nUyrd;*i$pWqkw9=pdiYQPJOG=veoM>;3 z^w`D_T2Xv3DTX6DKh`N2Z$IKr1f#%NhIk|7MQD2VxRlGS~M<&Fy zKxpJWx)|LY!<|r<9CSsi5Fw&l$`q${%>;jGF_>G?=5j*i5{I*Y)SNta8di?@l$+$u z2waGb*q^rhPv{Zv+jeGYUMCUurc8H8m`0U5bVbK;w2d}ZdIhSifTZ*5btuQE?S<`5 z5^ve@KJ$^LUv~5do+lbmcRe*}`|tbYw$SPI{c!Z{09*&;p;oh>S=sopyr1}&{Q4@p zm-EKup8Xg80X`+fJRiS{XVGWR!M#V09;f>C8vR}g(`fF;g!!6U9FFRV4J#o&z*Rn~ zYLHUuDkl~v`mK$a|3N)SB;y?%4MK~5k10kVOe z!k6pSAFoA69gCsvs;p4{zy+H3f@bb%WpKC+GXcc zZSm&Ty%+t;eH+ArCk8e4Z-<876X6+NQGJ}AGRp>27AM<~Z2$ef4YQYOZSRV~G?0Uj zh0A=nMi2V=8Y{|mS(2lj$}wZf_rb|gcLR^aZO=C@m8EL~;8EcU0R(0T_pd4riY!Xs zZ^m-6SdIQLPC5A^ck5zcoHNy6sM#rV)XmEgrhN7iV%xVJ)KFC${w^vws2WRF@4PAh zel;arNNc{}zQG5#Sf)Mr@ur)y$WymrR*<(RV<7iPq^p~YTjmU-oG&5C-L&P%C9jW^ zgPS}H?tiQzDR!TjJLxG+}-^xNUxX}!m$yh4OD%{S;1xcPF@XZQy+?g3-2+1aSK21yS zu_@I`5H`S6*>-{@5NST@BGbl<hM8u@{Nj@1dY>U!MPB4q1#iy^Ubqbcq1DNYzZ=T%*LJ{15W48S9!i`oR zk5?U+p6|g{Nt4f*;iR`WT`Y=n&0RxPEimwhjADn&)4cdR zM?NjGL7yG}_ptGzW7=gr6(45Gj|-+nZ@B0J_J%WC&%r0}f9o~w3`c5=5Renwo? 
z4;?u%W09I0FWAr(Y+?Ct3t@o`csyo$GT)uP&Qt!PTXi<4LHikKFKB25 zhtk$$w>&Dh=KK3_6Jn-o{xoOgKF>iC#7Q6Yf|}R6@yh0)+Y&o-1%*Mlq>A%_vAtD= zT*hK2{DZ&zIBX{d$ zb6x6FB*9#PG%ZV9A`pwR`t$ZD7)V8F^P2dB+cq=loij7r?3 zto(7$6cH&Ux&WL5Pdd}`1Ljr_b$hRo*UZiHLFa&GGPx&!=Q#DK%J&-(bn;ZW38j`ehUx%`RoU^^|;r+O!OBBFiBERD2|E!$X3j%f)@MyOe(Caxl9r z!-1#<3jLS$elczFY(Cj#dGb+gZtJ_}PLRVvZTrqtL6rnSl-OAeh+*PgH{nqCwI1@h zWA2igDycLeFm}gs;^!6sn_<;>I^*^(s#mcqBC|~2;9k+n8wm7_*$Y+U$sb-{TdUVU zVn{caQ2?BXxNx8gFOC3(!y;6U%-?9)vH%ppv6Gp>dkTFMyS>(fY|(6b8hV0C04mq~ zLy+pBaNISJs~$H;^`=g}29c_>mj3~W#Es_BeNJ!UEf$jyXxtTy92h?Zq-l_9;-Zfa zFiJukCP{*_;zFX#h(41`^y90gqWJ>HoED)niBG&nAzud_R()x$ifeDusskF8ES!(eq3p!R`}R>dtTQMU7Jo+ZZcnExY6SFu;1+}6nCJ$-%F5T~M;E^l7=jhvEL1vC9sMnnr(iVpACISt?Yx)nvkiECz6a9MYNXHH+17cM?7AIeME=9S{KgSu>r0kr=O-dJh z_wn+;Ms26}euZ24llD<>-4CS{^1nV&;eTD5(6>I&<;l##X$|u^;XYlOp&gx5m33kX zWlF?cmjxqriCUsH`^cXGPz+UJVw*Ui{?U_jtS&cR&><_xv#p5S{a%jys?c7C93f#3 zcf!#)BkYf7GzFaR^3V|5Qel-GmNbDe_hCJjs{2 zVJMD&%wwW(wPIV^TqDm6MBW42sl4C}mnQk;Su2vB2o&73gzFAnOgOK0p#KD!Li;c< zM4yeT3MK1;P4jpQO*k}f%SfWX#CUpfyLUfI-OR1I*AjGgl*vu_V3b9lFCn34ZY?(cxX*~lb8%FydjKi$ zxHnqfj*H;2ZTPKbikcfWgvsQWXVE_5<*$y)UHWn-7T*{esxlGsEnGN1=m|j(cyB+_ zU?3;xU9$*4fZSZHtL9fBC?6jt-%8(Ql#l6gBBCW=mh_A3P?LUpob8_$>!oR@Tb1Z@ zi(bpV6r&*g=T8=hc*W{ST(5Y);L)0B%91p-sw;HMrDjoo@3gymyN^#&@}hKFP5rth z(|rHB`F%@`>mE0lnwdhJIie7#q`(Rnm@?NOHn=*|$7{*E@5W|v)V7{(UW8Wjy;XHM zK$z#(UWvVe;y6y_9S)dlWM>4;@dA6j#LdaT)`gw5=9W)%%Y14K|58!fzF)|UF_mEA z!P)lr)?d{C!gIANfZAMwtsEVag7&U1Wu2SZ$V$~JXw;oGA0DL9uioMQh&%lq7%0ot(NA0A=lMG z!QmP@z4@&->X4H3n;s-~=2fqCb9*P7Ax~iglL1%FK9b(pw?{$oAHEAyhDvxg^eV+v zC&Ec)vN!T1P~Z2CXBa}v;ITN@TB1N1Ab##1T-m(c=UY|Xb5oYHUcvmCZHA*RnKK*B zB?rzgAFafDqQCUcBGl18*qsgF+dI3emt1g1E31lgzv*WMPUI|SxNsrI6*Cyq`dmTx zh6_0=xi@K#dA znP-B>D>{bAf>gJeh+b-*_)>HL9NlZ2`uR6daq9g)>C%f8L4oo2ijjmJtL@;kSfzu< z25Qpd`aRfzyh{mRj=7`KivuK~hbZhO#= z{ao8PXZ2xS-qP`}`?I z<#3sHcllgpM2cWL|Nd}}dcCOanPM0~Wa8$!%k2Kf7?Iuh6ui=T6>JP&Ixl{B6R3oQ zz6ykgeM~|x)ljq!>v6Bj%`0(t)A@pH^QMc|o++SzC**L;FD_>x%Mqom`XXE} 
z(uX7U5(zENa-u?>o2*)8P2{bE%D(m_g?1w7A&)aP{(2`Zzvj<3i!Gx-6I^DbiBg(V{w4W7%_$}-TjM_I>kRDxb8tzQpc8cW@1%ti*@v*pg+{Og^s>9#Iae=N1icPDl%Fr<0# zEyr~aC^`Z1OT}u(vCuN9ASjK z2>xK}0st|lZYcj>xoPtFT=c_r3|TJp>+>H=cI5pcQEMtlg~pyrAGQk6TsJ>Z?E?B4 zyhJ1Cqs*8Mp+%kN;%(~oB;5WYOQOv+e9;vst24pmRF!xjv&Ezf%_Fx76U~V&WN;7l z!r#oHt>3UB!vTW4rQ4slW+fNS+aTsVu?|CQG$KLYGCv7Jr=d5tyAn$wKCMCp_)qIoa_02C#A zPK-PqAMN4uQq|o#HXy~+U6$S&rDD-xAyO+GO@Z)K9hq~ry8%?I@!mREntDZ`9q_m# zPORx`be1#cy#F@RnhUAcC15V&t5iu5-#_$&Zx;~(9sG;KOuhSnY|Jg=H+b+W?Y9Sf z+tTsRzpVK#-=qj!nko^?={E2`n9gGL+_Nm-|Dn=f*v`G>)kI3m252Lzd-7M0TRqvz zagTtV+K;OgrcNs_@~rTPKe+EnfzOqs1^Z$B7QH+sH@>ja~LCL1yGlG>SZA3dVZhlWy0~=9$q!`Fy_qcaP;4V?V2?mA;67IpV}0Hx%sg( z-Vl^}+=y%XCjDS#mGwt&&Po1iI0wJaod-xi1{5wt=73T^zZCwyJv!8*6zYVH+?UvO zfLiTCSzWhX4N?by6t1ru(z-I1UD+1uR~GLY_T(K5N7YtVr$MzV6ZN&vZ~sfTC)BN` zRQ%`LW-NxZCdQroAAl;>sSbSsy>up^;Xz|1RJpC4Y`OoK#}09=!GL9N%^P_HN;Tv& zKD5LOR|K_EVs0C2v=?PvLc zW;Q%`W%O;?-`A#Z^7_(`A$mhSC$3UjS{|r0q$2MPbZ*{5Fkdr&^b=|*mU21+KAlUr(BMVwsuYkA=fahMF z_e(fRNcExmvhF?c{@@6ywmBL#mG&usY8~c!r---`m9cc&Owq6$;I0Lvug?@O_0xVT zP&GR9y#9h~f!pT={|ZMBzBLM&fsX4tEx$jScYB!VZyB@I^kMvD1Np`LLvTHT9R;7K zgB5qodnP_4I%9cW6PcLZNCNEh3qTxsEWMD*(9G3jHC7q_^E~^zjshCo0q=|{^5oMB zQaD{TvT*Ry89y&jfGan5AWHzoPTi(L@yDT#WmF+)9)1Cv`GC`vI@BWQ{Dg9QjNjU0qmL-I1)Zx@_ZGP-CA7iZQ#Q&KeL0#^Y~MA zBhhcy@SI)P960`wbsO$2!Ekvz?fIT3aQTT53 zhZc(e;~FwiV0nhJ=$)bJ%)zTC{*r}*JMfdpwBXh2{?&J_nmP)osiwAxOHs&gdETbK zmK^V91w%@z%-`0ylrsfMkLq0BB$~c3DgeuyzU}h^glkr@dw+on=z?#Ll@{Zg)GYd9kLBR+Rs zjwQCJUo1~`DA*YnksCt7{_a=(&xHog0{h>(R&#*$svx85k~Cp|rn6<#R@502N^^9} zEBm}NZAn&*hSSHkaf&KZ{mKW&M3c{NItF8Kb$^TE&n<)qZz!b~26BXA-+v(JjB0#By_MMqUFSN-?oQZ$x))g7> zxh0oe2&gp0B)bm*a|*z8+}(Y$5SX^9cCq7c;@E?~{j06JI&vMXAOIl$;|N~#3Bo_Q z_eJzCgj80u!B}&qKft~|HE#ENehyhwb)ezy0G_Gdz+g;$uF>15IFBO zPzshTAUwip_gmzRQcOQ+?9ubu3uMv~)!RGx(w~}G&77-5>D-K*4hz|X8en0(k z(@M~4*Og^E{@wJiKVKa_@orJ^R=p=4X<`+gWsD5VoDcs<>45)d_T9T<_&*TOcNgWq zM>MbQ#jm}puL2{g?VxbMUJ&wB6rkE26CWAB3VF(w#Cpa&$(7}T@J_gAy5d>JK%_Kw 
zK(*zXRA1R#1@+GfTe=6WZC7e;onG)Oxc5~)J)&#!Fl{mfIED!oeS7SjV%32&eMjFu z=^%QN3y(6dd;VwYRn8l^>We&)I14(;Y4S3yKJ7JGMWEIZ6uMAJCwNl7CcD7aVroV-N1w%Mpj-tAkk^NFkf5l93Ega}r_sD?q5JW%tt(LlGB)(C zO*E&v278igR<29W-0fmm;eoNxSati+oc2oJ1+XA`xP4|!g8k|UPK{BkHpjCjJ2cwl zID56%uz2Y>1+`k;0&M<1=^#)am&=~{#N0k0xJyYB;|21jSic3Bt4P-JIu-8YR-3ak zRfYKx=*N1Y%kUz4o5rghu%9yf+7gssDNs;$4O=Q=uAH1b-88XB<-5in4F7@~Q~WZ# zC#|(3CcJs6V|!LR7Zml6C=(fWgU`}>Vh(XCOQTC9a911q^juzN&qKXq9OgAeJ$7Q^ zAxC^7g_#^^!o_R>0Ery2&~TvQ#XQV)cr60K>T$icen4RsV>3u6&i0*}SGQ2FFHRGt z9?*%^iN%dH6uxtf6gAI<)UEZ)(N!M?Z?rpdo2Z~6*I1m?b?&vVAbvuCLC{<+t_v^r z$~F6`f`_O~ife06A{v!kw*dRVo8xX23F3FM82EM2h$4)$ewAA@hVwMzZCSXKX;i=| zd*&EG`4B&c>Ig6iGA+XbkyAd}%^T`;gX58Td_zub=0M1V628DPS-v9i!Fk?Qk5Pyc zj$?r}`IOhTS({BLyNT)ELt_?5Ul&eprYQK3hFP>OEC$^r~vOQ?pz3#i`d%_43xdl$Jn1 zmv(YT%)+{AgRYtLEf-?TR3Rc?@4@MQ`G~d*bwd!Ki16~^?oxB~HU#Xx!;CQoRf2zW zl8w;qiaY4cR$mtI@d^E<=eW#@nzU`yp zdUV$fL!UN$3*wz(TB0~LvUKgoLiJ2>{hQ~%SH*QfxFj>>1yVi~Kv4aVC+e1B&O$AEj=LZ0Lb>=XxxYtFsCHV5Mf zN*sl;_nI}Rk&+Ro96 zA*U#HuBoR)!A=BN)SR2|pnf4DG*$#7e5~}i8*b@j%e|%gAyjZ0aJT18)a~d)zR-YaYz-vKh2#~UbWrpk4pPtwzd*L*o_pbH`M$V)ai4Pq8q+{BjB?y< zq@()%!TFkVY)8Q(AH!fB+y~?7SPGb+g$7t3Hbj4rRRf8t>da1`+^gy*@LlhxMT7ek zl@AgoBGYV-rF|l|oQaG+)OoLHs%rdR!a~*HfwXJ%9M(7oHtp6m`*fqB(=s<^eH`6` zf992l1|HabkWZG+`w4FIDDh7i@FmM#6Dn2_ak*XPI<}g11mG}O zm8vGG8)F`gX^g*UUb-Vc;Th{pafo^W51+r96w7w(DMc;HT9~x?u~dl}DvlOc{G?EM zXs}Bg1@p(K8vBlc{ulHvac?x!2ieXQ{@LjyX)F|4LzD)>Mwp-Sq;KTTLYM!D%rrMWS6heI?dw zr&wEs)z=FmM>wOe>Y-ICTaF1=y*x%V7sToBO_~hAV zLr6R6DHxXh0e!Zt*NZ83XLq|pu(#!zI~Yu}_~;n*DV{}r^V4__mA@Obvf(r=j<=+uzwBv0c?W2W0$9{!*>T;4FX ze|N599TgcDIq%GnYcCN}Pozi9N(^?JP^2J|(j`kRCx1A+))F^$wv73yV_8<9xTP(l zEX_RaK3Y6xQbGRDEdGZ~u)v<8nk)p68wU*C8@$e0zL_izs9r)>9KcdkU&?R*xSyq# zMh{%Qw~-F_sGRW#c{IG!%M>XKraT?HP-VMUylmE-G4S(E-lMgf5Ncm?FkCDN@(%N| zHIF-JySj22g0aV7@_5ywD%eb73eY@}a1^t)+Jxb6#P(uu3cg}F2$UPL2X;{JIlu`` zU=M(yl%H^0?ld87bMe~QH2aN*!M=32yV{Rto;&m6y1pz<`{fBOHt$Ha94&VnHu72d zRq#m>PL||%gn>V#2^GMw-*ZPAd$F2=(+oz(N2MVDARgw=PzFH~oR+;kWGdKTzQWP#u 
zgECcKOShJvp7B#TlZW?Qm4stH%J;6&%CZqw75kinY9~=qunF2nFHrL+Zl%hFTZIHq zTjH#pr5ifkGe#38?XmKc?oYQ5mFMe{@Rp%ES|*GFLHRtSb)wq*dOpQ?#l!=9Q8Vha zlU{yGFVR&6=lbn^yMsoSMQ~~8Q0y6~PB+7gY-an?)664GFSIm2i%?0isQl-r?TWa_k}9)Sox$v>AGO)=2-8GSl?D4d<}5ZVl2<42KipM z>Xn;eEn&E_;Fs+_eX#T5C0UpY)#p$;NIIkFC~p~<#zJx+f?JKnvaC?P?|mi*AACi7 zXLyMr*sE%=m2WQOC9tYd>?FtNPBpJS(_?ONn2ZI^N!nFQva{aZ@)Dd*vYqm9mOKim>;wg{Wa%+3=bflPHp>SCOI!jiRv=z!>du@MQHJ zInq8@W;AC<5~!8=9kX`aBj+hrJvK>Eng;I*5w?yK)5GG!{8QcNJ>7T>7&47R&!pcrj8PAAcr23ON~RDkukuO+)lGK+_tgjS?15a=A zQVRVrJ)OwZndGC#09`dwcSqGoQ{D0YKyXj5WT|+;xx~0JQ!J&5W|=R4rd_^NXtRvA zm2XUSPMLK$iokHdPdKoC`Ttma7<{Ej;no#vIXPzd2o zk7xrc$APqKa&yJXL!jAvS!EVKF6-=e8`aJ3PXsvkPz8Ic;uot6RpT12#M%x;?Y(&bL!%fP*NSy8kFr;?y7fr_%-Rt0J*XDy5KY*-@I&&Sx0uU zVj7i5_0-rxwk|rHpu)x2aOhekj4wcs9Jg0(x>!>Dn8{n}5Hvs&rrb@c`kDvI6s!H@ z>kVtTJlOG#e(Wr(?GF8 zx|3a9yTnyLx2zN=Dc$-bX>N5x;fIzR?UKOzzMV|--LVIiI+;cs$LT+C>)L0^j^och zj+L$@S23TZwbCEY*>qmw9zYeWZwq-Xz(Q4H+Uytd_3PiYbhJ;ML zezBp%C|7S__>b{K^6`5_A?!FTH3y>h z+=LWnQ=+*^zUdIWFGoAp9EvShjk4OnowfadiS-ogXlWFy!Ru=yySb-Q>kCdNe4?+@ z$6)EsB#E4)O(eN2m7#`9fIC^1w1Lhs9r4i;do3B5V|DXJTANOm6r!+Eq!a3BM>5RP z)V4x`gpdJk3(~D;+^wDf@a0JW1$f>WR>R=+=?+VwhUY{2p9>V7<7DaYu$X~HX=tTc zsf6b+o^Dl-pxS@CBwOs?6}&_?6svrb#7a{5L;up8DBc^~zb2=x)n(QX zS>9NO{c}fY>1EGvOMQ?fJ7;R=6JG_CypQNJIvwu>APPZE`qc|C1>tVzevVzWnSIcI zXTKmEI=3f)q=WRdgEcS_6AqR$@wu;?jXubl+ExY%_QIoJy^Bx2_gUos z{2E-<_x@L|NyW^9&?#-uds(LJ&Nm)*N#4p})BqKliQIYgAQ)`)uMYCvSj^CjhCo!* zD@2{5GzcL{DVn8QuoLg1ieXNEhOLlexk!Xt-2TVx-zzc!-P!aZPC&NDmlnpYQKz>n z-ru$}aJL@VCwN?{5U<Zq|ilL4NXKL@0Mth}d`=UaADA1dlgegG=}iA<01pV}&2 z+I7fA3dQ4F!so_{g-b9VYKIQ(!vCGhXz;V{oj8sJ-n)upnNI18Kq=tJ}wnc3pw z_swLn>)vN&vC^sCdbK8!X4l--qhgKYRW->{ZA-uZ@3fD!OA$=FT@)3w#CQ}B!B1SX zYPrtAfOv%dzJAYovCn`SbSRo9y68b0kECz&9fQX~=EZMHxdCGytj6y9@ZQ&%h=`tl zJTUHsy?Vu|R%Ck4))xXFgy8WM3 ze>~6|DD)TJdER_yPZ=%nK|mfYm2`SKeP{%O%uguOZ~I#+9i10EIkpge6n1=hdi7su zA&Sjl-Qn;$0m(}1Y{tH!Tr3aU!}39weufb-JcgYc&MyERK#ku3n5N?7LV)Z2d?wfPLZeHj(_(%WuH&^Vz>U 
z+gt^9v*DNa8B;j7tayFyFYiu&-vIUeEeSSJeVYy1cq_G?EVjOs=Oku%Jv>S1HsHed zWJM7KVppnPC_0p|voeG`t%putHS%#^bq@Dl@*wypL2?<%F*;D7Zavi>feQA3^~YG} zvEJV5_rey2{NwZe&kna{n>;2em!2^0C#D7^&=u@bI`V8}sEEZWcA`xU%9&F*0P#9Z zn^DY7u4>dUoYC0N$g<<&dX9eJ66Hum$am3&&^9K)u+;SGX~Mfa@>eO~+wR%=I|d}# z!ZqNjEQi5g{i8wX{t_Nk;>EqiJ8cuE578J{@;)-E8hVOk=Tq(uI%x3Us!*j~R7Yb& z@TwWLkx@Dr6FR6Dr!q=WGe_MRQdK`x`X6H zlm8C*f@2+8vnOo9*~VocKiI*RE`S|=EkJLN(o>C_E7vm35jSUT6`Yn(#B|5Fwt;%* z@_@V$6NYJP7aT&yvB3M`Q6QO?m#8@}%;HNT&;r~-o^qkAxcQz3FPbvUKph(N&xG83 z+t=f#h+9@C>~`P(bD`fG#98&eD0r8$`sRs+e|q!VlEp0o@Du-|XSNpVzhBAb^)fn^ zq{?dS;Nc3L$hQl>&}&mwPbIeUKpugrgC|IV$><{K*(*s*Aqa-vN{eTmwx9Ktm`mS* zTwaHPS>Ql_rS0E3cKEHeE`#trf6PWvsKh@P=o@=0y}kljx8fexlv{=GbEqtlgBf7@ z%Xio0J$F!$#yviee*r|c*)(L9aiNy(m%kclIc`Tpmtdmjt z6pB`36@y8I4@UH$kigqfTlTtBWy#O*15MV4NvpH3?H83iUli!{#ClaNI^e7u|~qqbWSzzU9wE z{i*0vh|k0Q)0>37iDl&JYl{z@c@{6^9l6Vlb5sEPI7;mx_(8C`beLs9cH#Vn^CrF> zK4$yJa^AH=K)@P=V`@2}9sFxBaKp)r0S%fLXZaCq)%P6rw0q6xz93yjG5}?Alk@$- zK1P2$TjN=0R{P#z&c=FAmZ0wag~=H^{r~zAG-UsH{(Dlw|67wUtvX^rwiDNiI3ixR z2%?K6k329_N3nA}l`AL=<}(IHvq7O^gF(U^hCpadq+n-C56(O5&;9xl3+gIsTs&A? z^6=Ov{-lJz?9dWL+dur%OTY{s=sW%KfT6`xDOUar4>0RKn4-mvADUj*iKtf8rpWq* z$a?gx_%rsY;RL%6FrE`(VrKRZhN)oIY88ZCI0I0_=X|vWS$R*R0P`he`MMIr(U^x! 
z;vK!n(x8XwsUfFimQ%3q&OPCf>wOSV{+vaiyO##ElUp`w&n-h~Cw@S1wcI^C2Ee{P z+ca!8<-Xw4haGa=#*1aAVdMUsqI(-?6I$PH7Uq9}of75y*jS^^h5@(tWK>rFb8)GK z5%BCd{E%Q5@O&N)ubv*W;xxI(r$FOgM`en$5RZB~uQ!*foDU&82De|}9^^6Yxpw#( z=?&EHOLxEmK!0ePV4pex>IVrI$E8M1MR;GwIL6m>YDiCEV8*aTc1;e3w0+XZy0Zgc zLgQTC8a-x~;8K2Xd_AVnn6Or9Tvh8@eKNH6vqPC|^kgUDi3Sckm#;yR`T5T}aUhh1 z6#zwvk2^kjfeUpSOcNs}-My(S%^1nJW$tNuTu!wr`W!{ zbWf}PnuM=G#|NT&PrWMWaLTuphFQCz;@!_)+5Gk6@>j|A4tvqM^`+t`k!hGN1y0&y zY$!i~4F+TMVcg`TI&>FzfG*+cUvj;B=&qUev#y`UnsT|CgP<-EB~so`6X{3ds^9L; z9~1a@Z`Ym0!t`1vn}0eS3PBA$&-b_fn2nfjZq_Hl9*)J1n&d`3cBSxlh z?BVin#I9w*=g7l^X}-TS0Fs;%vKRt{9_pLE)_)9nD7h+&y{l9qon!IHGHQc}fu8|` z454tamM{Fa9imL^aj6h6@HMo&>h~pB&(YDhsJ(wnOzy5o)f^Zw<;tjG# zb;3>}59uP1ZUyL%Y9xUhTb|U5g(b7XwV@!U&a}{DkV8;Jbj13~AqveySoY+f2&J#8 zPnOK@4uEp3eXzAbzI3Y#seaS9FOPUgkh_EZUmnPRDvLEiMV%e4ZVw%Ih5vRKQ-t0g z$KI#9$FvBpffUXjZd5PPF*y*l{B!niaPo$Na4{)p+NEcfbunhr{fOwCOPIvMgK1LM zi#~yGBZhZHjxbdr%e`c|eocm>bS3SSortbqz`UCe$W}PN8~L3m{k!M+-FKoNm3)i% zuIKxDpdb@a+=+HO^qD?4E()0f+ZS!^g&f7?wzG$ zj1I<>M>zQC7%fuXPji=*TN+h{bB$3&nnko5*W7T{qNz}4@2GaAyVK#9yBHGoRu3oe z`3!2+o3-8gzpz6h+Woo)((Z2}n`5SHZFw`@Un2s;WNCrUJ>7NXoI6j(%J+iE_WJ?Z zv@Z9DGA1!=pUtuB%?Dw7H;UpeCLynoxMU@#sXacUDssW6ETTvLy3P8Oa>WSh)p7yC z>pGyjE4-+6eFZpDbS<$P4Jj_$FqW5k1<3$Jk+b&Q8E+Dyq3M9Y_s?k!f*s7hg$Dd! 
z+Z$!gRz0}iRKf*nzR#TUu%Z8HWPQT|WoID#TMOnFu^DGU3MmzRi2MFKn!#T|2K6t_ zjPF0cYdgqhlxeux^cRBB{V#;$|4L}t>OX7TLCUCQ8MX>L2v6ihze8X;z+_ussc10v zVNNU5)NMQ#CO)$Gc2V> zTHbzBaqxJ}zqL*|se{MGSUYNke2F1PRZeZnW>w?=((=ynR*ff9iAoEbyI0^F^TRG| zdYa+5j?ZkG1M8HQpDOj&J13&jc8v(^CiP`58*ligh=H~6_5CNzD}q`DMXtNUSf#>1$9qt4s@?793x9ZmJi zClf*T3#(MBalr#k`_%!pr;;OI$8)6pI=e2!VuuJ?xRNzy=CB}Z106ZbeHOBSpA=W4 zuiQ?t`i&D}+&H5&J)`MzeGTZ-X4gOA&6wshtY%VQ8lm$3Hr~)47eIWdnYQ@ zbC8-yFYZs!SC7r=x)C6PHSx{qmK`9L@wiRbZ1KWz>UvV4n@L-cKK>3XP15ycL|BQ; z;HROHw~(Bps{d>LbOJ>6M7Ml;;YorR78oW?a$f9 zvWd5JZDK)$BzXeiKo>gjC5fL94^(m*i~7Pg z;N|WY8kczRon`s5@Kj)Gh0)}7D= zA(`H>8&K85QzlRF>s!#Wl+=#-g}k;v85i0noXHY2q{lKJBqJ>R#H48gl3i)&$BuBu z(Sjvf=H1dQOZ{b<6bkg~>J)6j-evrwdeBi|^AA$e=AUxpbyKZ#M?{!OV#89syhw0+ zaUfr6u>c~7cVK)>{AA@QXPaK;o3XlqH~OirDtp)gh9;IdJ$_|j?0%kO6g$K^r|jq= z&ViN)A{tk1&x`ZbBUGk+R=P7so;l~2pl@;dw$Ud(MZE3(*y!ULT2}+pLK_itXt1nQ zVcBZ%OqTZQN`Qv=Mg?KN3b8nbeW;f*H=`QYrja@*7c?@A1F5KV@WLnl(<`Mfdc0OaCUX2bG7poex9bXM)?xd|i8g(>c zYiyhsFEHf%Mb{_U=m_UIhU=^JBH~2UjaT4ZszvwU`O_VUz z-daN|cC@twMA*+VDDj@sxjRzj5$KUo4)dr6OpazsjV4TM!8D+&+%@=(PdyTNBBRXm zQ7vgc-g6PhGi$w!c;u94(LJs-JjoD#D&-V*N{{zSqI(!k;|mPw4F?1ip@Q%);p&|m z^2=VB8ls!%*5=+qtci->JP5G6H(n9ss;k^wLR81S1uqqDv29Aid)QQ@z(MAZ$4kK+ z3%Q8e;)g)kng`huf9W><4)O8o`8_;D-r4EQwZ=O#M$jHs3|hF+5L+Zd6>5=re_!ln z&N^`&Z0GUVn(V(9XRdJ!=joZsmi9q(wpXO@dCCGLbXpc(rdqCr`A8pKTy%Tk-M|qNLg9h;*?%lz~Ot8$mxCO5qCV`s_qMck+zL@jIY~T3R`Mg z5DD?Wd?vyC9cFXopcrYvJuoZW9wGoi2<~gUZ=zBC+O!M?t-BGmcVo^usV>bCo~ z5a3ELOAOfC&FG#g2UvwsD;DrSsU9$N)LHu}$Zg*I7)P$hhb*k*Q8M~dZI-EW9KeeX z8^{OxR64V8YwollNvQUM3JxQ8taT)JW#_Rh#F!!iU?;AgCekBfM7jr$S;f)yX>HPc zOe+{fL}wGDyOKM-=2=CG^f|j8$%*OsUnaEOuClH*ElyNn$k;Iv%QvCgRSLU_x|M7F zWx{U4BOS$$-V3aNjEiLKAk@_#HiAUYPc z<5pkK>TO!hir|i7Ox|Kk0CYqoUf-ztLEVk-opu;J1-;&oM=>T_ixL&iF{q12&d0q{ z-Q4&WDl2fJ^93B^NdXEt(;8kEXY-)7D*n4Ow^xMQf3BVhb^gr8kiO5xl-=a;^~)2Q z)xPtE{s^wx3S~l<&&);v3rn(cj*2(rT88oRlLkM+%so!p!J{U1@mCoNS9>m!9aDU2 z8f;CIu-<~EF7tg95tqyhXR}l3xMqZ)RE0a~DQ;JC=$-NAuIGJs2G=~br&eU7zVZlC 
zE{uw=?|^ShK&v=o6{7qYZK$T19aAHG63uOD;_wQZ9YkC;uOq35Cpus_2DJ8!ayzDo z^5l|kB2t-xH!6|4mQ^Osr2~|M4inecUcuTXBVsprDN)fFFB7h@;8WJbO&)D6yb4cc zvlA$$qIS?EE)@)wmd}c+zA7VgGB+$7kX6!$ zE$v2hEju_DUmeW$9z;Tds%$BpIe5qG&9_t`3!Nft0!@x@iWh29LS0BQPM?BME&Oz1 zj3j`nr&oV(S=cOsAyE|(hq!iVy%JgMJ^v&rPT`EXa+kwz|H%wA-C(}KHJ_4$?#}vK z<;a%5E6uEdND(`D(fV!?EG)q`X;^dDAjI703&Se(Xu=Yjtfiga$gS9{;j5>X2KqzT zjuQICsEN58k{?E3OdrjS3r{D6_>x|r#4>PGfXl1dU~-~R6Xh7vD;T@kDOG-v*{Xb4 zEY)jGE6l=4V3Ma-cL*9pQPE;uf-teD8rNCD^g(tAd*0RiLPfrszPZLxZ8YErr~U<^ z5kb+o7JP+AnpvjBR&)Dx8k`E25%M+C#?-?-bbsICD+(?==0 zvjY_HTX!r^A>Iqq+~|X28k_@YgQa^yEgT-^%nI~1)fttxJ1g?H_dD9wgP1yufE6+4 zOOx)~U8wXF1qU*4>|p*yG4b{oM*$Co!2Rucv6G#*?y=|y30S2r=D24e;|k!!8Rz>J z4r}A`+cF&SwFiDMdQiP3S4C&E(Ph)R5=(B4x@|B-ycPh!KNYG@CYzH*Z$pO$q6Tlk>D1$YE~t;JL^d>q&)l@W1EcI&sX%+`_fyLlOM|AWJ_&K(fmp~=!HcF~yHUYyRjZy-HLAuDtzloP z3Su(XGE&gGGffrl5AUHvrx$!ea($o&%XDi=Z*psT;2E8^;?BnS*LHO$6ZO2f~viOCMw?2){)5xjL_yk?!;vG1LZWL&6C&rCXI&TC0_+{F!EF6&e{m$i$oFFt6AmtnwiZy4Ej zDNq}VwoCP$gd|qyl5OV6ojH3^g_+WZ;w1YdfljsQl^8jn>3EPdKD~f_lGX;sOV~}+ zoJVFz$JiI*z1*Hz+;|+MYZhsE(BvJw13r4sD=5h~q;2j=(-l#E!=XR1XALS6U{ohUG@Qs35!4e1+bbob6 z{upv%uY4gw*Ftwqe?!nH7Qm0mj{@uJrAk*>P^`4)9NUhC?lnAi_^nF)D)DvOezg~i z-qGUFbKh0YX~`~{v*R@8z$gba+3V1H1xJUHG)l!&W{XEi$qm_zzQsIIuN5_shV2*( ziKSM|%2NCWzdYx2A`(i5wxOA1@NkaQuZ8WnHL6;DCZmXWV*I&uUI4Uk$0gs2eXViP zPN_^O8lbQTDrQ?IWe?*AwS|IL>8oQi0E5%qP%Rw|@*b5n$Ud34dU4}8xypX<*3<~f zffVCAPF4v8mq`cH+}KgY!_p}d>!V7$W#;B1nyEUJ1{a}?^6@A4ZOow#JK>}8vDXU- zEvK6bvCrLtA{X06k3~*q_VdTudUla-jXmrGd9z}!oDkp(3x%%|p>YqS%P()P?$Pf< zX<2go4Pa_Fd9vZIkk(l)3r{&BxGOIzBoA}&jZ-ss+XC2WG&4ir#UftOD4(TTy=Yz` zU4%8DF?krwo78a31w0Yu<<~&a%hw77Y6zE@6L-#Dj!~{id#<=1^e0tj4}k}laz@(i z5fh3iq-1D&UoM^Rx7oGPzo9UrJDUi0^W9}xj>B#?GhF;@?p7+3&+WA7ptuD6`s zI;i%;@a=#_An#Qn3;&dwVDzo$%Etz> z!V6j<#Pkp?>H&-;43YeY^h~+X)vUSNsj1Z?YaM%xRU_%^l>(XT!=$#g&?;e`(b=Zb zq#}Y>7Ac(Efw=Od8?Vnhocbb1uX10&(Mil_8$8QG-I4PQ*%cX;R$-U}B_t91hx)$o z%!jT-P9vRsE1o1Ftd;>#s#c$jTZe%)21mhtSbC5N?yuh)(jZOSh!#=zde0@N&E#E3 
z3L-2H_*LA)?n6$eKS~NhjgA4`grOcn=78U^4LW5u$^;d+uNYSGqSbxL%MvXwK&yzh z;334lz*%ndL(0$E7xZG(i4)gZfl+z98raAFKA#Y_WqWP$P+%79|>tA z=xH0D%nr*3br+I+Mc?+#@Ly_>uxv+J?3T@_}{&O9e>jl3xI;o8Bc3pn3V%UtNy z<(kplnrs3XlsT1|vSm2VU-vrRD7_=ONpLyus~EG$qR{cGZcVdB$bp|(9gB;46I^y* z_)ct&ARI%iO`kBV?#=T5QQxPq6S5KdnxaSXnGjY}g9D3A73gEGACi>>9^^h9RgEhu^j5N=n4EB*l$n9zDeLgnc#dTTbjP#7H z5LMX@XF>Zj0qI%uBs;PvZ7R!~tI~F_bPOiAJ_ef-H}l6p^&Mt-#zMjru&~ez^&Q(T z0g|M)BxWISwjD9s7PK+2Th*W7{eix26B0F%it#Fwk@z`&%DxQ&KG>c&&*AjT$@1Sl zxSBh3R_gsgXd9I7t5;*Dan~A1mcef@JJ*zrccX$k#t`MG@C_MA3*%UCJ^1tUt~yYi zXpvIslY6>z!TY#$Djd3Op*qk9TyOqOFc&^WY=TBrKjB~!{e;KFoxQNeK-qk4CM(dz z5=0wQR8O@zo%T!hOX47CDYUH!7|?{K2gXRHV)@E)UJ*ZCHv8nRYHV=JxZG#4BHoT5 z>#97xS@ADt{}Jqc%a#%4{quS^wvAXT!0AedgJ#UAvqt&HU~~s+wk(bqt)Dle@h%7Y zajJk~LL;I`6wOJd(BKVy)9?lbxVo;ky+AZzeWc(li1o2T_h&t_B(`VqwI zsCZ8TV9im3u919q+#W^AtMp^Cus@4F5woQr;IAlT|gwl0xmV2`CEF#zJrO z8xGS;GT@j9>)qJe5Y4Aw@0akMBNuZ6_j`QZ&}vdvobRu?j zTG)JR&6Ve@V&XjaUsoO>MFC!dwGJo$IbI>^Y%VYM;Q}Tuj5V|D{z31Cfdvc=anl|- z;Ut9shzNe5!L-ABWTa7nYZKFAStBR`81Xny_=U0%e4{ew#w_GxAERzv?@cVh=Z(H2 z>?2TaGF0IC%$8-@*J+X@Fb=y^Fwa>VyqfAn0WD4+s^_39&%t*tS%@lv%`7uEz;-Dq zvroZjNH`5H)MTeZPC)7eq53-Qj&Vm;%<_W;T7_WxF>3%)uU=zKr;8yw9$*@q+ATpw zo_=jk4d{R4+os`tw6V9&H|EoF8MiNMpcKg0ds(KoIVe}MPwopY!qhDDR4=p&yhZK) zLY2ed_)UDex1_PBVtOpbX71nybeZ15G%J^i9`joATftcinxRnTR0Ju26W{o-jHAfsvS(b%E!(EoP#O%L4 ze#-noU`@3T47wTN=<}_$&)*Cb_tjo=KCZdjxQrh$LRu@#C!o{JPu%Aq5#j%C)|&?X zlhws3y;kAw3mz@%jc!BzT~Q}Ksp@^|?U*2xM4m)B4YY$9y%<;`d@s9Fj?0bx{K9;q zWVsqoK}|Xu%Lg{rCeP`7^5ZSoF3nCaP|FVzp2fS+eT{EC!LehO-dtDg`U7 zaU&GrboRC8gS_bKv@(fn8BE0GoD+vXl51F`Pj^aQoJC9A<(;K1BE8Np>q;Wdq(k}) z8>}%`7kbb8Iq(QCS|bjTkAa;Vw6kN6;%_ls z*eRc`vFBKV{csk^B7J9H9)dP_3)7!u(^IPfDZ<(8gDgg$nh*%IA7z*dXTkDQ_iHy) zu@2(gMz1Tb3=6DZ-yLaI+?ItZg;{m=N%bX%#J`E00^{t( zij7h=dmwqwa>d?(!ah1h@S6ZLnH(EE3I;}3R{`;xm)ES|oTEN3Q*8J*gGghLnrAxy zta|f>?~a6t3uiug-6L0|bQAB8O9=26cuw%YKezc@5a|Eg69V=9!XK+9P9b~WtG=fQ z3jF+Q*fj_iQt_VZ2OkgL%cL<65L9EGF=U=z_DdLurHDXN3+Ai}PN#h#*Pf`ooH@x? 
zU6nY<-*-^vC7L}YX0Lwq#;fg@%p)Lq6dsxC=Cr$1fvkLzsYMQJ-~T(aF#K$Irr^i* z3Sk%%G7$lu`z%W5@G-t4~bAE(V-_$jk9m@e?O%j1$i8*e+o-~ zSy(!#yq}fH<`Cn;KoqRq7J2DB#vW<@Zq@!kuY$~5Ag2ai^N-BH04HaEC=)v0S|U?} z7`yxWfZKvU2`0fR39!$?@8;irkE{R)o zE?ZPmM_y7a;OY1O;*&Z%*i}fRkpLl(|c zqS_r6bG1NuLbqqx?lQ+c`}bd8L(bGZ<$PxtpVQrnP1f&cNDjS*ctG{7FPCC}R?TRL zPOOj=i7uO)uj)Kd?!xq$@6TPgg^GKxU^9YW6W{QVDo=jI2D;gPzmZ^bN&1Y%D_d-r z=q5AzQ>I}aBcD*~qO`FhCZ88WuFD~-BCe5wr}K73HP;Yje@`?nQ5+n?O^t5n1{AFw z^8@Twixcv&Z2+H@mKA?KSVvn+vgw3BGd$0D^!j^DTGo!XFM%qbued_hH?t6q$;c$g zKjVH*8f(P;F0Nv)cH7~-rwm8u>lRqA2w@$ABr(pLoK>1P2OzW?zk5k1;&!r;s0NtEhvjdfJ zMIhDoV5ZYaw9}#Tpix@eBhzb5O4DQHOE1%g+U%2JJACCnb2&ec&zYj;uii@w?4jkv zzMptCZ)d~c*FvS_WQ|e9szCHFQi5k3`HZf!S8$yqw#EBg0JL5c=mlPyCm;K(m#YDq zP9MyQT8*t(PWF6?dqWsbs1tF=?WyzStNVjBx%#@ytqgoCTGu%rr8IQcBP#l5T}#5%X4Tb$f%hKEEJYQ)!lElI>+_R8u>an$~6Hkxlxmr*U@Qay22tMZ(#PA zJyWQq_k*nZ7g$*xf6I3|igpSB@dOTz!4MEn&~rXUZtlS(h6J+@h^t5(mOs$hyVj#3 z)VW!r6RbFO8r!(CBTW((9UlyrG))y7PM(535zK0|W2FquYD8H(g3uV$cC{1qTPuO)m8t`Yds+q_qJ9j9ZEtNLXwGwrVuJwn-+!Q&p6*G#2tQ z&oM`oJ8{I0$rpSlFz52&SbI{N`p!&=K^r~2G3*-0BfcIM-J(%uvPAa)?aDC#6@&gi zYdiP2B-6Fu_s+Z4taoYRkuATLH+BHUzHQ9;8G^@QVGoEl1}vl)jP{Gfn$Fp;1-hKv@8W9e2`dNbg*;l-@ zPx{hGW?L}C<`m(J2pXS#M?<&N2bSkuy!rvf`D)+iuc5phRd2levmyw>itg5s3G#I@ zJumM+#99SYxg!ne_M%Q1m0OF%0~xZ{e}1Y?_j~wytWnjjWGu}2=wGjlMu>D-xmQl` z{;1hU?yOAGIuqBJYkQ8OBt7_)`WElMB!}rD4XNl_1!Wd7$4~bokP5G0{_zBnm4{#= zO$-NK&vpq(S~#1YdV!}B~ewkp962_6%R#!YDJ{FQkYDgb^!qq&(m7V41zI8pKTf5dNoermmuQ9zU)=YshW63NPf0U4nkc(7G*<$~E*qCe zQ`uOiIUC2BlZ8O?5Z@!J*W52R(?zXEA*K@?UpZ(;9rFGTFl{QhFpl^#{|}2(zQN69 zm$uVrHMAwl9#qBVUHQmx?T^}B-%ohaxo`!CtFak(^IKjO)pmlECzy1Ra42TOPeZaNyDDbsFw z5p;`761uF{YB;Qr8QbMv`DqzI|9QDBf{t^nknW2(1U2#dCR@ZS`>vsDTZ_*WmYmy+ zLUhe$CI(^AeL>nbpjrA?S9$;xd%pBbZ9;8($Hruabn$eyo3FFRuzWys;KEw3LxeN6 z%{Q`pPNcuwU>vSacVGM_h_Ti~5!6S?Z!aFq7b1e> zQ60$`Lf1Lin%6T7DsPkB&8`k)OC$l4n+mgYde&e0MY<#(vmZtG^Ba(;Awv^Ms5oKY zu~Z*uDZ!T|Bg=TE(g$f*wpibETzfpQs{nNbhNU09vxfwuqBXjE1q>VZ;p!sKuP6um 
ze|~-AiD#9pG2VMDF7GZtdGK*$M6Gx>o!O+asx&%`P~N#6d$~e4;C8e!-QaSDgD0X~ zq_6MSnYz*#U5-`s%lXEKSmSbDkr}8)jxbpw@J1++ZPx-;yLy#&2WNG9?_t-KL! z$-yaX>@zvnA|?!Xg^4SV(gHMZIR(cIW844+LhYM;40eU=(0-N4cIa0!l!PCo9%E{{ zJu`*?Eki)9bonlc{EQKGvs1nH)H^-)xwLRR#e5qo;=(Xfv(^UiS9UCX5p4OcwX`dq zP(~~Rl4gSwK{sG&b*d^N_z6m%@cAt72@u91ob$yIrPgQNMmh1a#$z~r40dKgqHW{f z=GF|rWX*4gS?*Xrd>N>7ZFU)j^||&_k=T%No=UQ*`M6kMbT$1 z@ukkDDK9Ex^mzY0uLM8)_yW6Y63ZE!F4}EWh?l_itp!rAAA|>kQAbgOz=3LWaEj_7 zxc%CZNm!#^fAi0ZuyN_uig-uWR-Dk{i?VnWupb+V{Do7&Bqcdjz7;c@Wl`R#w!Mj` z*j2CZtjxs0J5|Yh$Nn0>=4I`pvRv!atXd8dT z3{$6Wj%$ueYlypx%#QZXl`dZN)ia6~bYHBq1RIN_aNi!|M01**C}DbI0~vIT+jK6g zlc{sdPyl(Mr*XXm5;=_|TH`r)32JtdUJIKoV z>s=jo6Ypw}f;+DE$5(KFX5b%rhO(q2B7mmrNZWiPQ;=O2iPUb?*p=v z{5}C(j?Rwc3uHWF?yD8=Ro&azcztd|LA4^1;{f;^wCW)3l;r7}n6NepMRQanN_2q# z3Hx{&p2}We?ZlI!JBFZ~E7oSF@@H=m*wOTAn4lC48KQKUA{B$;O8pJrA&vk_xz=@l z2e+m@tta6z(HRmbE=_NT3JUMg?J*G|e8!8?6{naMA!#MIo)JO|SmI5lalk`n$Xb4G zMO`p=iD8zUBhi>Ks2zOBOKix2}3e5(#{v`3RYM;;{n%j`oSUJKNGvDkK)i+<+C zQ?COcy_otRZosv&W4Wl?w=8#RlPsBv%?mHf`(;G#)c+F~!dXhxmalfl|9Uc?k;Y<) zl-xJ>u$6-mhp1mFnnYI`;lm=Vw9FPGe|mQdX&0TMA+O1UhtoTuaS4 z$hCkq+GRk_9bNai7?6S^A1T$48Ly zJLTcmiqAl&0PqSBM)FoG^=}3W{8Nql0~3gV$dOETs!Ec3;?jF7_Y={>H)d-Xw#03G zcjFQwW+tl5m)|7tavOn+QDcR&^e5}{KE(d)jXGHF1^c89=~Hq&R9tB$)^Dd)H0e(D z3`lKGC$L)9`}PoD&FJBRWnjIxB1p`BJgIdB*v{8Km0U02?%Q7crIwQ)RJ#B@e9N$O zmI+SkvY6)8emUCjAxiPkgnh-6RYTj*^?|~-odcE{&95r5848-=#~p7}gi;di7o1W3 z2?2}p>`&A7`ra%)tB#mnU--mlngMUA0=wFOt)2SPIi~;T+R(A??q1*8p2qmw%Y(jY zAVu=poQ)G_xBPPM-v)J&o>eh6@mJeyn5`Y2LppWTDfyei5xf|W@#l?Bv}~)zNF{ip zd>Te0(f(T5p+J$dw6`sE#%^|P1o;>HCi*@edbmL~{dm$X&18G$^ND+~$7X)}@qBgu zr9C(Cq+ID_x+3|^M`PS5`9MwnlNc!#R@IEfPY2|)yCl7=4#G4VOw?@|*)M(L7{1ds z6qqVkE^TgZT<8>I)XVcZ$A4)reZ55QH=b8A4=HZ0)4{j_ZTNkap*jLi{t6}tHjy(# zcjRJ{+v3K4b92W&DBm>SWr!8!TboZzXvtH^ad7_Ec^M{GyqDlsncx{$`G(86i|8ut z-42v)G&Io`jtlRX(jp0`TxK%uc>33df|%LZi|miJ^{nIo-_=-2A_pakOsXdm9Uyz; zX`a8M*TkT~4vx~nicon0J>gbDKqWW(mia;5)r?!5$IAq;d=tB<^&_HD@^YTYv8q-L9=JT$Y(dr$0ZZJ`N1@^ 
zB6Ti9<*h<}k7dWR$cAc7XK@>0TkV3&iJzQAm0H-_-NCgoFE2s`#iu$?uw4qD5a!n^ zH=5(d;HUkiib&e^UTRyub&?x_lyD8bv84)_#r|t;0PFOqo2#zKLPcYo*WMhMI;Eq> z1G6h-{+42@JEu^Uh>{4Ao9j z#TeqfZfMaZ5XKDmaD}Tl2?;4++5$lVu3C@U1%9xf--O)2$UbCcay%MPo$c(aVq-<4#s7$%!CS z8jl1sN-V0X(*sq-=&-vmqra`HM79_n6F+lL_5dQ+m}>0+sIwh{bgcaaWTCv3VB7)N zq{kcA)Wkq=^4S)wbXhSzX4}d&fzFUkw2{Em;;m z1;Vy%%&T3%h+c>cg(qyzV&w=fBY?+(RtPpiD=Hn?RIWiv%FIQilg5OZHE4XCx1PQc zXg&%iVjig{sxCOB9+T8(;S|+tQAA*9r%Aj}A9fC)mhHM|Ls>oJXkvnkgDL-bV{+U| zZb<-@Pfkb$wiWqppZ0}0???OL`U%qT!5@VC!4$UOZj#^Mw2pT1?IE7NY)aUIZ&3<% z+QZQMExkl8PKb|DZoRX1BbcoG#!r9YTf$NfG@rY}(98e0KWct-j@va>C3^y?ZGJ-v zY;TG+U5xCG^heLHN@0=8<$Br*~sPYfSXfir=z`!4kIkfqYYSqhZoco?f z3B`$E4WTm`&~8hoBJ4haJsc&0>oi}S$0m;&kp&a1RpMSA+E`&COy5)@QXs$^7{PzD z&moPBeE17elN0N|MC8BeU6eP%)%aoj*gda~wA{I6^j;|5+k<6*=1{%ojL1U)6ayG? zu$GFCiw$=)2vG|`S`&L{LW<|LIgQh0MTI4Sq$%c*sewEeR}xIMy3{yh*Q5GZ6fxGN ziOOcMJocQS{I)Jzj?LPAbQNh@iyEtma0V}$*HYL-$NG}5y(5g#?Q#$FZbXK0Z}9>3 z6^@mLVXYhC!Kr8E2IOTRWBSZIe$UjEgIvm^Z#O<5ES%uA5n@<1Q(T7>HxCE- zaDMldy^>(JC7}AeaG&lLjnUk`dN-?mw7pMyKYD^eS0k<+GGX4 zv{;)vRv*B&6Kec{rG77F^$>>m)6A5>+JBsO)u_&hh)bM`FPTd0^nb3JoVmSbC4ow& zkePf>=*pre^{nnlXrIH?-Ag?l6qvjoiA8-l04159LLW@pmd{QiRuY~X4|I>d{PUH^ zTOF#JbN=(zvA;S8&YenB?)~E1W1<`+~ zbBi}(1XyZXQ&aEl&*4y!`O%}HRLfqQEuxbD2B7?ZH&%K+>!-an&0U&??rjc?3T&e5$dN!z%FFCe z-wnOK?Ezqrdv^!#b_87}g#*r>(GICvTF14yS93mNh~8)0qvrn5jNb$NBP&y6 z^ePB^Ut1RZH%unO1;8|BXP+7!a<48MHgfbc=d7Jsy;IuCf%WSFxp}KmE zcYFGet<4jB=4VgV@H8jexEq=w3M=>udeFn^c|yO>-y0`{_nFr)?UcZ?AtShmiDvjX8r0UODsBBB_4ofOy6v`O=S085dL&eFxH|H%OAvCl5*{M zs;dr*$0(#AKbPhV&=~%%(Pu2oKf|qjC?evJsHG|ARkpEa**10rVTSB)%67{lF|Pm> zw8@+)XMYSj)Nn?pyFJEQWxaIWn`>La%QS*cX9(?4uSdWL@vblEJ+`m&mtK2Inp!EF zT<|DBmlQ=D0_svss`JRD#k1>k5hUf!Jak^&_F=MYkL=&J?RRtO(;qb-cK&&NXH4ju z*^E0cp?e{T_+Ks`mXdWYA36;ARa%U)#%{W~A31uC4|uzqiRx!c%`sz-Bjg6h|8qJ~$sCqV~wd z082X^>B$gB=su}E0d{q8F_$Zc@{@934H*+k`0l(Nrn|BsL;DQ4mz`4Mdy+j-BW%t- zSEwNUO4&HS!|v?(cxVZ`BLvWt+X-LSr4+GOgbwESU*o5n&$r{b(35J%a|Md3r^|R^ zc~AiEP+XDEkLwg2qa$zcpG|j3vWM6&!8yPK5u7NqhAW05f?YdC69CR8PUA&f{-Riy 
zQ&9M%bb*d@N32!jcfx26mUGOJBJg=a+<`{aEr|3{(u@+8`&?95L?~%rrZh7C75HsN zbM}zoTYdu4J#&^pCBu!G@deTsG6lsNz@m&qb&(A)G7Wq}mFosqESl~0^JVMAEX3ZeEe z?NiA!o4`2)ah90->SGrGf56Or;&WSp?dEXD&X8qo#Xh?=ej z9O_E*h;DBEomjlrBi)}j^L`Bs|6!q~|IOGd?l*BZ8^4_@{J+1Shn{!Ue<~NBO6Iw~ zP~{;0u%;K3oKs613r>Ub);Ua|H|{)-2!&oup0*DI!jz6q56T3wR`YrKFHeGxhSLJp z+Tt7Cp!RS%lFq6L8dD`-StJJe1&RT>+?!HZb;-YdbL`#Ct zYLi{zMScny*EwudaPeZQ`%=YZ;mb={1>ZeIj=JcOgln&sGgiyS)v6eIen_xAX3>v_ zH6F}SklZfg8d+b{OpJX+i0%;D-a)9Fmhki*yy#kh`N~Z$eP1Ykc(>sM*9m^Cs`#IJ z>ks)Q1q)C}96jE-_o%V50+)QynLc2FSEsI}#uH{q=yrBUW#{v5Te9Z4mcd5rx|Wb9 z%H$N16<$F2QMS}wmwnkUP6O|Rn~nLzR^ZVUJV?1#FNXQw_n=Pue;B}3^gHypyNAZa z#A6W|mc>b&XR0ZSNCQgfQcF^a*bV z@P^6Layv7-^-3elj@%kL-NfEEhybj+XBm|sWC6h6cK;tGa8KQR=mXb7YT8OTAMogA z4t>-DxCWxxmT~x(Ime1>TG#U>J>wFv+C20owkx55nuc%4MK0mnLMgSdCX4v)J!2E# zPbZFydrvD^5$v<6GpZuOkx-$|Cp??e?09!b=yfs3Z>Nbrb;c=+F_xN?nqswoc* ze_0Y8pwhXIoo*yEx#_R6H{NW>`Pb_U|Ni;^_^I^ipN-#qaDL%yMC?l^OmmrS;$LF| z?`Qmkc=hL-OO{y<_C`*{sh}6v@tcPe?>+n1 zJjGMDsgc`rFmP)M+W#?b-?g(h_LQ?IYphDYbgjdmd?GW~nIo3AoPzv#iYo^b=9$(M zwizSwMZ@hkTV6T##|yZ3`~UIBj*853&9SIJ(Vp9zsH$JXKYE{uWqUU0ojmE+sIT5j z*F-ztr_g;-b^BdZ)DP?*-{X%;oZgf6zIEIGUbce&fBuMV9D>6v#0ogH8%5n*VDPEX LlgzKa{m1_X3+1$d literal 0 HcmV?d00001 diff --git a/home/static/img/blog/committer/yuluo-yx/4.jpg b/home/static/img/blog/committer/yuluo-yx/4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4d4a79d879ee2fb4462f0da1bd7be73b6273d53 GIT binary patch literal 79679 zcmeGDbypkx_XY|h!QI`9yQg@exEGfI#oda#yA*eKmli0l!L?|S;7)PZ!~MPQ-+G>P zUc>n#lgwnY*JR7JuYG-HB2|>6(U6Iep`f79WMw{ofr5g*e*gT42>-s-s%lbte}n$^ zMOqxHc9Qh)eFtFiN%0dDR6{(<^H;d{eI!R2oo`T3*e?IPpdscI9#BwkC97tKSj3C``>c2?dm^wqf&?{(Cw9OOv)v&*qvrx7?PS()5 zxBmfgobj~&FWfhP|D6Qm;<>I^TjXO0?O|cqR%{zH+Z+3vz`A|C>dq|DI_Q z_%H#r#sfAS;wxVf;){8P_wNC{&48_f1L@0X=1ul$7F@DU+g%P%C>Ft#tjK~05^28kCZ zScTIbYEjSTh7+cTTPfli1#F=Tuu+hd&uiJ)DC^Rb1^J4n1eI%htI8NtRK+{f= zIGWj9c(!WV@-)GJq30i*x?WKrZh@q*V)rD12$nr@TU;@x^+={?z9>#*8$ldlhW0um 
zesD`|54g+z;-Dg@oi%Yf{RRUR{>Bn1$=P{b=w#18xE~wjAg;ZAPHq5CB~YP=^-xt{hLVwQTR?fhC)BjW4Y$K zKh2PW!Q72?I0Oe#_lU_`}Bl28$5M4aD#QMt-t|3jg%4p(@Y zWt8lT7gn`Rt#WRLDtxO=O2Ll`_s8&3PfJBlF9!Z+K}Agpr1X_0y@9xiw=uVkf zzJS969*%*3Yj=%k2kOzT_!zn~h5Q%F_(~RHH$vhM*RgMpwU8v`;?^n?1 zio|h!j5B}>O7+C5Qb3XU7pO}5M-s}q*9DBGb-02(J(%akf^t%Vs7CgF)n`v<^?> zD|@1o{iH)+<%^~DuP84Vx8S`}ZjryNfb&~;g8VDerWlT?c9=HVz<+yzJIs4Qk&M8* z1bVi27{cxV>>4Ls6;~>Fn&*ouT9XmI;L4XGxot%HZXKj%1t~(Aai~>97?%QCwT%ou z8sxU_<}CUW1S~tisw!f1I+J!@vrDKxOEN$Uqwb3;59HQSh&{FP{-i>lo;HXN(+q4e z)BcG)SJMnzBVKmA2nCXE`0p~{|G5lV$8vKw&%HrrhdP?{XcELsMZguExD8IGZnVc4 zb*~S}<>DKj_h3Bu;w-$tXHo`NIff^|;3FddF9}BVqBDMT`RqkSlqw+L)IzNDF-uI{ zmn@{wiHhNcdLZC2lzqG5>FO)PpO2B+z6tA9+bz|v*KR+evJVkC^4U1fmUryW4)*@n zBByiyK`g08Xwb{vAU8pJlqW=r(m@LLXhrURjyC@*eIrM*e)57Rc~GjhNOIGdSZA zzG=IM;L-2ba)YkP-pH+k;P(y$o;%c^qmFD%x| zB16|L3Bb;@2zle&T03z6~WYX+7G6Yp(Bk;lfFIgPV2S(?4BU znCL=_u1VZ^6fwjeJd~3a(IhqpIP`NcW1_6ST4s_2r+Oxo=g28P>+!2AW9NGpcy-LT z7~IBUK(8&r9rOZUC3*PbNsxI?X`~Fr^?qdDXH75sj>=q?g56W3D;0M}nO3CtYX1-G z-Lcm$wsw<-Di+1mg+T=qaXLJ-<=S;uK+edx17Xb$Y)&tV`b%4 zlqT*&r>XohMa;U~w4i9>nJ!9b)@W=Mg?xu}GVvaF>0JOuzXp!a_`zM&*|W-^g?tx! zPfDGH<53l%?UOaI_kqEhIi4GSK1LJv$mST;-W|@x#l)aOemXzfg@$)&C zhD>xALE-C4FRwi^zEF@%;~x52tMPR324yCzdT|(=kt5bXRMQ$|@5q=ze%U$!BObsPKT`t7Gb zhJ7mBR9I%STTWJTgp&%0pI&zejkdXe34A2p@4l3n7`Mdr-47lefiBN5R{UC(Gkk?~ zz1oc-MJwzR=y^zh#c)pzz28L=igN`4RK{_M#7KwZe{aQ_o}W7<-i06?`1I(j3H(co z+t&VRZnRV;%+_*E+eDq)TfRUsj`jOj4$=n`;>lMwcv_0@>-3J@+p*04=S>S=-`L>O z=)qe@|0009&-~UF_}8D40*WXXF`BbL-fzv*G6-Sm;B6w^YIv;{<;^GzY37< zzW3i(9n@N%)R1k%IC0rY06j z7gYr1dfpJ>?kn^h@h3KH|0T?RtHaIq;l{LBKQBr<4dt%8?H^fu+~}j|bad6nB_@Di zoOqzc%AXFsVhB6B8l#Su4HgC%c+H#gRy`H^Ib-UUh84QX0d?sl3p1v8rU}bm=+Nh{ zC!+_=HhK)h3e47R^G~UDb*&YAVhU#D2pt^eKMVAyE<*68!T+(XHO3t%FZYp9D*=~2 zy)=s3!--QQycO%YRi(3Mb+%i-TL51NX;{&j)aJ0wC*YkugVM_aN$7ioW+;N44t))? 
zA0(iZd`ZI+NBRRh(o}&WNOsZEMbA%!a|$sf{N&+Lb^X(vG|;qpUimxhuqv1PfeepJ zWN`kMgC!U+j<3F9F9uL?uWZWQiY`8qQf_oGHWWSC0!aG>i^I8hF&g00jv+JQFe$W!ABi4h_J()=O%sBlq+(}mSkq0d zQBZmtYe)ygii1UBUClK_uB>R5^9L82B6`x()s<4-W<)BWfZ*-7fWHoCy@6W&hY7Pd z{>;ggN-BPW$PYs7PaH^k6ld`$6c*>L3`Rrrzy}>Bx31$@9E!)SXNpzT`&_}O^$V#P zy}(~`8{Lg%-`xCn{0ZZ@DL!m%WHkU^%>n2>9H?1kujG~;;~scL zou7^e7YhptP+4o1Q99otBv;#|U-{p8-=6zp07MZxUGKO-FCPN2N2BA;SF*RaGH{YJk?Pq>bGd4qaP@HdT`lAu9TQ@ImxM4iFtE}U|ihk={Em-B;?ZadjsTjL3 zOxb$$kP?jFao^%}^G*x!atQIn(*!f_WfIl`LMB6FL|NOh85zXyt9l+LTHEp01gi7- z1^U^M8p3Z2$ z9HKT9(*I+AfPenMZ+k1ge8X*UFqiaSOf4PnVqc@H{iz< zR`q(s;GuhB{cfmC(8J=d>k-RrUs3XKL(&lWv+}ytl`VIl@w4Za7ZxM`6&x*y6Mw$J zbuIm<-`P|>!p07XAMolc_N@;P*L$d2Jp7rZw*%Ks(JZE5!GS)%J#*zai}&kaI&J&^ zRH6dG$oI5|*9ou>Y=a-P8clPPx1q~FZo^lbZL2`T>sUC+oLQ4|_;m@CTU3x(_;v_9 zH>Qpg!U@KyLoIdB4Wm&G-#)pb`gLC|I@ygI5j+)cJQNMMlJtkdepoYB^0lpa^qQCcs z711t?61~z4u&sbD(=?>8bCG1GvPMnNG|?r$K+@j<{($fQ<$sdt`GGh-8|7~`V{9^u zP#@Q9>?s$U3LA3im+_lgp=ej|jTiIsdT$AmoHkoVfR7%Oal+C@{(R6wxlsdfF!vAp z!H25hCbRF<>ENy?Kmw)yxFr&>10a385limEiHo}HbG+cU|8*WEsMNg|&FwIa=rK!@ zwtm|vbnWh|a_`5W)Ww5Idj=>x&!L1p@P7ND~E6L5m03b z$1vB;Qd))roz9Sok~C(Fu8LpZMdci=FAW=x7G;;7U3hLqNrGtNAM+39Kal!zk$RJ( z4jk?mI&E5v-00illMLzL&&gyXYfwJ?crCkuU(g+ z?9AUoG*NzHPAmWI&&4nps*G{W_OwU56~d13jept(LceVEcL873f#@#+q(g=g;DsTH z?{}uC^;_SJZd%fP{h`pixe131V~gp};*Rcc95=qY>-%t$Y8fC>BW$cXARg3VPaC2Q zJgh%%{;ujW71j4-CNA?QiTu$m5AI#Z__Ux$#Dl#6i$*}?si-z9J9oic@^XwIL@8g2gD= zT3WIs_4|P9O@7|n75+|5Nj})SDX( zA@7|4q=~DZI~QA3c0XrX?uZhEJntiIvBPhkj1kKa*zVQZpg)zY1BmlQAB`E@-uTdM z3}nS27E?2VqT(dmLe^D;{IKIN2R4+i?nZ1!Fb4})3*P@fvBs+Ae*5<~ zz%>Dk(;nC;+cVNY;9*O?Hw?uvzOQ7x#OTah;`>#vM7iis`0`c-$6D@}mqn-UppF0xKgHg-!=E z9N8XRFa){iW$xr*(aH#0TR*NZ^H6&J><2qOf*t$!^Dw~ReF?`CBn;A0t?t_r58Jm} zwEQC>Ikub+`7QYs2~Iru&v#;Wfj1L6chw^9IsdLixX}NvwL2@Y;zi z7q{AGmTDS=^gXE6JDQTZ2_;750IqV1g6rK~D3wGAV{tG(Z{8QpEIAoM-HPA`o^bX8 zA9~e#emgjB)^+#huA>g_&5-}PbcR7TPy%;7!8)BRBK^7-M^+L(2=B8vjKrB{7oxcx z`eVT>C-VFJ!2LZsh=c-Tb6A;oO0#&M+UhOIxpEn!sEV_B*wAbr%1vn4Ai| 
z%cXRt!E>Up@3Qt@wvABx6kGl>{GJ-Jdn~WoJ&9d2@aW`40+q7f4KV4-|cwMvu;U#fXABhlXO}%NXZ-b;!7Llb_JwLa+ET96?kYcnFm!I+67)4_7+Eg_a{vKCTgj4Tsii;kHIMl296)3=j zTP78@dCy?bE=rU&G+?rV(qEq)5Qk$^|5yPe^>qc@3gZSQVT5j8OMC)8%0v(W-8Ui~ zN%t;JXg&eoR1lSfG37RtdN!pG3%Uu-#a>pEpv%2!@#qIFub&XBy{$PMtx2Xm@bFT! zCe+&+xxgt{1no@AP1~YnlUnVcDwkNz%{u9LE*)sTb!065HT%ZfRUaFg50CLzs%ima zvG(VJz=KzPdqTY}u39ScBHNYHL)t@?lghQ^sI0@Do^I;%lVYLB&6LI5hk}0NpI$L+ANGjyq{gi zkAE_h7i;w{)co{_>KyMW;XuLGDXH*{3)wsk)?e-U`Lpah%m;PaSs31K+9F{M|DtCw zxI`A^_*>Fmm3EokMwpV`RFAG#hL1CglxcyZ1}6%CYozYpFG<@-%7L^V&e!zJUsX4k zuvES7)pTf1$LVQ5kGoj$D}cos`=z~c-`5}$5wh$)9pFZgoUYi8srm}Tg$#|3S{vAY znn>X`r_`Uh)X{!E=h%r+>bu?KT;gXz4iCeM2@l^MK3B;ml`^6rkIbRLuHU}EO7FK> z`hh!~6|`!R^j(k(bBH{$_pssmH^s&ez;$qZDCV>T(z{f5_w&Tb&v6^p{4% zp{9DVXSBqBf%yIlX(mTL%=EqZOR94dH0Z$|dB{?Myj`LehPStMDa3&pacx6&vHB@> zs|7C-jj*MUFGAZ#@xpbtR`4D)^hIAv&o#comFxu)SA$cPd#)_ z{P&weKbPR^*LcK!zg2Ta^ZuB2=ybLRn&zuzHj7~0Fr7@Hs3e0Auwu0w9^JE$H`tz5mPT`U3oC_zvgJa!9CR4Tt6O`9xTBeAknwh@OrO8U8Lc8+geb7jQTC&wz zC1E~~RgmQ}(1bnCVzM{WK&xxY-~FxQ_2((!=J%|&r=h@Q7`^e(wB-pgcA&3>gwuB) z4;c<6boAcMf`p|jG5E$a<)O#@p9W9FrXm?3^BA9vw# ztE?tSOE^CEa_D;m1WZ0XU<_^k-oDq{zFdUPh%?EHxTt3J5toC!}H zE%X-=UTh3A&b0Pe(QoIKi1`_ofg;*(6>_K%=Zzp}5KB+P=ul-(sgVMl2HIYOplNb2 z?j)~cQ&+FI6rwt0!Iial_CP?l$DWbtL7}D3ODjtCs{uD|^}V)}^|tF+(!pq(x%bH$ zt>0iuD{8bsAz7EIpDE*KMqdM->UAmEqp5C~={F?CdkSNTniv~q=7NuIUk9fGN~@^d zC$t0|SUDP?Hl>;&1uPu#6V$5JC)RM&mPA$aeF9pLbmI+ru(j%!1>OW#o`cvo0+#b6RS>tDGV2MI3`RvJX`%|b zEPaGlOs?$YP~k|wg#fm|RMM4`$|tD^{|H8k9F~j}v1ARa??k(uJ4`B|zzK?1EbcGZ z^-k9+wnXA$6r>Vj@qLh9z-}+hwF>DNA~^8t4DfvJMH~15f2jqXEV{dY6k)6WC6b5K z-x(1-K5lU2sc;y}-1K=9;o-FZdI9OQ?4T*HRTPyz3}N%Kr~$~$M@Wzt(Q{bV=XkH< zcU(i2eq+kRR-Mxm0TP$og_~<$OlMLts1p9exM4l1mT6B?k=PJKY_3r(&WT(vUog!5 zeW)aA8SgpzGSY%vI4ajmj%yxB09nOS81}J&@rgfcS5W_ zIgt#Z0U8H#h(|SeL@h&1!q)DtJo>efx8CbO?b(1r1~sW$`VuL!@Up*{G~l6J#fZ(q>3#`I6>l6ObupwO3S~2Y zWzMr9!kOJUZTq_3GFzpeXhCDf0A5-<<34N|XF_JQX@9sjx7{yE?zkW1`X#bn)6tUK z^yoorVm?!Toc^U;lQyU`@-sn(3QmU`&t-zPwt8K;t$Mx@uBqn||)EcmsDrQy%wCvtpAfuWCDw 
z9C2-f8GdgRuX%duoL^4JEhQY_nJ}^>dhKV66hCO-b<{p< z+(0jk{KQ@8$UuXQFmj}}E+l|&lShbtVi@1(ATpTP3)l`1+Zrh9Z*Y*|sTG>M!kRHZ zUu~Gd0t(Qr*7v*&5PJQ+p2iR*4g6c=qjLQP-VOC)uwdw5Q#2{=!Uvt69}|ncsG{pT zRw|RFBq(Qom91%d)p$yxTe!ITCtlXl0czu#GQC4_ZYU+#SZ5XHTLoqX(kplSfGhp0 zy7?EN>B>=yKgue25jUvjp3q?gE>uc-yO*I@4@j)zJd!c#3d*PMmX=OQoY~2Nl$DUe z9iWBb?VqOP1-0~wD-QFTGP*!_s6smEDZlJo>wPpK!C?wL^)u9Qtg5d5v1J1L9X?qH zekeA;9=7RHz52O%Y>PO3#RqLp@elWuoeCGF7{?g^K)Vi3MDYBf2w--K7LzBkyqrv# zdM-6x5s@1t@nDPLhI7#OHaLJex^36gulYsfOx7mRtgjbLSqI&Asp@I13%Iw#;g?KL zn7-n2$uwQ!kmAdK6F#n^rKiJy+Wo{E0TgU9qf|NF>o? z!H^KZiDF!T*A4@zLfk+BF0?lfA4a^?Lb`{o&tKJzX}W6K2#a``fw;}cU*`viC-Sj9 zdomcL6y%}XG}K5(SYh$8==~Q@eP|S|hQ3Ef;TXLmL)#7Rwz$or$pNcn;l_&uXR2yD zu&!Da-qmz&wAyO(3MIysBw5GR$1MrPi|tKjsUb3+!{lE$qe75w6$!?TbY)HAYAf1jbhmbUBU`-K?X2R^;isSMnm z63^}NDwBr2KwK72FI*KL;+h9~ZAKf$me_KZI*!p#Fsu5?czwvF<`T@CE%CN+^d)9_ z^1UucJi04%kWr776fMyrv6>a~w$YiA>XA3p>G@}N%(NlnA8`0y%E)^|WF_Mtq+zg= zV%ThJli*bNfPY4B4Ux)YB1Q~XzBdYU`=04`wZ|E&$p|H+(VPz6J;?Syj>p=rKT+`& z;|ThJw#q&9HYRyEa9EAZ6-M|hSERFU+L^OViJHml13_lyKLWt;&RK8=v49vUuX9#b z{CpBOu=F2`=!RT^^yn_0K(d7zdg<5#GfA%({3W+oLcZSpvR04_##p{hO-#Mws0ufK2YTHjy`0b)@Gbt;jEL7U0O80xH5oPVs zC!McIjOsCR?UqflgSw0}8ON-d6n0sv>Nqd>RFnJomXnanB#mmrEIbw0J`_z5x!_3H zBz4>Mwk4;`#hP(2Yc8ME#bB&qc}ppcq!d;n`9&GLnt|nw~k1x`p(q|rVPbR zicYznj)^{^{pM05|9ibke5k=Gdf{%n&V7dlgyQApEzSPSI?jH$T`DCq5&q$KEZUca zy=K*gl$4a5QPA#JvWH(VyQ-4vx_a@Ibi|IRgScgJfEvD0qM0F~#bq&^cvn!J#++5D zj36>&5nlw!$*y7p8ZO^==76JRra9h4oP(?i`ApTZHu~-Q?8VWnqw%NM@mZ^XeqMcxlP0Bn47rRo^^W<@RcOiVvvL&$47T;(8alu(bWt_?yvC z2QThjNTXy263PK5Xz{(1_aCa2V!*$pr7?SNx5pRHOw7GFnnk{cu~uLKvx#HXf=y8K z^9b=H?Hb5|?uMwhwD-mmDir#^*vY}lX#HIURN8qPf%=z!)?<2K-*MqF9gu9n4#h?x z>3c{krD9au^_dy62;Fv=M9WOp5_x@i6lT81iHC5tO6i8q#^www==C7@6_Oq^fL!Vy#fWG468$Cs^RdTjPd=AXON6^zyJ zRUNIy+Xd6^eG=A^5}Q|5SCy(yW^oO^_7z0uZ}hE8i279+w7Wsw-r6h+lCDL7>%%2E zhLftu*`1rbE5Bg<*kZN@j&d&>s^$RIMU7`W)5;TM>ZyV>LkEdl%NI)e8o83RxN46G zyWqC)v+fy`#}S>5^&W#pHD5+F^PEwRR*nui<=Tk+Txlt2fUSL6GovyIss$*I6W>vf zJ8Rgq})y(zmQO&|Cj)F#iRV|hq 
z-|_>Z&TEN_%$@`{&Igol8OFrL!^6vf`(^qo;_~WhLiJ8w{&09L@cnLdc_=}f=4jU( z>R`Iu@(uYT>sXDNA)6|l`=8p!$~Zjnh6APhkX{1!Cn|PF4mS@v6TLbP+hZJY3}IA3 zU+Ve^K~(QxV=nr2$ME;BP{5t~`bHKLbV!C!$1;D|FLK zP}J#CI@G?Zf999LGD@#;MU{Mz0ITSzN}VO`R$*8}Q3*5roEC5>L7I`mDeMfCmJ?@A z@j+o0Na=#B-J$R`J)FBDskuR|grDc#Ew8BJGsusTsleI|*%*Kal`IZdD-o`rCg*(y zHG-sQwI$;B`%XwSvnoLpms>HyU?UF`uT+9PFV^=_u)aVz9=;sd?vSA_|3Zd67d{0tjp!+9I6rH|uLHJ& zZG(38y*Gq9uol;PonO$@3(I~46LnW!iqC0`rQ4KPtXLi`XGCGoS(CygEohf^?7T5) zP>N{wi(Olr!4gMRsep#v%}9y*r64~SGBLR^K-GUsY6J#2GKWQPEz{4U6@jRA39CK7 zBAX!aXy4F`ZR@JVwlzk-nfyp7;2M;bYCOtB+oM;?=8z^r&}*>=FBrW}dt4anw>pD`OuR4Mi9ZI|Ga)G}fI9};O67`~SZGU4zRFgZ^acixJf;19n^bd zdsuQbB7{?46?|ZMyunO1oiH-JQk6dIXlRabt(f8A%?SL&5A8gwUt-geaa1nDv;6bo zS|_uIM{4q1MpEB9EM@F^W~8z#PXdmSBaN$ooO5CtuezM6R*x>FJ^4!~jN6N5Zn;iv zK*}36S~GKu+9xVrVCw)F+~Jm@FEd$2x-+Z$M%ASNz6~CCz{Q{5+4rQ)n?Lml(Y|3? znDt5+s{YC4mUEG$&lE+>!mzNbN4uK?|Cquzm;b39wwYt?L0#J{Mw)Y$HlJoG0BT(< zpiH-T(q1}6EcZ-vz6z_!yZEkMUGS>iCjCp7HJt{Iw}c{VEDDq56BV%<*$t1FH4%wf za=_29Q5{xGIcexCO#=;)v4*6FC*;;1*KrF^e6E`6mGTYm9lq2C=#@Bp{YlX&tYT7wuDfR;3mFrtti|IH-o3HxZ~?o}XS1_69tEIeqkLd5 zCKGfs0!QJ&FWH^x8?^R3SJibLT~D$bcG6FEzvI$C2TBkDc455{gXT;`#4 zuar}$viF~IiBkhoOeiF0=B!*W;`E~844iBf8Fi8J=?a)cOV-S_mP>*TY%)&{(22?u zN!76&*>+6yj~IM#vf^4+JI-6~O??u-2mWsIgb%13jY2ZzRS$@@w6x{D{{#B+;JT~i z3@T6k)GPE|3qsAY%P*Lzt714JFye7wz}?9{BDdXoMI7;Inn7}8%S`+AB>G;68ht%v z(8$LTl@3nwtF>6nrq%0$lb1GE*KK?s0Tt)c)zcfyBxpyP5dr4y3k+=RXyX=hc{P)n z%d%4_eEOpSUgsB)vM{TM`P)$^Cbr-o$r?oC*2ebp*VCy2bEF?brljL~!+TZlxp?sX zZ%~GQ4Qy&(4aMvD*X0+qCFlC^MjbO%cT7`+N~a#^*=7)+wyp%!ZkQ#`xBOPHG0Dgk z4kn5hj;#yY;vAueSL-O`6m;PzH5UpfO)5Kp(kg1K9gSi9(#hEcy}clFURzx$fpS$+ zlKDkPH?mEF{v0f!yvfcft~P(bk;T;Si(jzsPyOH@xaOZ4lT4dA9$Aj&ERw{e zNT;(~NlmurnIyV%;on5}(#7-a+{A&&*IjJ?QbfkOBN*$Q8J!&AMBVtpG%yN~{RpN> zxY;o2f%F4kN1wDM!L5uU&{=&!^)%GD7U32~dfJq3;E;{;4Zw+f=}$H##MG2$e-JBRR@AT~{L7xUTj4OXwz z`P{59@JY2Rl2Ifz*F`vVU&T$4OT8qGK%e39>-+r4F(aQpUa?}R8f5576?dx>{fGKeU4@ftGFJBK;{U)H&G)H+BIM6TPoOaj^RUn67D`Ybvdf#Z`eI#GX 
z)|jEIP}`j8*Bpyt;nP6;Rfiw>ehcy$IMSj~@cJ7v_CYR^E_T0T-BZ(H8Nx{!H~JIq zJxLo?#DnZ>pXuvpEIdO@WZ$56K7XKz4%_LN1}Y#%_gW-y4*NZUHpZ=Be=7o6Aubzh6^eEcz@#Qlk+|7B1T!$6dwPHe#1LY z+tzCvHb3mpQd35;+;Ra^ri(p8%fob};`2JTzl~#tSf^ld3$J%kM?)gT*Vj~hliuNQ zjv+zfkAAYlR|-9suQTc5{qr8beWZf-lI=V7Kt*tFgi=3_;;WjZ8jJmGQ%BN*&x)v0 zg5V%X+QJO@A7QBz1Sv`X!1mWK3j;F9skHZr+>XZuD!eidi9+!VjB~CskMKNc{n?!{ z`m4QPz8_1xkaD1^t!;5Ie&vDgoOWTIukenXo6cYU?CN!GSyuMCm*wD=d$!t&ljr~1 z`Z15&BJN`*TJ0&vqYi8vKm`xH0qc)QVY}b}YLRZTad9vq`qUVZi;4ksB%%mK1yduB zTpG+Q?Q-KDKm8uPo9apZG_y3r4&(hKy_xP!y@0c{7()NmO97^yF6*}w)L2ot3Ad-a z3$|NW$cC5;kCH|4S2af{^in!wZ~YH$1JauZ8|tL8N_{CbL8&*Djj22sX3fpj z=>v+6oE^{ng=leWA2E)mFGlLF#a!8nGVg-3sxz)DKE9E*h_i)C!#rsC!`p zG_yJIQ){nF2PE+wy(Slu`cWk^*7}1;vQkZtN{XA$DAaY7JdYZRqjvLvuqZk8aXs# zwMIJLAoMWeeGtvy!;_}$S!HB*X;)BKW?LMa@rZA{k(COmrm9HV9a2x&hKqvH+Ca%{ zxdY25yY{f6##N%a$KN*GT)I}9R$^9m8jMG5h98XfOt#LSIO%!dY<#h`Xy_Vcf3%;t@xK1s>SEQne?Oc0BW2X&h!1n_QbdMCzLc}koPC45vDJ{#=oSB|j}fKJO34nvO7ZAIe~dl((0>-$LP3O@PvV8BMh+P0 zLn#dyU$v_7OJw^tO?6WZpqGwF;!K=~mdH|#0KCkUFjYZC4Sdu{(K?%{XhhMXkJ2y* z$^|dfpXwzQ3u}4PMlyfcD&KtHr&PJ`hBEsHdvZuP!T2EM42eb`5!);G&$gw zdcqq^{Wdb~RFX6-?mCgmBP6*RStc2r{B-|$1b;GCd6 zZ_s9IJ}^%6(X1j2>)zyqNxuU^>t%6w2dAXi__x4 zU)zmr3Kl;A!_APO_Bq#4vfQBe8Lv~ad^k?;_i8Dx6BO>EtiCH6f{X?4_6d+AeVXDz zjV}6f0f!a>ju#nf^M+ibxF@G(c@@7pk z>$}5Xp@HY28JDi_$28xZK}2I^aI``OBzJv_85mNx8MKSPo&wmuqy-S!RkG#wF>Al{ z6ls%V#^U)R?Tz>2jdzbE0In~od3dzW+b3cYcfuOJYb;uV8@ie&iOtwRYE;`B$ffb5 z(GSoK!(^W+&j`ZSl1o_a0nU3jU*0eUB&bSrEgmDR6xRBj-nq)=F5sdBaDm5z@4{x# zcd7T;fCw`@JKv?~n?-2x<`{t3N^&lN4DFhcWlCR-S~4ReW3N2W`G@&L-}T|FBFvl8T8E^B&2h5 zVA&whnSAR8oE+eol~z?C@ug`V#ED=ntue!Aot570ri4u)9*}wJ1;pxc(r^(y6-sU7 zYf;~NfQ?)Dg?;K6Z1185_@)qT3ng0vHOo9YrOQ9sUmy-?LRPlOB5QE>Sk<5Po)LpY z8ivv3!9n+52}ONa98(PBzu`nhBgbCyJrUZpg<(si=6-jsuX*tg6~~ls&>0K@k4o=m zWlW7lsN-oa>BKOhwIwFFln`c2b;L*pUM%h41=n;w*T-+P?!8?XEfR-j1b$5i8uQGm zn+6nrD$*g^{1TgQM$O||(QbMf_CcaIMw^{cdLl7Jdyhb37bnOUqPoPxF&uDqS^vRD z4l@rkEzSy%NQ7mTPHKF!@`9<1qIk4Jw8-OMda9=uaPLp4eVbNj^&whMrmK7Zw7@ 
zZZt}XT5C%omz1;$rY5LA906P)o%Jv1>oJlOiZS0zvNAsYR+(y}4cN>gcD0MKeYLK% z`<#XE@TH{(9DShu{{X*0K);`huM?s%DKv=QbO06)3cwo1F^hF$aUXxI^^L>&u%Vbw zz?|zFj~PKpm>!;rNzrL$jK;X|1biA4jZX>S>StXNl}xWRh6Y49^Ztji znvTJG8kk!KMPUu){rlihORu!hjL@@u#|##8#ZgDepIHP-L8|NO@@32zF~$JuPVZQ3 zy~oFR=PTVqu^=>&`H+ZjXe><%PsUW%tEn`gZ+2Awxa!ys9W@4G8pD{vdNrBO(e<2$ zDUs>;%p(#LBZlDn=%M(Cb@KZMp90QbVqvM(N0rC|YSGoJSB*C=`tr*!QC(ean&V%8 z{dF@6zJ!|!{}$;hptMY>Laaa396+OS-`X^v(pEv{HTl<9*&_fC6q2~9lUUAquCGJT7^3gVExN)U~Whv zcK9XXzYozBqwO{5=f48b^=?-{m#1QZIZY&6;V2Uo z2WDzpeH%Jlo|f5n_Cg`1#|^{Gdwh}8I|xT;oUH!ey>K`@4vX&YkKDv`OeV`qZ@6;V z0F&lL*H_8wxz_L19WsE<|7O7qjq)n3MAec^EMtBO$hXs5Th+%C>!>s^c>$2O6WDqH z*szzpkIdg2xBoD2=6&&&U3_o1#rnOTAiZWQw!QEt%%U94WEs!z=ZQ6*p4b)@f;B-t znCU$TUwL_9$>Wbx0iBf_#JE~bn=D?d0A0dr%9i!y2vXU|qa$1x*16ew z)-+wSa7s$?mwtn=mdg8ZPy)_|W+3wyUf4m|+UuW)1%13Qn}A-iCKHWC`DoB|AimyO zL#2KbKv(!StN6uy>Id2S!vlp=9#eBu9a=OkQ1wys%{33af$_X}1v#(1Nv}K_)4bi0 z8%^UjIu4m$ftYo#8?p$_xe3GY<-K0`mf(KzzrIGt(i|Y0AiV1+aN;6xj-J&SD)Zya z&x6N-oGmDwG#lBEJ%{oB;h63lj5RR{EbnNn@(iG-AB^=hq}2s6>24n^PJIxkzn(!k zsYNa0(^yU2!1&9_)zmo+3S)+&psWDeolOJym=O|I#{UAwhep$I2&H#!13JA|>I4)C z&^0SXbw`J_QSvWi+Q>1OKPV8p>3Hm;zRK<&K*J~lZPTfz zw(JLX9AaJALsp&m0p6(k^$&QH{Vc=T(uFs(ZrZ$&-`aW*ZTw!tu7fDt%z9bhgjS7K zZ_U$-$@L=cY_dAV#_JD4hp^fU1Xu2$4 zmaoh3L_IC+ni`jPUQ-S1JBmq(BQVb=30VW;jd#AaFTJ^bQP?SY1@c?mXf>0!@*CR*@tQjojavlr5=P^@m{j2G1qx_Ani^~A6wvF@s9-4*>H;Zz z$*pN>=OtmRSZTafG8Nec=+@x7n*h4%ij=>mvhb0nIx0XaxsLoItSL*~iM)%pFm`KHjoXuvikC`rWl}Am>(O|LCf57Ebgk^kAtaEiA=vb^N zYvf`Az=?bmOrC{>LxxgTJg{J(8y5HKi?#0lST?{Pi)jF6KKVQin{z1DXn1C7O-&uN z!kfxfth|+CHhFJAz_sRkGZil%;BABdnxFdjIFAPR6uR!|`=^ zC>Cjj$EYM4Sp*{**Xvp5mjoo^8^2hb{$?t}WgZZ`s_A{3QU6+K$aw7SC@W{!${<}+ z<_qL5T{3{Ku9^)5bn#Ce&{giD>i}I{S2VyUj~auyG>G!tgRnO!4vYKJFpP)?cGDo% z_>dBog{c>I{kX|db*Ri$mu@_obC#oF*Yhe{v1Z7qB~W!t2BBIQN|}b%7Bp0sGp>!O zs4Zt2TxnrC>;+{6*qVR7Y}qpNyM%RI{pd@UEHQoTB74QcJB@zcfG$}SND%xCnCmA6 zQ^2RXVs4w~__uZ`83BEMkh)4jkQ+qsh)>1jm_%SFy>eFW zW+@W&;VUgvvc^68Ay#g8J-1nlU1;QUTN@hk={0JTpZ)u=Z0!A*?iYydk;%yI7evD@ 
z1RDq0Piq&6*&}4zi_D384nm<)iDLy@V}qkyQd(5JrmEtgxkN zUsO~xA9)lThmOWNg3E$?ypZJ^2YfmOv%R9QWKam!g~ek!%i)XAFljr+wGjoUXbA9= zHjsA(^ZzcOD^Ar^u+>aV89`Awr0W88)3>Ry4f1$*9l(l*pT{>eWYtfu!Fp>v!>}om zbtxnc-?|54$8TOIP_TT?okioxBj_kE9EGtj65RTy+@3CxIzGg}z^-^Pb1!~I5SlwDsQCEdp=Bw6K zsG*DrutjQax6`9*8W1;e;za!QuYWaD*ow6_gNxK^+pGm{?l5uyUCqA~M4d_)r5wd7 zS6$QMEUjAnq?SUn`zWRdr6DV5FcnG?R^1hX9lhho*3_nTtN;RG^`&p@AA?Mf1guPb z05#vwB^aE<_a5}*;~zjSJ@#$B$yjqwfEf|L-7Om1Xb^1ge~q{GBgl8*9WFNa2>zP~ zMk3oY1}j4|@G+HxroyS8uCk&8HB?pxuqAKOX=J5IKeaH3CYTt()kEDNz5R~>-7r0t z)@G~U*0dVPGrC%a>ScMD6*B@08Tai`8JIUH3@hVC06UH%djyqAL@E~2Gn`Kkd1`nJ z8rNl`_0&l;UZJ99S~O5m>z{Wk1$tS^m&eYvySo6o21iZrMQ=Ge0bfK?5$!)nFfT%B zRS92dH!F#?QX%~#mIBdH&E)oPv3?7rH>DrUF^V@2;EY#Bg< zq7S{C#Nqg>9|85uIb1n-$jSq;;?AQnbYx_>%79;ISzCkXc-6ubCRJ1PBnx^UjZ}KY zSMkO@_aZws1&jK4W2sLR7KNk&`_ACN%kP<~mCFO@ol~*T45k73;wx0v@_L%kKw!6l zyepX7fUbKtlCFr2Jdm|$lSEW&s@L;e*J`zfe+><2f#k7MSoY|%m`j)KcHR2o^|)k<*cI1c>z6U>NA#8TgAEbkwL)ovkXxROG52Y?ppR$-FRwV7un#h`}T--^{Hzvx_$3XBL~o3fG$#zN!c0Z zC0&(TGK+5PKZ@_7hq59^ACMh5R2x|j>Tmy7f7J-$* z*MJk%r!(hHGcK&`rPY`{_%Y1&Ny7ZQy|6hv6?4NK4rWbeQ<-zdy;55f|9 zU%BbS@qJ3VY7^?toTowFOv{=UwiW!XlU_F76r*kcom^@)x!ZuA6&!=5vB~%}oPf@6 zOED`o%1do%(w*K5EU0!0Mn_{JFOq zzPWFVX-aG?sWx7b0d(cF@}El1E&~hDsdyQ-M8ItMN?A8EHdO9X;FVPbtiND>Y#f$& z1tTXo0dxK1koCYb^rFrJn|9%QdRB|bYpLXylc&V791b#`jRdT!I)34Y!Tg5+U9u^8 z5Z6+<>R3bLLeuhe&+G=MfmMZt$o}n{_})JpEBs@T>&LnlG1QD1naA@f@q?|Ez-s7) z)mEaJ#%yC(sHrq z&!c+bQY=Xwjz#`7u9ydTz63hY04xd!!|bt-q3y^CCZ@$0(*oTF86*5{`u}Y}mxq4w z5~c+*FFOG}_$S@uZoSTH`p-0)?kkZDoXb{|`?Bx^EbZ@uH3ak-J^}bJTwz^g<*72= zL556PyTOc)b?kCU#1qYsXsWD3bCp`tm;^PytC~>f*YKYD%mv_@Q*Jk-+hDDG0&;ol z8VuKU^A?daZS77Ym7BDQt6cO`@+y{M5;&G=dV?0V$lZY{L8(|DI23E{4#O7r1mxZm zW=68B$=v`tL00|goBPFKg=Ye$1tp?(?o#0BIn3~k!}7k-d^ZBSywkDKBhE~5+n~Pl zzTpPIHIU9`8`O99i!r>*b=)d2r?IfkJr;}n63r@1*ni3Zx|);(G?!_BNO?=)cLj7Q zG$}Tt##9miZvZ;KAwZXcE!J|6&5X?9lV;VXwSH;X9g&6^_qbz9YzA;zebcm#51qw0 zD)5~6p{99qrGFUyaBp9Fjr$D<)fSh~J8w08uic8iUUqrtW|Io_f0k6Da;f^zna7@C 
zX7uz-g58Ji0j3qCrBth06C9hZ)xwmoQfB{%rRYp6h028I_H}7)4WcU6h-n2z!7DmR z@1>Z4uc^UOwz77q=y0`>0s7wO^XT}gDb9d6hpqF{>66W0Z5Ef7=uO|?# z7!ZKfiNkT}+i8p=ja-6&lwVgs*SYvQK-Zzmsglmv8kT}o9x6X|-R$Nj#adB?lar@n zYFskr2WnBDP%|ZIaX<|I@E^UA{oHSX3j{%}#jQS6ddEtCO;NEmXPsecdZS2b*1GdD z9yBIKdE<%>D$A__e&XgD=9oas+*AOKsJ~1ju8s%1fI zm0J+<-ujUFQA*Fegmq441U)@1rK?bw6q8U50zG#bv;3pYz*zzMdXF9e=th`%R2mg= zoy#btOlwfwT;@}@Z@BT$7llRPi{uomkDCUS_KP<<$d&&$Wm{1G0V_{TJ!8}qJ+aPB z9aD65ua*z;e{&_{ZHLQyT>)C>0&%5H`D>HF+$MSO;wtJ`2hi_8G6gRo6q6(jQUj)0 zS|zT2yyK@aE;7S-l$iwOP2}vmgRqmH`Yw9s0(9-NCa+ur;qnF~(1Xy_jAVK@CUJ2MVAlmest2upV-$?ACcQ3ZRae?>_eow4*;@7*y z63pXW217CyJVnK(EjKkso@uyLS_YmO&&(uAvdD@ip_c*>OPzIk=-s4FKz9L(QAn&U zHlWvO&M?ob>Gh~jv;D$3tp4pgnD3j6T#rPo85oQufl=7>#vgzamk8)ouosGP?z4%Q zN9OgOaXf>Ztd%mbjKv6=~X+Ssm}lNqiR0xDm|PglL_DV9fUp!bF0?yoVkK z&d|#vh_=_)@;mgp)yyl21-fIf5h{a6tggKfcySkcc1 zi-IEXMIzHe-YpB#) z>#a(3wHWOicVbT5D9n~8?LQcq|KUfkIug4E#$vyFk^x%NpYBJGlAyggcob%aXW;M$ zpRf{N!Ixg)^!8Ga`^yOI9+<-C|SP z>+cRh)-QZ<*ncQ;dPfn=6R{y`6u#{hfbXN}O=*62H3dscFS4>pe@NWKE`4hR*_?D5myklXJ+yW-)UfhuJ!`GRcJ4f;4WZ@UFWj_JXcYIakU!Xt`nH3(cOMg$O#C?%BWa;77zp+J8sge`Ms)g`a*Ze7$Xd%6VQt*P`zjs zmiopb&n+A|p5a(Z1ve>_p2fCi#s zIbJ@<>^Be_e1fou-l5hV*R-h)ZG6#S^&eUz*$Dttu3AvYI2dG&yl!mLBsOMmMG2i> zDkbe~S5S^c4?KwlK7q&$49E1oUYHk~fzk!5%(q*#L5Gyu$+MW1F$!w~Vhx}#=;w!} zNy7=aXK5ucR7IF1UmmYLC&CHKmHMa@Ky&voUVMn7vnmqJmY zt2vUgVWdKXhE(u%t#xS7^oZ&vd>oR5FYgYdaHnGVuYw51vDirP-O`H{`xk!5yEh!$ zJW{ZR-g;)AIC?Q@m=rSse$4$gU_i$|SN=MGW2N^)myO*A4|9I^3NF|_061bzW zn=H?Kckc+ZTiYgvoqKN(Hq%Q#5I7Xe{@o9&>8q3VOteW`#o#$OPiY6YO0VwXCY?B4aU;` zVaV}_LRMG;W~B|GcYd0Yr_vQj)H7CvRrqUgJZ8BEV#y#s%ysv{%%~U?&R<}Zfz~%~ zP*+Gd$pAX3b72{RWaZ*jSnL;T76{7r2s3~_IgB3q_Co|iep8(S;FODpIdYk(ZzQEP`j+qEfD0@g*y?4PdanZZrk zNliIowykYyLjK~Fm=ejl=FfUYLt^;=Z)_hu27e0)AQ+rS#W9ux!Lmln;wlgop^Nof zB5o8s+0+AuuEBR|p|DokW7<@3$}8qra|gA6TxkuUav5v3)2cdDPj9|WaZ^1zbPQ{r z|1JK~#}^A&$ERx`=guR{;HwO;1^HLb6UYU2lw}$@W?-rcRjm$ZFBuQr2K4oVe)@ne z8J6ss0dXn|UAzI%H4Rep?zKRXHp|g6x6ndCOp9F)q%VH_@fx825U4X{hE)VsmyqIo 
z?by@kXAkHqZ0)9|vXz&nrZ|aNlcun+LRPQL!>8_1SQR*!-dZwN_X@|6@DW(^AHLY_ z7GnT6o8Z2lK)$*cz3S+PFgJ7 z8vLTIDyc9gr47gULBZG-H5hBCu;)c3qIU6ewP~T9N}|GE0j;tDl}ocQJ0cNlBN8yZ zcYl26=8jooAE0-5j7o!H>bY(vz;3qGymm9FtfT@JOIFbnR@a6$@CY@7p(aM8nOP2~ zRkJ5QE6t%-je_l({{KEJ-?W~X*itZ-Od16xV@1@(566ak2Aa9?^FtD_ z=r;s`GJ1`g*WU%uH3+N|(3w9Sbv$9f1*OHzDxPS-R%JD|zVVm# z{tmrk-fwKM;v!jTprN>NC#DC*U^x|ircXHLcn0G;de11NQm*2+>sS`b3r1YvtoJm) zsIytN3#quoCY|KU>MH7g?FC+qmTH2=g$ww0$S5qK0kx=?FV^%8z{aq6eC6eXEr0kO zfw3AjS1wrtBWvoeQ{OfIWwS@+wqmJ61kzrrQyGE6hlDM$ASNHP@cM zi0OgREWXNa$UpvQ>JdLGSr^Np5i1_5*D~B};J5UV-nEW)9`-c)**CdsjByQ>j|4%UhNhm# zT5nQ6ZCNco3(G)e+=FKM)@8p6!A6fHZ1+m0at+0f0rafBlCaey(d@Lgf*$&Kdh6c? zqyWb#v}?DSEeNv-!rKSLBk$h>aM*7MHuj3fj{eD5e^&%{s>z<-`LIvOR`sdNZ14RvVOTu?37n!VX9eLCMO+%(}X4v56Zz9CjRD$0o? zbtc{oRGMX)=Uh>TZ=;g1I4ll%;qjR3>4)jbsi@9cOCV$FG`GE*39y@qI{}>@XW7zK zW({iX%C*Xq0Lpkyh}KlD;{-eA8efnXZL-pp&RbzC-=wltN>)xypCWUBrx-bS!iy zK+{{?6h9pA+~WouW%^34GF?ryTPzREG!>t^guJJJgPHW^m(#FV7nzJH0dY9}`6QN6 zJ(^2s1PBls83O{JUR2-X%@n4th!s7Xjoi?y)sAH6=g-GSgM6?(X*4!^S_8=PV$<MHjyww%@|&| zh^ax;2ObunZyLz_{D}j)Ryh!N0=h`!W(erBMvOJH6S4%%J)XL#~CP(EWTFn#GMcJF#6dKic^(Pv1$N|1l3eT^)X&7!qqt|@WsGr zEb>Uk?EmeL4Fi*~Wl$1U|1uD}3F7M8Uiv@Xkol`X>s$&++^fF^)|5g4F2LC zi3MKq*gzvdE6Hso*zX&ZjJ#ikW6vOZ>UTwA2Y+wu6^V_%3O0bQW_JyS)3mf)?>MY- zkH#v$BrMg+bjkEi7w56E6r*}46`NW4h}DToz^>%dp<;o8;TwcmLTsJ?YZQV7N*0*t zc8RWJO8pmYV5C#gKUStoA>=%oboc?5KAuc=KQ<^@LL zd#%K`pB}nvnw6%1Nr7Ec7`MWTJVP_eT4lqZWF0K;n2Iq!9}Dk)04x0?u-S`7UjPjp zdOaU9PHMshy;Rcz!n{`gt65!r=qhRxQQ=Po<(WTfiwHs$B{=upMAP)UT3sF<@tE(Q zh-vpd$_MLEQE;B$Z!x=x8B&vdNi`}KFUPpR5G)D~!-{|iOdk}2&lwNovwV49Ef)s- z1)41T&OX+Ez>1=}xN54Z(N<1_@8U&F8a4`_vAlD9V~ls6L&M>#0p6%txr*S){Aj4L z>Zo3>XD;%na!~CR%b>9bWJ;kMSv39Ye9$#8Q465xS!+t08r?O`P3C!umDP&URV{ubt}sI|h)3WWNY1?ZZGu0?US zxrLg|hNZ)&w3@Lf1oY3eQk(!?`=*;TSaN%-ta8?O#iqDB`R@U}D|hO{ z9!3tJvtnwY0d4ZHqOPx^f>M9inX8yP^a-qp7=ujrRBUul$L0Y^*g#KQ)6zEd)=F;? 
zI2Ab(Ieo&g#VZ4!{6GCLFO7(^pS66^5!#T^a{Z8%NAGSOL~RtBa+PExOt%wW~*3D z1%#%cNRxJhjM67{1@w|-7NBeYY^_)~FDL@v#w1$<_cRDiO?d=#I}6p)p=kWrqfy9G z)EYS@cTnZU>tYixjoiL^6E30g! zuJqf=T)=D!$jBtGF+D0bfsb0|1pb0ct*j>?wdxQ{htJxq6$usp5={l=Gs+ILcRopj z-qb-?(O73@T*#YekS0Qx`ns)8{;m(b3!twV z7>aDKa4ZRo#wXgj?Xm^vDi*6QD4jq4sCF&cN@C@iq9$3e*d+t5e7dIhTtIGf52K$q zpi>As>gvrx7MdwttCfbdDqJNM(xP09_e#X{UQx*Gn~0shBe3G%{LM)9{hk@f`p*FD z^G`?4T>)5ocMvxF4#C9U;n?=dN5H1TR6{lf`hiP#V`5}33Un3s(MuYQ2vdIaO6u@9sE zs-|v9fv}RZvP%)D>0O?kQDkZw6uT6?nspn=0$x=JOUYSz&>d3P3QuZJVOzD7tRwGf zBafQXr%^CrE^?B_Vv}dQX?|M}8iFY!2BY!lNwn9{S87rdoD?O~K;x=JD@kdI*pufm zXXyP{=;nvD9)ZZDLioy`dHC@-DvctO1>GT4e&vA~6xYQ=SChN?&sS0T3edlgOajy@ zLhr*`g-Us^^jk^2Rkq7bqphtzKUb0HW+N4CegSs>;bTm54>4=2FTKYf3*DpeT?pf| z|M)dPmq%|rbE+*DpffnrbgV=+sZY1I5=Bdw;LCtuv*qLlk7#81q+?p>P|AE0DlVR* zVOz#kJ?C;Sx0qPLIqt?jzi9mzV1l{S+;5l_sZJ=Qmzx`4b_-o*I+Uf2{8hc&%CX-EfRR!9t5*Q}N6 zi{_R(lr^fqn@Q1BO$IETtTNP5EUm6QkTMsz>yoYbwQ!B;59fQvep243uH3*jm>Y06 z@3o+W#Svs3@y8BlflOUju6c%>&-xhXjNg>u~y@8=$=+r(O;ty3Fw7p&}U~oU^;H) zsLv@D;;w*h%Yu;yK4|6F4M46qn@YlGdJz5W0o^pm@u+6Ht7QZTH4mS@g!NCogGHJu z<~IyWei4EkdgeRb)3Ci?5{wnwK%{rqHxRo5Qn2t|PfQM@lGweSims8~BNaO1 zZ~AzpxOgrvihk_@sjRKKN@Xque(WgAF%m22 zWop3SGOrlC@2jOeuQDDi2N$3>tLdHJu5zthC}6YvBP(e(i#n9&nl47;WCA@n&4J$P z6M@|Re#i-p$7cay1QUi|U51)Q0w&)n&|Gx}CS$#xH9+UH-C+h{ z(jtq8I4}Y+;4?WK#5=#T( z@oB{MfUf#u0q&1K>U~99+57h%x>$eo0^?@bm4`;hxY$Iv=g`l-$-S0dpw@F1)s+xX zv>C*K)0h=A8Vd&|BfDQB*7u1=E&(@}fV-h@JT~%N+ooy(nO%OPvFx5WEb>mr%9KY; z-}(M`|B9I*8JI(lT+eUs6^qS%qUnuC65u1TzJDaPX+(RkXynmb-!mu`yMGmpLw#r{ z{DPq8n`Ty>(*iPCy`r$Pe=O$treb{Ta7_R08)z${f};{|($bVaB#@NjPVREr5$w;T3FrSoW^G~PL@-tF6-}u+>k_kO;E-W4diN}?Np4a zmKxef1Sfr;K&yOg5c)~^6<(p&bwA%v#(Hh2(=Wj6C%#J?16`y{@_Dl|8Z)%2wA$pc znzOBO{C2apRknfyvHGT=@e(U?&Gf*$fq~dfK+os63q7ui2=c^b=zab=p# zOdAQV_7=Zz*|e-J8ASc&myFy#Az0&|f(fxhDbs~OWxZJy&;n-}Pq%U;^XoUVQ710e z*eKKWVOIE2T%`fjexjBhnRqt}(NCLx)_`sozUpAHHk=Tc7nWnfm?uqh`x-xb=zSBh zv3ESyy8u0hEU()DdjDk1|DPeq@*078!NY*18|ihP#>B7`OzInkT)%V!=v(OBYjwE| 
z{h|$+3)r{x5vbETADBq5pW*#m5cc+pNA`b)@LRDo5<-yKKL#uAjl{8GFW~L}Z(q!L z@L51hgGxBH=*>;F+W$RL{t3p7!r_?|niP%dxIhV7(c{=z%peG8wX@yf^m@GlF~KVU z7v?WTvy=prVDz+$&2Q;IvXr42L0wpZ^Pf${qKKhb;ucD;As7q&J+bbw`w0d#!sty_ zm1x5h7NPp%35;4Dt(j&!>%36_Qm)r^Ik>)c9iWR#Yfwr+pF=>`e%LF01L>iM;Cl_` z*+D>8U+>=m`mM2IRflS_HE2z7Qqd~!w8)W`Kh>7F|`%ajrM1vhrWbopEA%Z9s3*n&>jJj-N*MYwutt!8kiA6$|@%A}cBd*=jPsz;a+l zls8t=3FABR&XpJR$SN8e4Dg=4bee!o;N7qVU%P9|+eqw<8*Yq)Kl`{mIC;!`)i`fV zqMD!EdBNm*XFv5dKsSTZ1k|PFIR54bSmhpy&29nM;m$hX6N6bybN%`)j0^qUYI^6D zRnT<4n(}J1zlzDm+f9svC10uoOqGO~3dMq2n!CnJ>O+8bP@>aWYEAN_z{9*7~MO)U~MIT??xx z(9hmOucH?vnJ!Z^o|^wo72uQ5G|UMdj%jy?VV%oE-$bz8&^G~V-Qo>z=ue>Um&}Se z3~K_%U{2Bl1oM-){O9qQ;6VZQqovy~4%q_;(gR{?3|LFX?xa^P-b7Fru&@4wzZn## ze)I$M@^>&?%~{_#h+a^@V5|rmf_Hy0022m}MP)V(2E|hfn%25m$?i4^r1DLDiULU~ zY*lns@Me0+T0YY#i6b;{;?uDzAROyF0(BQRIu7ECRF-lf< z^?<&sepLR~4vgmIk}5O*eSv>8*188^4GoZ`eqoqOZ&vFHm=*&~U!tdEBHoSsw}$sL zprxH*n(!DU9$EK%wQKD`mzb zbODNKc4k@BYA~Eegr5+3M_nzIv_Oevs^@h= z)-tKemMf)Kj2DUgEBH!=8I}IxzJ6HX9fptld7)_0D&Q)=Wkgfn8efvvCYfLeT?cMT z@fu~z@->MrtHAW=G_3QDrrZZ&y_-KWJ;LyvS19AHZWVf|+C$!zM^+aNPnG?*n;4&4 z1@v6E5Nx6&aPYa`Gw;gLQdv$r-I~9zH8E6=)J4$9d@~!#nC5#i17vW_QX{|iG`k;r z6aW{Mm0F<3-`bdH*{HEt?H`G}z9Bdgl6F0y^F0}5jVwLOzhxX<&DH{Rh1=Cb*V3;m zLz1zymmk)7N1F|g{v1og_{wF|y;e>GQFT`NW%B4p9#ywx4=P*4YCteU7IY+=jfg}# z@VZ$#)6B%+P5tIUpYKWZvj=p0wXvp#Ml_tu$EM$Zff;f48;^S)fqPZ|B;@r;#MXP` zv6-N*-O^SMOfVk%hJLA7eoqo6_K8LP%uL|m8O(|sjhSxI*y1r5S-+q+IWQiXgVc`{ zORqc{2kwf-zPl}NJ!e3q*=BX?fH5VGdPj&F42Gm#Z4E`prLJoR3C}2V?dx z`(P!(V8in-)1xaf>vl{3Nx|EIZWKNhJ$03(@O4uQmz=tcwNJf_Md8U!A@W)@Q{CQ;$+gsu+5TlEvPwu1ed4HrFWic`n@+rrXweH zIL5~_O#)1QyG={3GGFwnj5MXpcs)E;HCUVLR98@`7hl2YZ^vVG+)%9W495Zj@anV? z_%?MU@71EEfWXU_s;X!tC=3BPS#qJWwaZs*JSUA9zm$(J8P3*-6l4?3H~L3lc3)pi ziA({`T|iq!IrE1gVFZMEr2c3It`Y0Eofw5%_RtCFO9sYZ`(tkaCz;NoVszBhp{1?? z9ReMN%~u(U>q9o1i&nT|lhL!?x8ww%QY}I? 
zXzgHr>0QO=R#~s}XtG5|7n}5vn;IBb?fb85VnvrCS5sJpqH~meJ=cTiXAkHqd~KXT zNi`nEmk}c|GhisbxhoWFLq}nSTN1MSBw+hp3D|Z|BC-Y~B6DC8*7r?9Zod>P9+*yr zJKPL}E19tv6a6W;y&|x=cM>+=tr4Cy3fwI~-_|=8M}8TLgTJx>JzM*ryVKk66ONpI zQDzsl-M)jd$vp`x3G|cthnmeYIt1od@=1`r?x|S7KAgX>> zEl6To4Ky!WhSIUKm^x%ECb$QgJ)|>zLoqu%2FTgKbP!zWg$j3!H_oH2@Wldi84C4G zZ%YN04GjRTu|7901It5Vv1E`pmIjC8YmJ&caE#w5nkX=xF7tIk@0 zu6|Af<~E@7n`)L;I^-3b%3gZSLtmq&W$zFy2#CP=FoMgDBh&=+p!lwO6Qqt5jNZQ$ zHh%<4%8MF?#>i^}2(tug8#(_B7CrnR7KcWeK~%Z-52;_#)U=2^ee+}?6E9uo7I%&Rb`tH?K&ajNvMme!t zUo0?J!LwAK^1W47IPKMF zHs#81UoFJq5yP>-!^bS>v#wtt)_C}1rdrgrVUO7g)@pJ$Gd)}aMT%DQ5h zd8K0qjZYc(GAcSGH;h|VU9DMYsFd3(&XTonQcc<-kM(=4%F|VV2Kel#iPa)l&=9P0 zOU54ep~kDv9*|%_UH$I@^n<^OrI$`GwV#$mj5fQit^HMi@ze$CYiJBi`ycMe3dq2| zCtkNeDCK$g?|S;c2uw;HW5yL2#d%?;(I$#m6Y&tvMq zP~-&0U`{U&EQ!v*`WN5kgDi*UR`!ZdL5 z@KIzB8G>armN$8ZW4m_@*1G#+R!CGQpc@cZi;K!c;I6#^l)xVV^x1(?SV6;hb7(A< z`vqY^auTq0GjQ%KaQPC;w~R7ME~i}m*rQnK=*rTSHW%@^%4#!ZQyQSGsSan$N>RnA zsz*>#)L9RE2ss|QN|m~$WcM*ldgvv5a#w`$x^qLvV#eK(SV{n2-8T`N?nyGdrhl>l z>^1#TvDiHoe;yPK?5AKZ$i?EMhcM40#kzlQZ0BG)4UF5|hk)KI9ygTP-_@`b()0F}<5bIo70l`GmMp&VwC~l)f z>eDiE<w=hX!3mhnO5M0Bli*Q{s-nV9+%YYO%^vBjObv4FZ9R zk&D|+iq0(px*03b*9+@{qp@Mw7_1&V3d@p4VP)$5Sdu&%S;HR2veZ#nkvh!q()1yi zH)I&*3>}6AgGXR-`UosdBc~6;vRgQc^X`8Pi)rl53{FNaznw#4d9QCM^;00Se(7!i zeP$@j;xfHyWK~?*GAG0AdJkQdisxMc-N)Loa1)J}_1b8O;q4v!7&hJa7;;8G zhO7|}x%iP=I6i_^LmxDpIpjfP4So=7Gaf{Cx^-MJ^dZc-{|RJ&^cS>iNnXmFS&~@g z#<2jd*cng^*MHONgnid8S1kFUMGksT3Zb{kvk6M@+@U?#*5(bj2Hn#?!` zVAdX2g!oe|K)0A-$*?+e0$)eQ(5OnFLXAYGdo(7~D8RmhG#u!`(AzZwt~B`2WmH)4 zX)>6r9{|ZfHoxtfV@|rFa?>z&ExvT(EaQ@a1q7$7>Na8S=t@k8o@l7>Knr{Fb?en(m|3KC#_n zKN^CRXYpF!7-aKXS~5GEydoe0lLF(g?v=L~A5G1qOjeZ}P;ahNeP`4qTs??>7Jzc?b4@E~Vy@)dGbga538e8v8#7>5_^IidZ z0&@B%AeUbMmOhEt&fjJU+1}yUac>m%(s0<)KarJ}UVi8ZObkfH@7!6Lb{#j(n6>nb zsu<~Ff}A`=g9x`Sexahq~%~R-DRmEPv`b%nhfa?;C)X1H-W_o*u^2&zq+4 zb}Hm%Dsx5MG`qXl8U<~!4d~idq=tq}eG9!p4csX$#mdK@!NQMtOETgw* zwog59hDJjz@vH{*{7$2$gy};DL3#S4kJh17yQVRms`F+(sKsr>GPIO5DLpOVA_GQb#z@eBYdv!wS7t51 
z;>ZLVzkbHJS{I#xztFqWwy%{J&shN|fqK=IR^_(3Iojo2Yf0duVx0K$JJZFnB{B)C z0z)xBFbq?ok|_6w_)ML_3B7B`H@F=zm25m+%lm1VxM0a^4z?zDI}VDCmh=& z(y^Lgw~9t^wnv27nN9nq%e&3?48$s*AguHaLY9||%^nKX7*`9xubU!>to zpGX=ikyy)bWO+wnaX<_vMzt{!n8WgwAJs3;+d1E2Hp=p{wyX^o0U8BKeO{qVb@llI_S@1&& z?w=e5Vww`Brf~KDl8es~SR1GywYbWqd{ix1f(e0<#^cybg}Rmo(e|he{P|b?vE;F* znTJeY1&xWTe4c5jVSbyD#`QExv{yTmP+6}2b*87h1b_1O!-`OPEFNL>{+LgZ$*5bk z4!By3^871|n+52~Behs)%z|05PC&%#Les_?8j$qfH7!YlkLc1f?o7wwqXb?W3&GJ? z)XN7eyr^V{j79OhMG^><<}+>T#^8UEf)k+2LpOjZ0Ij%p02U4M#`t^sW4T`xmJq0x(^%DB z;;Vh>rL!Kb^$DX)&@*S=EG28Ro=iWB?ekkXYD-gjva{TH?h%P?UcuB;gRr@mFS73T zG=M%M@x^~W~qfz@|;m<5tnd&FQ{?0r~F z8DGe>-U@4M&O931bD5`e10r}HfrU)hLgwLo)}Lv?arh*J2LIddSWD}6P%qFikX~s} z3!BQXhtbal&{eUz0=hY>d8nF#dEgSp`;Wvt_hDEQ`Us}|XB4vg4aVAglabq>piHlO z0=@3l{-d$%?qn?PpH8Ja6v)~IoGiwl{gN?Vdz%M6h`Ik3iY?wFvE;5Oef*eINtvY0L$L8P_CYzm>N zI$GLO(rEh8Zzm}$$z_FwXsV&tU0jTg6DN`P_IswkTwA1SBNojaUoiFwpr9DF#YJYG zv)P+kz{&um{0K*e?fKLOT*!4);H8bV7NEBh2+NA08jER|)KI}`BzlWLo!ne|^J5c1t678kbY5v48wt4DD5Z`}u*8w`XwTV(-S2#e z1<~o4(=PxU-3jLR_+zs_zwaH6X=%eznUiM}sb)De)HM?8no&zISAKP9w=<1GX0}#c zEn>KpsNb*wGs9xdDB~r4d@z?@-B;G2V+7|IA%qsVrpaxCWa*9TN*iPm6;L}gXzIh1gI!X42>}?uo6#YzD*8K zxTT|5V-+USyPXmckF2<%$no%_PV&Sd|2S-Az0LCS!<2B!etr?tLa}AKs+qDng1XXI z$24mn2i4b3i0lIB3%nz+=3ZapnI8#Hp-za#G8&cR+(Yn*X9y<848CQa6gwE>Xk_EO8xExZ-V+J{@&O-5$pS= zm=W?iYGE4nr|~{AM+vYJ|1@Pv`BZpwd^pD{rW!{l)YWs8C&rqhEh-cCMO74?0guGByo=a`PdO z)S_K`sjFX@%JuI6oyyk81LGkDWcr`=ZL?SmWukL)mPmRhfmq(p19N)2VR={*kh|R& zB{<9QPaOv?5OB_s&;Ntt8Rm&b5!**iJv>9!`^QhBanTy&KK_a|fG02!Gkf_Xb3hmk z%1~?#OvOUaNPOlIjH?Tl8bhV#YJr&|s90CfFp>nA0exl_FXD!lFQ#z?8cquy|24Lx zkHI{8MJos#i>dH-{OxaOJ;(Sf01ek~WEl{s8;o*pF@Ub{HHuja(DfaO_e z&Rm0zye+`Soxr9&z~o zT1h!qi22G)((N47P_oj!c4icJFko z?UjZ_1nCu?BQUFf3YK_}!tCHNxcKctGrE0B>;qU4^9W|$8;3>zCl*^gM`Eo8&-G73 z7D0Pm9}Cns5ab2yc{BzD=(&B<45(-SDiS&Os+m3B%y-|+@9F&&0rw&6aeAC(wZu#d z+@#ddz`ylUYI2lrw?bpSngLm4dfZhz4+3kq(VIxb!v6lo^Uy{fQ(_5*m#&&-VI{&C zA{uc;#cY8t%Sg(Pio*UUZ(Y~5Foe>oDN1G`9f8c1i@=GK7@w4emBA6%N{>Z=_5HvA 
zVD$#H>^p40LMle#>Khgi=v?2j0Y)oYYe*ac%?w*-rFQ}PBL7Hiq{2N$!{l&KJQn}Q zAS~<`g3tT<;Y*(gvxMptJ~K5S#_%-%|K4$`AM?XM+B*71VTM;UvSNnetN+y(YkXp{ zBVs7ldd6UD>_~idj~5mUz8}ZFnq*C5qIc0)TV-USu)fN4zsOTE+lgufIE|`0O+`|3 z{`TGY!Y_>F5MY*3&5TII-~57Ta9tp{)S;ug#ZqW4CzrWn<(A4s<*8;>{xzrK7>tY#Ax6FKBQ{u!`{MME2>S)Qf+xP>Z|yk z%0psAF=vE(6#Z-gT?4t)+*zNy4_}Afhskc~SnM_onFB{)?SK(j(>sFzn}KzGGqBP7 zJ}e*@&+MCuse=aN3*QVNpOts>QA`LNj_G|8v3lSTEdOP^0dx(VTXc5}mh-v|=y|;? z&gq+sY_bjLn|Z$Ke}~{e;213XML4#4jWUDsKJ6C=?4$Bk|7DvN4zaux(=1G2R8r(Q z-A+>FZPbDnY7p1F&w^@9CJ?O%Nyi#`YdL-q81L?d>X~!UDhrwwsPICeS*Ki!Ko~>7 z)Gk*IGb+Jib4}^vX>Ab|4}YpzUx1G4aw?-TtbX}b%=Yocdf#AV61dier{kM28V~u! z=%}bLBcX2ubb-0P&=t@HkoD>pHh^AaB37E1?(U^BhjbsFg77J`)nLut&#nP$+*=_63M zXa!|}N{Gt4shEnKU{>2)k8=VSi|f5!Zf1Z!ZGSCsLLW{kO? zvQuj<)Wg@>RdmYgPB~JZ^Mb%!M}a%9nLJb8NZ?dtdWSzvtH9YyD4I4Oi$fBzCu%6R zddFeLuRJk3X%ujQMhF$CmNAu=t(F*-hbapgbQ)Knu^0RHV?h*+SOR*^0Dt6phhq`r zeDrsp7+_ON0pBp?)d}e2pE{st4-CP){$bei%awEDNqN((h(1YEjvMaf6@0IG>@I<~bA#`VT?Q zpwU?PZ?OdOA;|1SkpItk0{k#6y(bMhVUJ+n5{qAg2C8gGt69 z=k8SG^h-CR zlK#o~c3?dI5(_asmfq zXJ9gx4e-U+zJBxyFHvEY3inx)$c%VxMJXBx%-W{YY}H91(#lV*E%oFEVm+^^d91b# zt%dSj_*+U*{jLIms|6@qyckoxJh3e#3Ofa!A*uM?{~2J7j%H=9(<)T_m;rP(xARCf zV2mRHx@5w5q>7eWJ2c3!q!JZN1nAm6(htYHLU6FJ54Qi(3wu1`kn3ml6=%5xVH>^f zoI$~6@Z1lvMyT7|^}1FGQwzqX=wy8TKmHS&LgUReqKUl*VNP5!K1oUhF3FRT$K8Zh zEs~)Y4l31ldbUbQT`TiK%9#mhXlo*<^EdObZT~^cP9Bc6A<5Y06OSeL`e0&UI6Bs^ z=Yus?A3KApyjq90dMaZxAdq>jflkU_b%)U4G6UD7rkPJ#O|hN8ta<5e)l}kDd`JEE z@+9{i!!%zS4FvK7ep;g;1hYd^arpBIW&ocyR8g0a0NnySvN53ezP@$gBIblAV$Hx{ zGp#9?dAiy!0gHw{WDG3R{I7E3`=*)Q)pcS`lxGxgCn-rx)2#yf>VOn%dEp(Vv)F7H zB4dpqupCvW%pa3J1)vX!c}mtZ<~ehZc_IVs#~#I6_drt!wZhuft5|#A7_9XU!tR0L z*hIMzpihhpxem}}VA&qJ8Ees{E2}G@Pa8EBGiCI8L}N`~%6w=VJ_(3sc{41^g`Jkp z^xXnmwVAT3fuS-)OSWBw_AwX|f*+G>nZY~^9kp&kS1MlM91LSh}P z=}?wZvTqr1loBg#z>a{;<|Gtbxjdo;EX)aQF<;ESLP9GbF#N`MMNeU%zrOb|Eyyh;PpVip7W z7f_e%>LdyMb=UsxA-giExtbM9R{qIzn0eomSQmXCGJA(&eII`_HRi%^-$2LGW%Q`2 z*pHkcK-Hu1@M$U!f=UGq0qu3&m5W%}#X47i^{LBzjumy^aeC$im}4jSE%iw=tP>}& 
zEFcIQJ^itvk1tl-Zeds%7?1e@u~-=pgGC;}Sm7UqAWfO3oX{=-Zv^Ju)k{-1*4-K65YAQ<_b=5WHOq&!KZ5<8t zF;f|NPkZ|B*oE2j^4C%+ue;j^`y(?juJ1tL=uyht3iCRj z`L2=gnki7gqsl>Z|BHB!`C?cr)M?q-v%GhS-_jc2dynGmuV2CH-hSBU9)qp!(Pr_Q zX@ei6QA)tCrJ<%}eg#NI(YlnbfJ7_fG5#8TuNI5V?qRG;p~wqK$G3s8e4lw>*0yII zkV2MM=Tg=;19bho;nGdXeoonH`Ij!bA%Gix{yi}|k}b9K7(jQ$O<>LvF$SXvK%0V` zb#0$YKws|y^rhZ3M4x$$vRz@`*BA!HskL6K;?dZZjOvOE1!FAed$c|IjM(G}J$TDw zy@jL3GVt8AmK2v@4dqYc7#* z5*od`f}|H|qy;78zkKMSOD1Rtntk(?Uv}DlBwKZw%$(5iy8EtjU?MGcZKkB!ueB$t znb$R@vw-({5dGu<-7YtZq&eCbY(*}vuCJmxF9WOk z4aVxeg9+Ff$Re-lmxgr&Ym3#NZn1z}&F|`0UqkPF)4-wF=r#md9z)E?_O}K_5Rh2W zv|c-f(DpKIwglG2O7ks}UNr;K$k*n73!>G;Z=Lfati-IkG$>?LbS`9~rZ$2|c_o2C zO1;QO#Rd>0;v@55s-B_smVLc%dM zH1ay05@xaWI~p@WV=$Afqj*|a6lRdMzVzb2P-G2o$8lcY^eYeS^^M00pE&$w5Iu^r zDl_W7i5`DtOC6nCDqJ-e8W~}fX&k9yZ8A&nIpE$Sz&rNJm8iTNyL zWtYD#+qQfQhD^CIEt<98Tt#p#E5!#M{>bu;Gv4xUk8mvckKWkK@2rT7!bDF$%#KLJ z_@G2gjY!9o(0G&2;_1v&ao4h#8XiTC>Evl)7OO1g&=5Cp_3f+lqhC{2{7n*9e+`1wRE@`}Mu|72v|<%4A& z5%?~U?;oS_M}TM`=(jYmP7u(|=07Sf4XR^a2uPGaUF&?;I%6w%Ygu6vglTfs0IzyJ zUGI{2D?WNwN?y6Eu@x4;D$X@ER)4;zoJN+ewKrMDcC*-bmK6_#oJ zxNr%RgTs;O6^J~KP^@zgHH#MQ)OZ6KPwfO4)Py9#Hu7OCr5?y05R45>tER|(5}9au`&H^bbG_4TveR=DGLh`UeAcc#Rt@S| z)bjvcN}DGtE?LQ{sH)%^xm&H~#%8TFMS&}<##GuWUP4g18WeOtDg5DUfpEUE^J;&x*h6&z!}nuP0%eFTGm7SnMXKXa4{0!amPX z1Bz=r!?2w97W+k-!DiZyRLgv>@d`5YYo$Pbh_$NK5Aj;>U~KXVz%GW9^Pg_m*e3vc zVuxc+XbO&hJBdac6+TPf+A-YHHR{hbCzKHl_%hdYuM41;FU!P2dQ!O_LCEUwjYa)D zusSvkn7a(x4i*>a;hw#0H65Nf51b_5%29*Hv@K;B0bB!o2wWX?HE67>M12E&2(ygs zHINcp>gXElzwy|s3Fh?Z%a<+3;)KC8LgKLLZhteMcpp98)oxx`=NpI|=KIqA+AJdt znKXc`dCoV_d-2Wk-Dc&#fP9^sH`aRwV2dXWAh#f_{>4D7BN!|XPQmnH^wN)?W8Su+ zs!Gcuu~-@#7+)HzOo6%&pp9v^9KiSeL$Jj^&3JPg!qTxgZV2}L^)sXV+si9D_b;d{ zr-4q7zge4bupBKfR=$^A=dTC#n*%zto9_tFsSzw$SF^3DQvwJ};a9LciYm%X_=b28 zU~9t<)dZU~pitkHlWGHcJt6%Xpi}2uxQI!iVaW6j!1_)=kHoIuykYv#RiE_k-vhb~ zXo0!dl^otT&vlzs;D{4|OU-R(_-A=w&f4 z)nD7BKHj1tOh_As@qK-9AR@zfh5HB;x&8f+HHa12BMgf@qp-j`$_#eW!WpXu5iADM z^Bola4>@a~p1V0`4hqF;dOKP%YcCbYX8zV7uo+a89|dWu4S`qveN@zv59XHHp45!c 
zCTSdn^pk!yYp|iK$z6luw9v^~k6`5bYk*b&rfD!w?q&;o3kbqj3ej;XpNfgUtI4-= z6wB+>;yl_xO#Rm~lq_jxyqM=Ezr_}4S>EV1HiOlefL1NzT3?OjkN+BL;xmxfKNQ<~ z2OA@2hkq>Q|7rm9JOFv#(UemHX7Es~@lM9-L6JAjcbjjP?>4VuKCcUl$5Q_wtc!>- zOOx&mNx%jI^SZtvSmG6nFKA3OZQ4OVYe88N^Nm!ieobb)3~RG6k=BtvbQ*u{8$hs& zL+-!`l3xL#SgkeU z>pWn)Zke|%c`@J9=xJRbYnx>mf@%|~Y?5zpVVK}ebNS8d3`#8~YKAvUqw0fdh9;m- z4QAfa_z}S6QT~_vIDqcZzm9qopsR9I`VC773edI8WOZY$Su8_W&BOJrsNQr4U&KF* z@4ZK076D!0EkL(rDU4nNzK}g zM222*IZBtTG^ds)3xTP$3>^YuHBqWxP5|5u);{Fz@>WgrvGUu> zVQoB8SzTe|_=f?VPf|?G>UY|K>huYW4~;Y{x2>jUzTGn#dH!)&>Jwo=epw{*EHEBR z=$%g*6osh-&xQ1&Z#Dl_hH8HQ!rK?$`v+ova5(1o^~O%Vx4u6O0%XG` zec8;Rm+Y$PT2--8;{lZYnqG7+ADe#jwiy+@DIgI!ZsAzaH;@K*1ge*38RMs+rqTc@ zbB&~R19Y*SZ(RXhi!-Uk$pEFzb{#WsSWy$Hi%wk|T1#GW*L8hQYa`S! zT&*fE)-Hd-Ud79fk!i&e2nx{8pU1Rd8ap0-*fuZ-8{C4i(%S)ahyHccBLH1h$F8)L ze!~(l0lE}GXDM{%o)3m6;G+K+hxES9hXJBp+frEg)s(%o+MyBJld;0@RR-k^_3RJGhMET0K zD7AT27CDo=lGk-F6BR#jtjf$nCHZE@+Lc*orZ-*l?G&Rpa(ttZd9N4N2S#Ida0I50 zx{q47oCaK-@d|BSTdNV>ByCC}Z%fpyYc~LTrbh^7-s^_dT7@by0oZeZfL?-HjT>MY zRFs$UO-sSu602oIiqQaurLrlFIDVkPh0~>cRf3wk$w}p9}b<6oLu2{YTjjLG}ix#7O z-h8vQ?Z%KKtm_qoE#8S{aNM`rXyx=-3J&WZ<)%U13(V@)IvS8XYM~lb(No`Z2;XWG z65nWSq?f+MCm!>?qpln zWD3k(puVyXft(<(CitcF-j@v+>H>8O*lkbU_S_fqd};7#{MjuA-wuC_al4ATD+Sgb z;HuT8qNoO2irgrB^V`MBgl>=&hML-~k*_wOTVO6$mWrjLsM<`^qQJgg>yZmA&z;9- zgS^ce)a%ro>lT7po}u`E+xrhUEsEyv8~z90=Q!@?zTaQEOytaxcgZ=43Q7(FDxe}! 
zL`5Vih&hW1P>~!ZDJUZ5oG%q4Im5C!v(!_cnzK%Zy>y}-o*kb0K=}=u=UEl7W zo}T&F^sq4XCZTm?V!ay}KHr>AN?P^hne zkcC~;*6KHwx9TLyW*s@tR^XW0C^2~SfCJ_AK~@&CXTVX_?STWTW4(SVy*r;u@6LQI z2Owg^y+@t&_8qdecd)XV=ir|tw39dMb&v(kJ1Ow5)4DpSvMR8t_<1b)RHMq(&PXn= zza3UvI&fI#bhyess_}m9*78o}Ch}(8))qg1%X=0#<>(;=&SSQz60;LpeJ8V~ZR$A% zhXI|AoB_IlB=)a1t?4?T8)+Q-q4DgsJw;L8zfL)N#J>8*eS+XyunLQ(VS}Edwv+-o zFd;zSeqw|k0eS@J{|+jffX)#^(R6?*819B>bq-&~(tw~sv3sHezh>I}E0$`f_ETPI*y?SA&$eQmjGsIOL+&Bj4YpMjiRyzT(Jy)+Bfc%O&*T~b=+RLnt1Ek=Se=1@sTxMrzX$s{q zuPafO7zcnJaL}q_ji?0V&(7n3uIF(;k2ww2iUnXE19a6X`y%r?(>GHl%WKWsXjvNB zSEAl-a=ARoj%^=&tWI7>Rcw^O)I1{{=d3L25ESxVZQgap$)Apidpc&C8bD9Zl&v!! zlR4Gu$%hKOn<~|mHxnAjnwD+koBL*}Gg9DEhn$(kKIMu-I?_UIu&p8B$i@>m_}D7< zrF?d=c2n8ZsI{y}YAp{Xs8jp=d+ng8BT~v$2S|Jz>$$Uz`gEknJK6@=nqE=%`i`?+ zH1iH_%8^3~x@oy`bn!}gxnWazqh@_sTfUmCPi!i4nzxjB!*5aNNhc~@^7$q}PJ!gJ zCjhe(bQ({k^Dv)E?_s76*cTvKI|(|9PFuFhcEaORoF7zeA+J|xWV^^M=+RHIKm9c5 z;HljC830|pkdn(6=hU9|zOqmj>xllnq>i$sQlh+9tCcKIXf9t)oTeSb-!#7r`_dh4 z9tOmTA7|2pW52oYIty%BL0Q?>Rq#m5Y(KDfz}(VQw*Qm?a%_V|aMYD;-ovYQIeC)2 z^;ZQiIz-xD0dL-eHo+A9`DjT&h8`8%sBAg_J9fyN#tM?PgDLk2&?7)U-&AIvp$DUu z5UYXVs@WQor|0P`KEfcAkIRu2AIr>YSIVpPu9f+fd&=U9-DO#Y?y^LW3lya1R|$S| ze6ns2nbA~7n|~-Ueej#iY1mH|*XS)vHQq7>^R<`tkad^oQO^PMm6epMTw@HVuc>mi zO~0?J6k>b6ewD1Mc9kqh=qk@NzD6GDI#7)~N74`Mx7GL=>NM~T;WGUTpu;7k0L(Qf zK-xeZfIk7|U>t2--H|bmFl>652B{-IR_!jUo*_YWd{Ltx||% z)5vM5*^;9pW745$=XwN0__`VI_1b0~dJeb%@&E`q1V9se9;^{z2XqsD+aB36a=d*N z?qgP;SFS0G66(sVHf@5@Ibca<$_1$k8hj9jx>b;|`aVbf!z>a@OIDqz{baGNtah63 zsnI~*O1fO$uhB~0tJYjWp_O)o6UFwxKYUmU(-e@|!dU-OkE$(MIn?F=wIj(gT{~mT z>ok>*>bIA7t2UDjwOh#SdMzY*(F*MlW=iJ4U~yeG)8^=c_|CC%8pp12AAOlkqg!PM zbjs-+*g(fY-30$(HVh2FDXk}slTYVGJKfn|{Zk(5d%evL$saqkT-s^O)HY%3DtNUs zTMCY6X`ZTU1fV0JfKGd1QQU{sljPm#8G2mL5uG0@^?+WWBXZcDiDBh&j~CqM{bOwo za*+Lx0h8ZSWbvR$@_haN@^aOl@`{eESJxUSD=YVrS1!Iv7FFvbi)!lVyINm)u|hX_ ztYIHnJ>q^HogbB#`rR%|8xK(+?yrE~Qx^T>3Rz#im+_j4y%fNEXgVjD_u7|V+(kB4 z(Gmaubd+`F6vQv;EX)2!9hG(_mR0L2a~fPD_a$AW0H{tkyFgM>lC4b~(UEPAIvZ=) 
z=A8aLfF4iBM*AFtd~krq8sxwc79(jpWOPODOtCUD{^Sc;pp6{|^-fI|LA92}Nu6ZJ zQ!lIYRwtLI4p1F6kEX3q+NX;54D_6ZO#xuxH2?gO>O`*G6dmdB-Y3ts?Idrv>Y$yh z%JR=9ZL~90TV83@Op+HawP)jb$vT=<+O}tp##S+M*_VLV8u7#WKhIL_^Qr4>an>eQ zdh<~ezDCuoiLDEoPbJf%+fEj~e7p_Y80otj0BlH{q5oy|#0YL*<&)YQrv}K1*Ef#T)bvk@XN1qX#R3}xmGiIPXYPsdL8Api)+cU#;xU@G2^ry{j3Gglk7AF z1dWRxO$i!3H@07S+M2a8JF%s_R;#IPB@3|6xg@6xvhs3`GxK=} z6xb?e%$p2g1sB}B&9re30H(?s03{_+aMDpUpC;xF7&>;&xhf#4I#S3hXa#aKP32=o zV*4K1IChde)4H3?y`rBye)%=QW$jSapfgr7K!2@KTidh#{aTG>O`}#P z1G>huI1!yUDszC2gF4|I)fwtYmrRCrg{%qnlMlnl3Vs%X@3PLbs!pVih}Qpl@+AR_mnl2`pWvs{bf!0-a0bx zDJv`XmKBwB)XvCVNBOLhXSMwD|LLN}e66hhM;F_m=Ch=svY|#VS$J_rd9_AYnNq&F zyfgI)k)!G~4j+*$ZNNB@lzbi(&|vgG75yY9Z|5;DRZ{YFVu0@X=%4Z7BdO*r({poe z6nUI2A~P~{CNz>Y4V%iRI)dF$rIEbV_INj2(_uC; zK;IbaZ|kpcPs?;JOp-_Vq$jAt#~5@ZKCgabSzoihyj?j#-l>%+%NsP8H*dH_Eq%QJ8#Rl^E_^Bd4{45#U{*e6G zqp1RAI=?AdS(2KWWgSQ+Te31$Yk^sZSUZvGCwyd{nw4g+v+6BPD0oecN<+7<#AK9pB zFS+<~Sz1Z+t8|q-UG;K#x#tKu{`K$XWRD&RM(2m}vV~V*nC_z}&J^hc;09b`is{Lj zG;!7>4fCF}9%Pp~G7TVk`}b?(R&e}gt2~$3%09sI4)2>+Yb+buTq7%b4bu?}ix2sJ zd%kr}6m|^QVZP@PK6l4;F3f?n)g4LRUUi6j_sjIEHPmsmkPjNPkWU)5mbG=8%dFao zQm}E02F{kFObctKhnf1-k^zvK%!x&D3g~IegAw|h>U7^oXlk3`yx3d;{Zj>W1xLVw z!94lmSl#yiEMu!)9IH$Jq3rd**b~6w^-KFsISyV&fX4uclyrOD>v46IZ#Ta}w$xGw zT&b?CZ`52KPD)aJD7d64U;(l!Y_`g3tXY8R$RE!0&oiU*Un7SW_Pp7b{5~M zd|^zvM}S^-sPsQWFXAA51mjKzdr7fv2K8irrW{=Qq1<1+tIX07^s73mURtHEEL6~) zuK>KHT0eQYyn<508|3L~-DO%8b>eSr75RLx%xKn6o@~%p<}08tzT_HNUt^H0xU8pw zc^?JuJ_hJZD+dgyGh%1tzNxA<`v28VkNsptxo+~@|G%}&uc;&bTHWN42HhlO{l_B7 zIyzCO##Cyi%Dz7%&BRSt=MwjwF?Jw#E-{4?cMyR|8qUed(*|20nVINNT_n@)zy2X_ z-h8WU?9g2{v!eX6n(|Il?IcxK5PA0tb?ApA`*2VOAE6s-x&S=q=M&I#%)6;70}n!X=>j_ zf1yQZk(r!lP#9MDgd@1J3? 
zinr#n@{dcl+VGLxjKEdS&%To<>Nb{@xMS(V3 zI|n)G3eMRE?Ch^cJ5mj4{c7^q8HF_tbPqrpkj04YTiI{kHyz@CrsGr656i*5+Tms^ z-W|Wo+UthOvy~F$;frN_LtNYBs^JgF z^NEAy`6_*6PPy)~w0eJ8r01{bXnJYQ0W$ZJuCk=Tb@Fiat7TTl8%2KBNFQvI2kQ5d zC#zg73#;~#)e6$H|93~(RBxE9D%V$5lj3;&C1390%-- z!!I$#^aop-v7h*PIGu)P7gPei*UU4*AAYtCKbF*JYztoAuh&vuZ*_${(y+BUEp^Bl z$0YY?Fs*vR0eOcj<`ZItB59jSK1b%k#C|?V0@$%&Y0rLnqDdQh?vfe?$b35Vjl_2H ze3MoppL`m~&lkQlUuHn8^QROZq4RMTKEU#Zjxg8OZ7Pd(#Q%It1@zCq)416Ipkr?u zkNA|$!1)}GdS?7Dz^Y19joGLiNaxFOfx;BBc>kZB()JyaC0F&9_1bw^TfV-$SGScc zOK2>!o3|DD@~hyrG-jvhIq4tuVfrR{Wyqd`pCkKobUH?Qu=F#IsGK~X-*Z6bcJC*P z6db;3ceSj!q^7K^-B_MeP#1QN+ov5v3>>@CRDy+dnn(_NFz85_GXD0vtiENW+*_fF ztZUj%R#s^!>+5M5>$R3SjoZsN_dX=@<1b=gq)Sb<-SYDDbVSX_IyXm>wc|r)LsgAB zV>YKhxDW$$>cBn)%Ct+Y425)J*|{$(D_E$f_L8;v9eHi=4f1Twda|N+qI{OrR$i~# zNZxGSL!L_PAndKc%tAgo*D`5WOgmB91^{8eG7jjxw$@NS)wc6)_3uU6S{M`r`}lK?#g z>MJVrHb7qusO$CBHMQeVyRST*aJ5XX)Hmf58;(d;CL^^BIllF0 zdHeRef7~ zTheaP5i6r}c7=PkYF&B##t~W%YFmU;dx}3)whlzB6GJ}wN8YE>?b$L?GK0g^xwSFs zJY6eYJ4OeVua^1E+RJKYNUAi_QEf-r(5Z(!-Lb3bIoRr!mEbvY6i`P08b4j*+U|Kw zzyJ1&Jk#ZJnU~Z|UT@r5R##6_ZY*!N=qhs+*gw2&k`%tH9R$`;FmsWUrA5t=Os(To zUNh4&V!|f?U0E*#9jahf4Eg!Z^}@^?$+uS_)t+dBd{g1ut+J-ibu#6D{f}&J-C5pE zYAFl;<0ARIQ#YA$X)XEnsh5Jc3Gx)g6{y%aCprCy9MzG!bs*47e9FDCy|(sg%|r$C z2KGoO0`!Q^7nOcMhs^;7jxk$}&6bnlz!c2$*(VyWt>BUQ{cf4sP)FLe`pR>ad&|-~ zH^{;&{fwWz_$paZe~7$vQ8$@gxtBcM;s%kgm|o77yBqeDM=Exe*BT6y$NyJ5Sx~;0 ztgJRr)>a>^fZpGDd4(9DD=)3sTYvYKRVC-c zR3H`oXqHX$sgyP{PhrzPNfrd8B?bk;B?ZlhbU9^YD>0$z`Nwf~%9# z!tmKb_G?o}3^L)KW)udwKf`!7dzW1SufSG)DIZA|`QcYt(`A6HREM;wN>f`bwxm&W zS=GP4$Uha7)$y{2IL;ouQ~dZU_hJG4Tdgy#uVeWE2q1^XEeSg?#sS?NWAI8?0O+i* z&X>G=1#$)V0!FgDf~h0mmE$JJf_ggQtKPuAX7#2z&u1DmRr%LwJuu?c@-UKT3Y$j~ zmJ%~SY#yXJhIDyuOoysK>sdiLEl0BWe!q&(_tNS3uu;scrfuZq^0nlrwpYu>N)6@J z8i_K!RwD(E!&0zsw`6DQsG3%rr&f`!y3J6TsTA#u+ZMYAc55f{ds)`~8hPM9{vltq z>n5u&sV!?Ssi$DmPM)vYM83Rrn&hn6WHU#$aZzq|>%F9?lILWV~@d19?#10 z8?3{zuesqbvQ>ryn)mm=%l;P@tG{S2i)%NRuRHd%kK1ghRZlx9)#Sw*)#bzSlSQ^4 
zlC14JB`Y=A9&czCnkKcU_OD5qAbhTQ*KT<+F;U(~s4Ji7D4n$@5uj`Mh|U|84M11M z!O$=np#$i_E>OXdZwrH#KfU&$Jl^gmdAM>PC*T|BD!SYJE z9WKrGgWJ%?I@~Q&%yb8VKm5X@i{90LcsgBZ>mzD1) zOOzK^=wrN0L7!LK7F^s_me=Yh&s4fn9Yc%}AY!&b?T9&W$wz`<^q-C0~8w zF8!^*_478F)4sbbuisAAS8OOBD>yHyURRz^Op?6KZ>vMm(W@2^-xE`_z4L7j=I|i{ z#D$5iWPL)D;2FAh2A*%#K>_`1tv3ZNb-d>b(6L$A0X@uIW4Abffl+#HPP%06S7*pb z^ylAYMp7$zqhU+=P~}`*Awkx*=qN9A*3qwy1l8l(G&YOPg3&qP%V&3jjukKlc$`|V zXURBmO(&b_aaCersyg;eZEvhz=Y8xQJLJcQ9+%}R*Oq#1WbH)>3Kp$pRmX1f&XlRD zYXvd2h*Sjts6bc0bxS~#WII>hQMavldH&2d;rTZFM<2+7E?3IzDs^;(+(|yF-CoPx zNjB8e(R-~{_7=j|_dFbY4eyAK@b~M8pSLmSq%le^$^etHeX&i!++Ky_+w0nqmL2Hnv8p0XsLT@aNH9!-|&a z8WTOh*7HCH=;&$BKH1P`fNa#xIUm^hKmq-&gd|&;Z-CB%N6QpIXC;Q7t3dJ3r$RKS z7@)t>eeg+u{(6H}@?2iPk1hbwfEg4cB<_v2A{tj+cET%$p}%yR)4piCP-$OkcwBda>TL;GY| z-~O^lM}d6L`Q7T;>#p8H<|TI2k^7HwY{ws(V7?@?Whg}BQKqd^1wm&kcr4=2p5x>N z{8L9wFN?Y_ujvwV9uOl}8(>T+O@6 zhRYJ=?HVm)by912s!a=Z>_2GX4%-McJu}VrNGDm|tl;A*_9eD_?R@a9@a#;dPB-kf&=mlaI&VEwbfP1r;{c(YB_YWmfwe zz%$KnI~J-g*<`Yy@OKCJqro2hoK9SqutclPv$vF2nQQLodI5=q$J`n-<1Z=Y=H-cR0CyV_hc zQ8ra=Ae&Xbc@0`xn_-;_9S-DUR9?W|{PzB4zR5~jSv+nzOy?*|o@slvtZLL=wj{KW z_qEQ}*GQ6=SVQrtc8r+;*`b{nw&Olr3?I>sL~%^VAX)2{t&EMS?--}Xj&!#6>G2os z@H30F_o&*{aY>`@&w~yxBr5%j;*?jehdN zKRU>Z6|ayNlX}aP23_Rchh9`;QaO`@smU~6JNhdLg=w!FN93!~>Gaw=<6wuLC+Dgo z%~LrUO@02UthuI_EUJ_spEqnRZ&hh1tLn6uSKIfL!Y{OurXEy(o+)X$+Q3K~uSR-t zKo2_bOm+SXpq~@5po!o}Is9}pQ1(AEGQJ=uUdRtIS*!TO3c9lae)Js|nX3R2q1x?h1|z5>I$wG24Sz+9+gZW8-evahNo^A94n8!w3!RuP8(>;6Sl|I+Wtc&W( zRt4ej8eb)Av=j1lo!TPrZcv*!BKZokSu8-)^xzDYKQ5V`98@68vp~6_4@4rdS>ok+Enst=Vo3@v=wHnES zI*nvj{YLU^r!Mlt^UrIiX{Q2;%EaFO^oF%FLpxFlja`1$#n?B|v(&b>ZI@T;x0a1n z8_RoIckgNEV{P?j^7SouXa#Fp$F-f&c~;=eR)8f9t1~;%|MUb`M*8vVB_!(uGg8h` zpR{Y2ywgKR=yjUN#%lHCUG2zxP^*<}Xy3y+iFv$Vuk9^8Q#+O3xNU1&l{Y8$F29<9 zy|uIbusqkUy8-&!3g&b=KdjqQR_H(=PxZa$`B&wKSue?smlrC}KP~TkWx*MEiELNi zHVz-Q}7{pBhHY$U#@ zd|z2y;|5t&dx*SrDX**zk`G#plUFb9CeQt!wz9eXD4A9LD!H##7g;n+M-BV6v8EnV zr<^GV_8q(+Y>7t_JU?XRHUq5q5FkKal~G63d|c(g0r_Ow40%}{-6}rKS+kkERW(W0 
zw76P!JTW(TE~cQ9l9?=N>|w27opDqhl(zX(0Xp}C-hN3aNQ2Qbi<{8rJ{Bk8rP4EH z%lHYhrez0v#{7kjG&WaCl-cF$%I6biSdabaUUlqBlrirZBQ5R4@7pCg%U8-PIx=T( z`gasK-c0N$k0vCFeD#e2x_0!kGt}PLsf?%JB_!3mO|NT0=qUco*O3^1sepluwqAhU z$aJJStDcT}+IN@Dbz947b&{_(X)RlByHo2yJ6^n|iF^t+VL&Gj2Xq{A;FL>7Ob1g3 z4(z`8(PlgtZL-+yqc3G{-FEUu&9<`i|EX-dzpbj@RF-tRM(s*%i)ZI_!jyRP;81F+ z9kEGvfuG;9ql{s z>r^+J+FUK`T6dHAO*+VP3C-mx)z$3!ZDe87F0#B;H(8q4Nfy*>F6*@uu(ozfd8b|* z`KtL9wo%gZ@(HrAN(q%QQHncx4cGk`DBCwda~MLfre2#q%)M47d)drX@su& z2m^Xa`(s3>FGBk_y>8nkn|lqiS*`Wj`Q1{xv1};UKweip$<|-(8xuqV(Cu|ft>0tp zVShX?wwqxrQQma;b)UT2b%4Cy;&Sa6G?R}jHZK#~b~3k_)>~pb zd%Sfn%sQ9F4LcYwYy`Ipt~JtoF@CN2{K%1`_P#kTik-FCIm-57#zU%oiTdj6YIl`s4Ts3XwT8;<8Y6YIeTzKv zKmBESgInc=Dud*(r}rt0u2Hjp>fF+QU# z-~XmSry~#mJugk8Dwt=eW1@$kh4d+R2lHTb9&>ahqG0NeBfGw77CM5+mqHzR+tk~K zAImGP+shgqaqyv!PwTXkRkc;-#Ln7b(=ze-&YVDoTt+CYls|mXKFRuOotCn$MoW3C zR$F2@-B4=$)AjmR(d^!C8S>?`>&(xxAjxN|sk>C`-%LmraDP*H%7IVCOZ*wUry$TM_FU zwU(s~Tgal8o#c^5E#%9Yk7+ryvv@#922_%o_K_6cl;CldmQ~vh9T314=Drm|m1O${ zFx~(2AF{Y*H(8g^PBtr`tx|Ue(*E9cs!xr+fpONm;L> zcJ_+csGxhQXG%%*@>|-okz$+*|%Z@y`u7>u`nvRg^m9Lkl zD-D-dYuqZYCEg_u{_pPcT#X^}Owx_=Sm#k9KV{2rFKyAV*U4iw`^)o{ua%c74U|_Z z@`~F)S)ic4w8FKrOs~(q^cq=QjTO~{>B1V2U*)wyH$Lzlv}bERy0MBPts)1H#4{Kt~-+haNj{Q1-3cD3fb7(h+Yr z+0y(l%sj-lr^fYYlu0uQvo_)em(%*77Xc;kAyHv zg4m!W?@;hA^MY#!%bW&HWn;b8HfnpL+12uRLL1q;WP>^$b+kDunEvJRVGTY9sw4dL zm1||5jviJu=pf5QuX$Jbpu<2}TE2;FY^WorM0MykTcVvL zwYiMspj}Y@EKSFOJ@_62(iUhZ>ae6A)-v);nzb4Scgyxy=gNy+yUOfV?d`R_^@;7} z{U%q)rs~b*J+13+Raaj%y;9z&oG2?Utz~aLe3IBfK1$O1tx#9i>IiaC)%vobSqE9! zw2jQJn;+7OxNTDB_Pj@?7()WKOkavbt$!c`2c(ywtR#Y?(M!2OMgz7F#1>)66z1&;sbz zRRu`30vWf%)F)||lt{+xm^=?1l$WmRDf1dMmzB-i%HleWWs}PLc#Wo7N7|Xn$P8xP zl&xcaiSlO6BzZ?WG0U~n zwYlXrvQ9_s>>{{9`BcY^YTs`tpXx{l>@C$Lyn$_+RnCpdZ&g>ht2LIlWBir^IgezR zv02!(y*%Ekt$aE6IXSjh>(i#f$_|x!KxZd2bAC#D_U@PD*t2w|;%%ik^>s))!0&FC zn|1VkU*pkoPsQuy(F!-q%QeT#oT?+_$#R3_vC4yFX5GQEef}rvBt+&7m?{s{86eMA z9-t%gfih1!0k2lNUKUjtB#XqRZ)sorCx@Fzxr znt!Sap{@Lg_FC~+2lgG7!|HJP_ye;3RfKEL(Wa>V>j2PE7}M3XOn)A#4OfmHm8XW? 
zA`|Pkkk_v2FHcskFN>PBlUdE$%drm?{5{mom;;x}yE@TKl}cr^ zFHXTaO4+$`jm&C)l{{0wl|0{|jXc_*mE7O4tK@$Dz1E}Z>u8Gbef9HEd5rAfFrXU_ zkTwpI^%ivr4A3z@P$zTqbgh-!}crY4glpc)Ksb2^B6yC+=&4Zo$Wtxus9540WI4Y|qAXJYU!|kEmGu;;jGM{II!$G9LXxa$ ztDW!;m&^T?s>_o}P2`D|t+f+4P~?LT6_oY`GXcA`17WW<9+#cJ?y|4OqR-^CU~A(r zpi_U;qZZvNl1^q8i|(}7c6{4zdAw5(d7yT4nbZ0TdAxCJd9+1G*)VOo98cb-^?|d< zmt(BB*G`$eE3di$(0wL{2N35I&{_PN%RBNo(ZdJjiEcgQ(Z((0#Wrll)It`tyh0vG zY^y#@JBT_e&)|K3;;C;4B%+U*WjiM_eV=Q+98GKYA$g!jKY6502btHlqbzH|XNYwG zpdJh9bjnNJs=j#sH%s~Wa5Uf!skrMtXQ8(GqO4ew* z-KZTV>V7Sqh-TV}Y~NKLY~M+~T=|4$!zC25>IRCSQt-)M+B z9u5D`44F}HfIN8F)iUq00kZhA!LqRO4YEK1eeq=jWyz)eWl_a`vZzWwnP0Jo-2eY; zDr;NaBG1(AEi)SRlKCTa)Nw!?iDqArsRpk0ot2s+JAdCTsZ1|wE1*GUEAY@D{wktD zGWKymKk0o}9FPI7I_UINnRm|&S%1$=dGGG&vhL^5 zV+=+u*2#j^VT1vlkyh{w{rmtr&++UXZ5H>bEcq#VuK6+a%{$8T*`u=T-f8mYs#7}whrjwl-k5Q}th;@pyf`Y#G9>e$g02ETBS}Wo$KU=~{+RWOthnwLd9wYLGCi@mJlVFBJlFXud89>K zd7yDKc~n8`j>JTH^_JnXbJa@i$SZ&|v$JcvrllRJRMn~NDj2+)m&z(?6$E?UyX&Cv z0*%`=a=YwIy<_(<9}Z&H5Pf88=iu!}XUSWW?~~2<&Xi5lX2?bb^Y4}}Rogux8JS0< zAScsi#WYD<-$EVPNLbV596*QT^;#0rGz#tAa@8yLuVvhzcK+@I56Y$)_sEv%_sZr8 zlV#)d2hHwzv;bwMXIW5dIbHd1UEL{t(1FFq)RsU0_$i_)`%ZGaWT=}z! 
zljZZf?~yO2JRl!WoGBmPrM56}%4zw7yQiL(KbkmIKECT7J>DblO;UL$2gmp0_oiw- zGid{w*4;`TfsQ18n zZIhw{mz5J1$k^I9$eopk$b+@V$}_d@lt;>qkY{R+l6x-gqoc4JWy8qlM7~OvHzv-J zS&fIut2K2*evyvEF9`sBUX{VJpxm`GuUuc5Td9w{SiP6LTz`PfP14Rl?W^VPy4@u2 zyu^NW#Vmj2Odf7XWQ>P5!V(i{gs4!=ZPo(f+wGW;|Q3$$AcB* z=_zuI1tcnO)>mJvPSvq(|3jVM0m$Q8E%k@$5De56p!m{Ne1-&HujT%C0D5M& zc96&bz~gy%EYCs~L#iV#NY~LDi^&e~{8Q~@=MI%$4TIO2Y~-Y|)f@5kHC!hvd07i^ ze6NBx3%Z!*KCn-Yv%Rd2`g2&^q=jXDfNfet!AnBw@Ey?63tc>zWMNxq#mI<7N6b3# z{GLw%?^fA<_*IT?jjd|mw=ej(iBhJH(lNChhD`V?07jWQbzpEV2_YX~j^$Wg(h+24 z$7Xi|hmQam+99;9?9SK zn>I$RcV2Z(W)@AY^jNNnsT~aDSG}j{upGJU)jH6WKRqH#7|?AkigM6V3r4Ma(7vOV zg}P(QTV9vq(Fh+r+N(@Q;&`s4W*m`Yyy|D0Icfc=PADnuk|XRLzBr)A^m0z5acvEW zrjweAU29$L()PjPH>RTx?XnpwI*QcqAv#n-x7Hl{BQ%Oh8ek{25e)zSC0 z^16=Z7ye^_yl`26c{yRIET}z9o>ee^sA>;+ByoU!v}Hn*p7Pn#%hf=&!TzCg>`|Ex z=_q4ox}^QIUwXIdCaK$07`68UJJk?X-lHjL7lbYGCPkK^B%CyaI-LSq*Dl5e^Z=5 zDi_Qvt7>mqT2@wN*n&;+RoQs7MaROb*X22>zBCN4-?1qbukFEX%!KawC=VTKHW1<~ zedod!S2K-2BIHOJLQJO{&QMNOPSI6ea{d&>=%yDJWq=?#a- zggX62zR#6{Ejwj$BVJXzR-XPpU1jcn^_InzhsldIhROrwd&^^0`pKN;x5yK1hs*e+ z-tyYi7qrKxhPhuYSO*{W7;K+9gdY#fThA<)o7(i26_38G#>2g1+VRMYFQ&5XM*m8} zm<`YZFmcCa;Gu29Sg$+VGg5#}nGG<4^PoXH6Gw5yMknqpV4Og3>3jk@=?2riHsZIk z=*3u`ai?gVmf*31Gtg76gFJ4tmD{1?GwwMtd(v zFInG^%Nmc%fnKQt2Xt&nN8EM{>ZoWrf^r9yg#s zXv7*19URr8*`}rt26R97^fl7C6LDj01^r-OyfR1I=9_h}6lU9OMTQ1)K&N_;E!ZJ0 zuq3lJ63VkjA}Jh3O~q@1p{Zv$cqNb)M{g7 zO1aVUNXx|OAr|gI<^=yrLKt(rLAwTjaXuVrk7n%3-e_@LIFu6(hTlP49BK@Q zg2trj!XCgHnf^Qh=mD3Mm3Yd z)A2=K!Kg7989A84=){1hgcN`Wpbc*WFuK+@;<3krDgD072X9#ZXB5dxz}G@ zGCNT{Ck*FYh6-1EFpk$hGFk~upUy5>Jl?->kVc$C>Hf>L`1K$? 
zD{(BDu)_~*L<0lrjtEplAe?o!;`VI%$1aKjy36iuBH$pRrl*9xTgO0u2UGE`FSRiR zOPF+njvT^igqXmjg>_?<0S6tuc!7c}&Ye^T=piV+rfz(qj94bNMrVXE3FWY?RetOc zdEzpeT*`DvU_JNt0|$PiNF6Kj|G6+^SyDeS=C>(and{70Q3{Uck0_|WmTSEeQYkxB z>H$43SF1}K2FJa-_t|Gs)Ayvwvs0gz>swwc*DCnV=y;b*Z+x3ft}$93sy|5{tTjm{ zT{c$kuQyR{uQNo3Rl7!HmnQb}ahcibCV9BgPuQ)7d7!U6ai6ads*r-Os z#2T5g<)ICW@XF_da7H*rz?{)FpE6-Ig~MT#6TDlbM?Msjr`KbEXdDCWFkr$COtAT$ zSeZ{MyRtc^5)vk0y3%vXP!!P3A*zh7LmUztf#9V4o8K`0$=T{d*+?UiUKkKyWG|`w z#4ZZxfI?VL`d{3}g8DyUGk$(5KzI30W(`bwarWz)xSpGWIL7#-xEegnP6n=V);or5 zdc!#;D0b$y7R*cqARLsHAjlu+Hqb8xhV@SwoHN!u?6O{@yv%UWfh3L*!o}FZ&;i}L zKAx_ZvxF3sNpn$sQgn{Af$&TgHVGgyV?kNTmM{Q)U{ir?X0v+jB#w#5;bdvO5>MRw zvkZmh*F(I%VAMq0@Xju!u`pK1pg!XXl@TiCfF9Ifj(u7(sGJn`2&NVlaBsbGgiN{o zHo2whwKA^CP?=PHjLb}!s60ugR=Gp&tU6l8HyAE=w!Xzyx9?f@Pr0|j4Kl0oD4BL? zA9*fmyn_B{`TpK5BJ0y7f5|>MI`0?BS+`HNulP}3zkijyI%>Au+vP49T4#Xtsc@y- z*5*bT*=ms7(6pCqd`>$6>FQ*Y>Dz)S1#&ozcWnzLCp**lFE=Pui0WcYW3@)Bf`uKd zGe4cVgsO1_G#=0?*mdrh_UIK{m2vO}jMm~bO~Pu>^F`jEaf&?S1k_=ETb{g6Lu=+K zhqY?W9p}yH!oWP>05oHAhMa=w2APcmec)uBOG+w=0eawcVse{vRLzC}-DJ_r$PXwm zTgV2S=-=uf#Br%i<`aM(q#no})N3r@ zP_(dK<`axAf{uoPH#2TrkBiA{7h6cFo=q+on~iZC)MIjL+!N=>AKWimUw@iS{u6Z< zwBH!(IrV}q28^u)wiVb76;ZaROaMA=EIUV$Q;BKq+#@)j-~*Zm>#{V{o_-~DoDxn44(!a#W>;TCyR z0eyP&335l>(K5N(NSRq{oJ>y`BX`uiQO4E3Q6{y#RmN1iK_=E6A-5;oDED`|OYUfK zv-GIaN#@@DjMyssbZwBi$0Q{sMNLT?a6T(#gRd$D^8dBuZ7gVT+7|h(k_Ij*FnSt5 zJRof#7LF1anBW9KCS{&e(sALL8yxhCV+~E?6VHgFpO=8#0R^K9^5ZqUfXUlF^r61P zhOVI@Eb>qX!PL8kq&{r^JGdW&(IaJzmG=ao1Wpz%DUl^>dQJd3Qo94^T=POUWJ1}< zui4T?Op9HzL1m2N^$BZiMw2I|P+`i96O5Iw1Vk2qInLg77IdzHccpQn*izVT;_~Ua z^U1nB0n}mpL+2*2Mkb5{XqZh`MfjwQoPD^g>7kFnR${=89Vwf+DhJu>i81w{90PQI zBYUjq%ARj%)M{}wE*(j&Ae(a6pH>b%@D4j{*Ie9~K2DS)u!jIBW3tB;Su(=507dV} z#?`DpX;Xnnfy^djjI0eHk4Gp9==NLdP1B7BG1lL)=kx&wfPAj(@xA^|0(Ctidr%XB zKXaW1T{x95R-Up#r5@0arX8}XI-GpKfRY-@J*oz?Pp#*G$k@iiWK6B=W%Om&$@IE+ z$gDPx$%y|NCX;GTlt~RI%g~xP*kZQh-yWCAZO6$>1?w4?4wgsi-YO$59w=k#jn(*L zWJ<-c@^F>$@7h?x>(*fa1KvZ1-=z4-GV3mWc0dqiIbBgIKxE>0oI@kY*>x}#w 
z9_PzwDi~44fH6Lu8-P2`7$&^e3+FI-IwK>*eG>pB#h;sN9csO!4`&B-1NAru_iT}* zfl-KK$r_tHZW}?zj5fg{I9AWIDx3}`9U$bP$UHXT@kwCA0h1>AQdgedzYEX}%&~m{ zkV;X;E|7;d&Lwh>+m&U>55218cbSoedZDfY$pgR)W^k~Rz*g)L&2el|y~}a+9^|ie za;_ZAB=~~XV8%)>>peZ8ET{?jl>-}~aDffMWsOQbpa-j$nfl4MPiWeAl(9nIKi@0z zX^fUXV1rm$mx&MPjwk)EpYgI;scXk%V4$e5#_mw<3I z<_ygtX~iq3^L&$07thmKnG2u;n5>c0wr`ExKtQj#vkm}VZ@DZz+tag5KO zL$GX>gANjIF!(?06meJq5NQMeUBml=-Fz)KyOH@K#NZiitQNI$U9-Mo=$Pvgx8bSTRwh~+c zomKU)1G-IfYk0P|<@Gpgf9Q@PQhi!S%yz}gZzJBY9yB~$LORirMK^jOEJOSf^6Cj} zS?Y=EBj`oyPnb)lgFt5NmU!6p$&6O$wg!IN+K`v!95@W}d zCF2{7mi!;o1P+Sa+~g*i()=zN{omIss85xtRd182)yK=EYPZ>Q_Xn#?kcSo2AFDb+ zrd)iJjK650+*5aiOldGeZf`I|Mkie_=^txCI&|9g9iwqgmW>Zgm{O+ zhQE@?KQ2S)2M2RaG#1`JSK7no9D0L2rtQAo!zpo}kD@@F^VK^qpMjAQ_t`VR956)V z(m(qavmWgej(SW_)p4gD3CsTMzRv(`b*sk$oMWtG5vN!f*LUDrVlpVlZxoF@M(?HUyOMeHf|G86$al9-gAp1&}juu^iI5>8#11zi@n#W0bQy7&iE`|ze zy5JQcJg{Da`HlW?TIMukda=%!)*t$|$0Q{r`xp-CpbyQnKy^sI=oR9#sAez779+~T z{aoG+M_!GK9c1#|0F^IR1j)z>KC5c7>jmn^Y(UBNbwUvVyS~C)G|Wk?e=Jb7y63kN z-y+Z8U1=rapRlt)*UYeSZnhap%kx6t&T&1Hbc{)la-f4Fd_+hop1-oyo093rD5yJQ zf@pg2G(+UFnhudg$z|tAxtz>yVzz_5lUAOV$2u1Jmywnp&mTEyb0!~ubOy0cJr88| zq=88}*JQ}g58WA>|b}p;~6e}0{b)N#0 zzLyg=JL0^IQK<)X%+>Z1X4435K-pOtl9qf#@_2;}`&CdM*LI}bT=jaHprh$KDvg$j z)yB!pCX;1i%kjZhuNr0$ucOr&Et6^};8&X{Giy(kiREvVN9*4s_f@|`CSN>C?!9b` zJf1LF9&K>9Ox3Ux>)kA)Yh5d=@0u5^eBUeb#}~gz%7IihvH(=n>Xb1aXZ@+@0 zeE6_AXBcv4K_f-~ejUQ$@|!T`5R{#0a5xJKk0O-KVdxP@LPPb&NQ0&E!3GquaMqwX zmXJuFba5Uj$w%YAN#C~mj5&_eatVp@SbmxmGLS##ac6X@Jj1Mi9zG6uv1`r%k9Cw{ z;SD_fEsK&8NvhoBZw@$?C+Fnhzm1E?3pN;oV(xEwXs*J)$4HFU-~It#Z`(t=5gvZtk}@1l)`WRHjOGW{9mpmEJEW51Ez<%hZFzr8-t zeOOngW&JzpJj{BHK}boc>5{)Q zNABq}S#GI)qfBo$S#Ga9TJEYoPVT5VR@UCOLgZ(Wk7mCo!)p(b$@Op7(fi#x`W_`y zbwoa;sseeH@iMjA9de&uzo+``GNHmKnOI|t+|%q%nb!7pk*|+Q=DYjU8;HEWXtTWh z*h{u=J3Z2}0O&Z>^z?K&$X1Fm;x_;eXtUVQ$$RuTlrVrZG9VpyDknxBN~#DNuJ_EH zaefY{K8iPvHC&vJ^|(t)nidT{?qD?igZmGh$dmkv;}Q~OH^-$(;bgqS!qH?pRh}Vs zXB(EExReQiLWcl?r|lyli%<9xk~?~i=!`t$prGkfV9JYpR=-LBHb8?dIgmpR 
zJlG9kU~DT4NUn!-W#qv2>^u5;38qu&;7ys_`CD0IVF25>9*beh9tRI)%I_e}k>BC; z3D3Q7UYuJP(&U;Rxn}w-0_gk>Uqg5C*$gGx9={1=op7}W&j)t<@Ot7m=iK9Ha)snL z6BC~F!aCr(rwLOB=$Wu5_RBwE&&9I-aU?u>kQZ{hj{QErDT}qWSX}EolrbvxfKG+& z+PN>julA7x`(^*`T|q@+$2#EK{LI^OSIawOT+LB(XU*GWM&oHRvhqzL-|BsB$2TPo zkvkiWlW98gzO&q|GNtlea_40luJ#m}P-&b@R8XIxfIg+hopMi|yX1`#t3=+>hHyX~ z*4z8!(-+^7(OqtoF}+91p>0RhSVVr>`ila#o)iP&1HjQkm;h~T!)-1+RSW_C*wFs{`%luvxo}*LSf>LUa3H`Y(5nLh z_2PhIGf8?pRhmWF({K?;LrW zE(7^mUFtRNA+v`;zHmKr{Y*@Fjg*)tn>` z)SD*vB}|e#bcD{-{e*_M%8=^U%DA?-$iT#2azpz;^6{Dv4ZE{4a&51679a%zVa(3@ zQ;9?D^nxaU7pHKDkFzM56Ho^XNINjQ!}XC<(PwoI_&8#Y#B(R^PRtu}TrtWHa1M6y zaC$Vb(D0;R5~hK>v-W&();?oU)PWS`Q>FQ5n7qj+?C@|>e(h9whMBMyH>5jsO1j=z z0EG7M-FH$MyMV(>fR|;|8zet%{-?GEO>)~oNjVzq2zT%(uheOSaE>jAGd^*+;E9nEJJ?f zCVbz+5YIb-4l-^-5J1KCc;_c(ON`9pbPApPDb_!9;v;UiJ;2r+zMjL_pNHj${AP=% zN;AX`T%Hd0jXn%iV%N}<+`ZaIc*lb+)r%3N4)<0)tBp&Iw-qEX< zEzTOxI}c@zO8KHVbqWfUyY}z2ipoBmA(L*IC|5VWT83XaLVDHdE4?ZWklUM0kYN>W zk{NAg${kH^m+`G`6Zu0E(9wDKOM1!lwlid0wcF%@q?rcjcV2e8Osh6UCRCiDpnjW7 zsXkGr*O@Fc>fSBW>fSEns@^O&R~jrgCJdG_oo|va*L^D4dotz7j$pbSo7sEtfW5+X z0?_q_--JsE`A7*cZq%re)UI7yx^(FxojZ4yI(6zur%s)uUcGvfl$0bxh7393D3kzx zcQ)>naVRgm@Pf2z)k+#RY$yq}YD?>ut)zY1cGA3QGilVIp(Hg*l+~+VGd#u{3MaRGKz!QX(d;4(-~T47F?4lCD=?Wk81$=-a!GjB0qCTI69s&5qMUF0{SR5We1*)oce>SE@1DI(Ca<^79XrW@e*Iv&unb=v=rcKQji|XUga{apXOs+PqT1(x8 zI&#J3T~6u*p)Y1PdUo$&_LtbOk;TLIu$iLi6=l+H*QTvpb;Xs^?W(TQr&n*eWBl#1 zbnz1T?wfDp9RvfHSf1!|{n~X>vw96{AJi#rf;?#>)HgcpaZPvA34rYY4sq`6L>R#R zw%cx#tFF4r+A{4A8A%^*SigbU(>GszZI8W>1({nkYi{;MSn@{}7&*daJr#RculsP^ zP8~W1Hi(!CvVb}rsYw&>mR2oVTDYR=6=lMp7s68??n|!ha)s$<i&+opE zch;y=e{@pTsMG^`Rz4p*DUh_o$x?7wdwAL|w!gbw9vt$B3~x44?r3?p45@UB+~4LQ zxvA1{8CqkQJagkS!G~3LiQLj)q}*BWP8oLbFqz)y9vQEr@cZiDE8{DUle=o%C3n}E zC{yZBlJV6?%dOQ%$i#-@ixw@UO`A663;=OJIWbWiU6Ur#ym@oE;f5Owv;pz(v?#=sAJ^eovu2qy z!71Y;0XCcoM}E^#0V06M!3R*F;o=x@1~gp29|uzs#@XVmNRRY!5WYal^DK@}mFAye z@}(nzWAo7$PQFI9>gJ?Qm1mgUp`jC)bM%KUNgo}N&eX|MSZd9DKAH_6Tjy5 zb@yEpZ91Mj8BO{$JT{1J64vyj@^M6;-l^q2dO|m}J16fv!?a&IM(C8zP{)oPrDn~V 
z)@EC`Zf)iAHidk21j(Or)5$;|*eE(ecFrjaagfQ&c{--fw9lk|z_b%VaVol$qgh76T&zy2m(zgEX(j7s^Gd!ZaU0x}m! z)}d6%+oQc*7GuLZM7A#eO(wLMEJG^{mkA9g%Y>x6WpK6YW$TJxY&w3<r6#xJ?BS}O-RMW+SWqRYg<(3LV<&JveWoq-OGA3cP+?I5kjBYSOZmKs# zZce&UZtr}nyz$t&VDp%CWqc2_MQR_cZs&bSo}a^|9nc{hD$W5XfD`2i7{dU0(&#zTKxvF`tbaY;zyZ5K*ABFXOL%(r zG*B8Y4#ph?P6mg^H5?TV0j7}?o(4`BoC!>xI7LQ5T<_BPatpitk8PIc^@FdAJA~6Q z>5{iQ7s?PW2cvHsFnJ>Jsd0dxkp;u z_cBo)@~B$5iVPk&$kIM^KpRNRX`hL_&Kx+x#66!De)Qo-Oy|g2r*?wLz&Uc#VL?{P z?arO+92+!fV6!}Q4(TY~e*5k5XXQ?Q?snC?ckj3@I?z#mjsOpOqJGJfJc?uFaZpEJ zoKqg30`?9ix}`jXp-%ir{d14s$l%wIg`*8}I|8@7>a`ws|4#A@&4Nq*SFU3X3} zd8lL3BM!P{bj~&MT)ld=&1TR>{V`T@a&lZ(kS|t!{q@(az0!%IBi^!QOG_VpVW*c} za;d!i)+Wo}0k41GesQ}+pXkf&4*B^FyKbms%1L?s8f7Fr@>34i-8J2=j_aDEPwU@) z+jxshKEy#bW-CaCw29A5g4b`gDphS}jXbDFuM_T(4l`Gz!F6U@KL7MH%g3hm%NUjV zXXt7Ma$wgU?cS;ZYg!`Z^9WuJ^tumm>wUYW# zs;~jffaU|_+A4iyc#R=)Pusg>cT&#Q{2HqWlgdldGWZuRUogROBy4)P`5t+(E4I`OvVZva@`XnSKy z-t{+p_;3SqI#0}SSUXm^T?cf6w*L5&$zV33?e9#0j!eX(lR-Mv2W3V+()74o^Ejlz zy|V#y+N$SCnjC5K=#M%kZ*+(pl4Ztm2N=S z3d!M1W(wv72XkcpBQMF|jsv7mi{5f&ohzh&;{npUc0U=`=q4G_aI{S7Fh!Q$xy+{9 z_k6TR`Zwz@quY;^L3}B##x*iHsh{+1&{KxCxn7309U?b%zF8g`^@#kq>F3yoR@CTH zwBe*HNM&h5XTSop*PvoH#%PXCcwsGIjZuV4AN`epj$@_4^E{idJ{qDi(|~E9G*sfy zkiF+lBd1Ywk2DEOcz9%_ zoPYqb`pC_nq2tuq41;oV&m9;?A9WIsbMhc<-M#fg({J}bzA-|j1thjnQ$Rsl7^5aF zl47BBiZqOrn356$1O$Y^2n9t6LApUULQj zo%48{bF>!VenU(0P$(JX9gktLhcPhi6iYP4sn#n$rRoq^ed}}CqHweMBs;YSD=Q6( zDKQz<)>F|{V_Lj@Fp50c-*DwgcdUn$f81Led3u}(dNc8WYhVzxV$@H3x#MM^LcF99 zV{0eG*p@Hh0<{m)X#VwO;Q8R>&Dw$LvrGnMkqOamtiQh=!mseoiT`<#&e#%;$V6`Bws zWR%lWBJbFMveV^0*ULgmzVnCzD4=Qa0R(3vUm`=s`>eKYbdAKbyhWAE_ZJ5KdhU?G zi&455``O#EzkDb4){QB&!O6p?hjU?}-DSn4X}%Y~ERw*RDVsBAa$L9YT0=XTg#0kR zWM^lFl$^IN9AiRWGe2aOUJf$r{z-UVbLsbrs?_4|GO-dp@stM8J4qSo&)YFT`I#rx zO23#Mi#hZkNyc!U_$IL8=$4fD(3ld1{nf!?9y!L06F5_wB3kCX9_SCGP`6#^CIr|Q zGngii;*rO+EiCxgXdlZ4T3Yu}3AJ(J;S7H8j-_|Y?W_SrofZI;iFd3vDr>sd!?BZ? 
zwLC$H4k~!<=Ap6JN(iq>+3d`1UwoSxHPOZgk+3I!_2*qo2!gA(yhM>2&M;Q8(INe> zirTkO^*JUvBaA#Q$M0PhH>}!|yasFi|9mX8@A>0r*IK+A?+%rDC|*mP2gLpI^?w_C z^Ji&k%S;Its6;Q($YxB@usDFGo?lffCM69P*xtp7Ctxs1Sj5602E>4-Z219V{=A44 z_QgQRpr&Uv!n=hUdYDq4!xn|;3@HPWa)qBx;P%v}@$92Hq5BH;+7@nqM->oqU=wAR z(K3M&(Hsj$JEmO4@y!qVnGB~+e#Ce@q&JSgIqW7@rn~#TEDVKlq(aRP-tn#1NMLH{ z)$q{pCAIQKIgg4SanKQoQHXhYIm6o8zC7LZjQByAc1BaV<~ZlvMQt!Hcvqy=k)=r9 zB{fi>`K#9ZkZ=|I)B6G5M^C@5oooQNKDidGD+rEhe_h<#Z@=fS1A<=W9<(knOG7mh zN_qa^2t^jQ1A)lw)2d4s^sm5Rh>g~3+!EWA2{@<6dNxN52R#AW%@Fu z=ONcCmfp{K>cERVZQXAbZep3UTcsFW5+CooTi$#QYlsIrELOV;* z$}L&RbX6cp4ty^Fm?xP(WUly1-ofm*3=pxW76OPg98Q+7B6?8`(SlEG7fPoG-m^y@W9}~qr5YzTZY1~4cL&F zY13xpI9e*9GU#@88uTpd;=rK1`>g>Bk?Q{EI71g6?4b`=qlaDh1mMB$b*2I6VHcgU zfapl**m1>oftwggRy!wf{?0CIrRe-HW^ zi>@=Z!2hr_k*0*tUK}LRu8Kh^BZYC5zhyI@jZ2Ok0yLG};_mcgC73SYuDZYI>lCTt z;>BSSg8*JTUbFj5Agg9|Xm)U&ZvLT^_OL-hBO+|KGh=QY;EfHGnGz*s@hb6zt zDCc(@J@V491b{qOaI^kEJ^&rRz|E|@FP}d!IN~JJ5mp|IxWoMk_XRBDYJxX_>Vs`~ z1Igp;%wf^HEFYr6*h^#uQ`waXm-J`I;CIBC20SNAz*%ybcV5Yy0mDw$F|n^CM&R#bHf&86-YTGWu7_}P4;U8RSFvt_Ki z__PdrpBeE@Hw9Jpi3!F{b0sN8?^a#f9+25!egUBFqOWbFvJ>iYZqN+9!&l0{CHQwGgCO*kLOUoFb;3up{-5iI^k#B z6!ETnG(x!-9BX<$3H6$75%7sIeUou{V!%>@G*hZfM$uTE)%&gMt^E32YIj<|k^MQc zTA%Tu>k2WOoqNLXGehkuZPz$f2rm=;RD^p)srvoxD>@GmeaZw8n2e_H=7ji3!aB!E zq;F8`(>U9i3%=q?T6AcFFjqjk84t0P8pY!}%Ibju%_4lTp8@`250~W`xt}91wMgsw z8qqvs`vwG&gIMGGA3J;;rgZP01qZgky@%$a+qZY;)0GrIh0zdzJ_sh2C)%_-*9?c| z@7jIT`?y+b=`FmN5A(-YZOGjKZh7bsX$t8@K)jO+dxF(IEg2ZJ1lc-;#Kd_w+sxR)d zB!l<+Bm?6u@zXZT)>G_s#1xMG3{G;mxbNm9hoS1UuiDWz*XmHLgw#=DhIF1xU%>jy zFQXH@l<@Qx&Lpbqy7s!$t(^Xa>ffJuq&o{5CuMKxDsTMGOJGF~xlxl78cfAwN#AP`#2Ho-Vi zE0M&LgVwMUVJ>cp1r&+kUf5P?9fZztBdj#@@ps?@k>FC-;*U%f5}WbPW`J*1h`yd* z)>L27Hv#lzpg-HPHViF^O$jlR0*YJg_z`o(ft_asv)_|WS2PAPtWZoveB;$KT8rWR z82$HxbXL6mu!p?qth81ao=MiVaL)~JeUubjk_T6@L>;Q!P-E_}thCBi5Ti5A_S~6F zWfjzhT-6+K+Te(IxYh8R)4@82NyO1+6-N|-dU#nKI1}Tfuh5`Yqw2PoTU7-2 zGOlq+hAwaXpo(V8?!)-e>d>7Tl`2IdnRa{=!)mq8!`sD0_z>FFm8va)-6_{S&khw& 
zEeuTx4ZeBkRfFc&fvrhNh&}uh)}4z9C4;~!ln_!_WzO7klz-?e%5!Bj>xtiB#CX}noEHrN)?ZVlZY;sbYF)-wG3 zYW!$ij=S1$foMmS;soy-Si!tU-{g=Ya7U^dcB|BA3zP%+d%@SwXBGoWncloiCSc-)q_9 zJrEstv?4lLbtj*``{GGakBe>s!MRUN!`gIzV}dimFc2*h*nWL`6qmf}S%w+#)EO1c z+;3swpQv`XyrZKdNr!n%D)2Vnx;6kM_{ga8g0)d<(TrxYTBap&>P?`o^u z4_72rlFL~!fcUV!6#%wDVW?{d!{73%0iXc0bkGy2SZjL;g#thQytr2pbmUc`KMjit z8qsuF$c^It>NxXLcu!mSoqqNJuuk)@n`0AiyS-J|Rc3sWmq)>mH>wqa*_`<=Dz3#F znzPfO#;?1|kwBH}L}v3mce#k$2bG#7+uftlbJgkDrX5<3_tkN`YR8Eag7#4XuB_bc zP1I*#k^C2_6TG$`MV=C&hxlH$_ zF*MjQ^k^w)yQOk4;Qv{Ni)87WVQA) zBQ!6~xe$cvZV^q+s%JnMCC1OB#P8n@>ehMOH~8qNr~A(L7>+L$d)s)vOa#z#^}AoN zcjn8{CmAbV78~AH!hAa)Ak5Q$kwI_Ku)xgSd6#0gw#W6+E4>q|P`^pm&C#P?z+#;$ z8*Vtm+8hvI)O`8@6lHKHC#bg1mT$9nj*aUFO!z(oCu5G;z-jNy4*uqNu1;tHixS@k zl>vu@n3^vdPTl`xH27O+t6ukgw-BDbU)eoi;?>Z9?%Lljj*8liQes&yyAr#?72^%h zo|pss(FeA6=WNA`>A%eby#{o=c6S#?Js(ySc3|HbDo9O{n3{`*ZzhM&+c2)7D8F4n zjO|>e4|>)BaYD+9l?)eoI>^eA21h-`e{=0Up(Z}*7zQAY7X$cqWX6k24Pw6mm2DMm zq~?LW0ekc<`;VF*|6&;V^x$qu!;bC?hbN7JJ}ZOy+6`=LIOEGZ%Wu&L`lKNC@ZImV zFQ)QeVBn5x;YExdIdyHVCI)C)mJVR)lX7Val|woTcmvHD%)%_JKR$W|Ny1XwAypSG zA<49!7h_LTmeMgp!4GHk(0xT&rnLQ;i2QZ4e76fxSZ@vSwp>t4{E7Uue|~%uwk&Cj zbD6)iFoK@*2^2&@gNuJBL9qh#n*+N_+!-0qwcX_0bJ^mXuEPSgu9OJi{?3ycTagEXq3DZkItP zczL!sQfHZV%6>-G zbm6flG3BLngridtrwiUuu;$$I?{*Gt(D^4Y%$x6hJKSNOl~*|7CNR;>`6qD2W6H-5 zp{24)uPn3FN2SmW;?MqZ!w*66`=f1eq7Y=c;Ns}Ol_EnV@5CYmY0C)Q+%Y*2vyF+?bgS*Vmk&)2f*xtd|hMOMp3`47CD07y&b9k^QN&I6#C!i<27<3FBkuJMBk zYj?`nx;6Z}I~0yCU%@7FE;;VMDxEOR?*1%Oc8kY8uDj&|mSZ&NiPW>LBe5)N+B<8X zoOek>6+e*RFOE=~ORqK6wPm^b$9Jz$Iw$hnt&uv~+?5Kv&cQCTHD3fr7Fy3~I0eBa zvt;5kBCq($#r>?syZhDr3A&~CU)QNqv_(anrqDDd^VtikLh8fhO;%m_+v>aGesgD3 z@MJ|i0?WI_g&tm2YJ76p=K1eJzXhN7y)s*A%T4S#B(gDERf~c__KvX-0F7vuOOJ1P z?Va$^#{p2LJak|zQ~dS1{JsW!9-)&fvCeE|ADF2WYg;NYU*Ru$ddz>6eQ_WO`=U47 z$Z=E&OTRd);!8qnSr8PxJA-Mf-X;vShSB|4Hj-B{A(mzxG31qQa!c8S|Ef?`x!Ch6 zUg_wN&2wx}^=Me!JOs_8Ft8^o(Fi$sVK6G>8&fiPlmWn^25%|;O%jqevPfU?>SqJb zLX<)0F6{QDUEg=Y4QPnss!RFVgyazoCvSq9irE3?O&T-S14|P)D(D;jg=!uh5R_uk 
z`A@w9{C<^Wy}&S&osO)d28b9+i)7Q)61l;swS9YMeT{2K-g=pzKSg{Mf03{J|-aJnEE)R*a9M?T)_WsQpB?<-!2yQqN^t6y!=v#Pk4P zKg%FTEUWn=rtwE+x%ukK^F&hyyf-QvTcP&Oe1yP;+iHk4*vO_It9ZzPHLh&N_lg?I zVR?!bk#CX~-e|_UPCP4t#51}4>fW_pWiX8f*(RreCmeY)cHE}P zRX`uE0phb->;P#?KM}O~)V-0?S)^$QswnkhgbdFEx6n&ET;hf187q7+a&(pi*jqPQ~8r{D}{17+IgScf(9Io1tdK($_bdiV_J*K zaOH1*meWV=Sq5Bx<)4xPL(DSLZn8X?TFYtdq3lKqsP0+m6{8Qru5_A?u7?O7zVQ>G zmGqyQkIC6p%-Z?!uWp*BdU;n|8_BLe*~D?ig?WPt1+dIfp{;D1O6pRSSoT9B(Q_F0 z%xwB`gZ%o+iB7Edl~sU-2Q?9hWf}m|BB!#PbS#xtxhO#HO~LxLlKRCDnQm{AVA|cp z!Tcr4XcR6tYq8!+?lMLlgzHcPp20N4^) z+6!Ysn|{#|N~7OSE(!oV`FU+`9UVDcu5PJs=%4TilG*T1$bRb}Lz(wr(!r392w*(g zVHoSteIi4`y6r^CvT0d-Z+3*pX*Ow{doIbQLu~;$MvkBpV~3@(5ReVxV9s z3a^u)6ngGd?2|_TL%o;-lK6r_D0E}0{%Zr?ICKB(lm!<6Rtey!)mng8aGh~Cj=TZYAC)2hRhOv<*sTTx*Kxs{ns7D zoHS&4v#6a+)qmyRvT^m8>+DU6T$qxc5e;0fvT`n$J4EF+)q!o{=G z%r4c#xp3vvt#)QJj%|z-eK*lcQopYrZ>tqwHAQUb%lOj~2~b35JBz-(SsiE{ zP9tJ$kp9t&&wMI$rWY62T@c|~F2d9Z_%I4ZEMM?v%^8c!Q%u8R%Jqj(;D{9uz@J4w z3L3!;Yzy};qZ&xh&Ff3#$fPD`V6@$|uxQGG?mEG4mXhujH6Y>3LqbQ;@vIl&Cm4F$ z6054YQ5O!CPkO~nOY$Gt*=qEC1{}b(4Y$UBInSd5bz@fMDM;Ehv8QJP5$m_p*D|WS zw4if>ra<$~$C}G2^w2lV>l?>@zKFPAjM&KQ$wHZNL~ba~akay>9T@V%x%eig|0;X% z;_NxKcwnil5ry;ByKYsGl=GfZQePCgv072xlsp)Q?`d03`xLSAgDagfVChoKtsOMfw9uEO^PYa_12X!xY`O-FXf>+bXp3yiw^Dhc2A`h#lf>wh zcqPF#y?g1j*0evHx=Ipl0L^!Hh$JM`5Za!_y+H9yp2yfT@f!TKsu&*Kr2K7QFSz6| zU6?Bi&MBc&N@PY0Fw|{-bPr+6*<ZxAOvCy_{L4RerNR;;H|G3H z9#YGAJ>P|$Oo>0LB8sn^eEa&5PlgxM8bD1H$kV6USgm9S9Tf&3pLA<0wG3moKgiF-Bpi#~yeVTsDgkpP z7RcOgn$Fz{di@BJEcZ42$Mf-@9LoUqYZK%3T_1D+K9tMPaFEV{)DHT{tgtcI zwj%QIEk*VAu@aIdXTY*bKhWJX^ZPTJ89RY5$V?2Io%xSeXC^M!MKzi$QrZ|bVQj}! 
zK0ajr?2O9_ca#g~<{Lr^qul%<^9c`>+)=C0*daZ8t8Qigeb*N32d!MW&<6`4LnU(@ zt@rB{8jTE%TPK@i9zIJt{}WC%XY-Fy4bND2PeHMY<|6|AtDaeePBn=v;kaQ9X33|) zq?!8umLD5b4k;b?6@4P|zlQ+$LO_=xA7w1e+e6Br^WJtQR{%+H0LQXkd7dbOy~7ca z^I)>{i`fIQ3ZsOTm4CQm4)@Jb?-lXGK*WfMaK$C2%Vi**3ie(eIQoTIx-(i~czK-} z&1b6%ojTTfG>UAX4ll&@9TVGyVn$n>X#bH`>Mq%5=WGAnN0^jS0oyEoJ>_o6@v zwS6+{_IKZo<9!DdXD%a4*G5Vd-}kla2}elZPGPeNH!#m;#KB+{D!qgRw7DdS0Rok0OpIWyAtG~5;o#p-=Gq|HIT zgs2L#cL|6@#O^+I<8_;;-|7DSLw?#07aK<7?{7I(82P&22XP^;Cg|!iu1{gpkgebK zJ5*kqY&d>?%5EBC+m`rNF&K6ByHnmZg^`G5#F@BdCxBRZv+K5#U?-U}x*B|ob3V@7 znKU_Z_M2yv;9{7(ill0(#~u>1>d+rS7KR!jo0EV)gm!mKHl1}1-bTp+CVP|;YoL&$ zQ7-AY%K-iJO?fBJB+@PMW!-j7;lgIqN#FR@MRa$Y!BFL?jceELw(ka-bo?h@5)v>& znFriUc%?Ub1C^cp-)<%9 z^VHqP8`-H@#nwCQHQ#c_2xhh#=n(1Lj+PIGcUf^c+1&`7$pI5_&Tj+=?cOG+GlS9p zK(cnH4eFlbCgS6yaklNAlOq#W?<617 z5HfDzype&U_MUO({2=QjELr93FxfwZf=QNC%*K7RXtOyHlJH?G=G(6qcJ2NHd)9)L z9c3@lO}$a&)kxt}eQ?jQ@oQ8MxycX`n z(}xNGkW;c0YWjuc{c?UidmK8vmvurVqD3UOwZL(b&9rv>Eu^AOb6)q z)chsfJsa$M-onvCDk2i4miPDwB-gI?7xfs!T0APweVZn&;mTU(iATY@SOKcu?g_8? z*7sWuw|g6|LLobyXSA>v}=BPp5Zd7Nu7Rs!fbQ6}9)P9Lc)1 zQ;brpZ4&0b>ETaDU0WVJWb*@HP3YMzN4R~G`;?E$j;hdBe0r!h{hG?G49t}|7kD@( zo^Py`Q*dXQddkE%L@`+y5IRHVc8Hzk?YHm7BIb4K)%Z|)goG1fW?-Qip-t-Znu(M% zFss2VC;%gEeKP$vrV}*KUS<7RZb9x6Q!6-iZ?NVXK2+jimcpD1ncrhc(Q_^1H@Z)R3-PzvJHz}|cPdh)Ep{M0G-%Wwy zOc)Q@mR$9ycS;36)L=t0_zA&L-qf?wBxd|vi^V-bL*M7^PB^gEQzbUdN0QBhFPpun zw=++Zz`-@JU0DAISDM;x!*|(N?A0yB3T))1`M#W`E=uo~AadN_n|u%1k(4%J;*0RY zV2zl%k;)ZZam<7*o-3JiIAQX_irVRZAB#O{q%;SJTYG;sj-ljHX?-?|~MtlxA0dp>S$1TIi%WV$YjygP1fo=v}-s&K#! 
znWIErzZRFk)I`qUeEGjYVqT-gj=CgR!_6A(Rx7wk+MiPR6|~uS@WdZAe**AeilB_E zjSy!+;#W9nrYi(!mRugx{kHE*Qd^n!Ohh#YB^Inx=u;yxDkUq%GXiE8T9l~B71uW3 z+f2d*vLs?XpSe(YLe;D^7>t&R9U<}1;wK9xKJrq4t^N3wsEDwR!7{FB?<@#onsdS4 zxO1s#LxlhjKfnK!&qxM{Uw^1=@F@FFLG0w*bdCJv8>0i#l)l9fer!%h!H+J!Y0?-+ zzlicGjFC~3C(Gn=0-tIX&VW!oJ-j$|Bjv3D-2zW)>NRvVf+_hx6)q+VR@JCwPa~y+9C94NSeP} zLcu~+7-X@(dT@|Y&O@$a%{BO#oJ2VQOzTGZ1FYnOUNVF8*woT3L(hIKzDoSDX9n4J zdNh7{gsO=Ud%X%25R|M5El=-z?S=CjYWZ8E zYDM?mWQp5hmG?zNkt*WD^z~@KSr*~t{zU$nHVaQwGJ5!4*Q5J92_822- z#}|4`B-Ikb&tsg!$5D zUu0I<8pP(8H~iF6^QSQvSK`G^t$_;AyOEd4_=^)jgvVBxlB?nHTF;}P9A-6fyREt# zbzA*i%`ty`X6;c3k6AR-{NaUwJK%cnr~3yz!Q>x4-E5=_^{$a?^>D9xU&U8u%_V+v zRId*8ACG_N36NV{xosgl=X~wjI<+}}^9b;3U_FvAyp=A%l)rh?XRcYGGcuZ;S!}GqqU&o-@Ctz@g>!>KLOuQDOl=8Y^E)SFQ zLOh?%YUAsh>=AJIddFT+TY?ooiIujIUB4Qx##!>T`bBhQo5_9%^yFA(#l+2!uaUob zO*|;1L~p{!T(K6@U=rIvn5&B#kg;DRHVd?QLDfkA*wqDYqQ(Gl57oAlB6K4ncD^W_ z%hoZF&@Yl$rC+CW;_xW!*PGNlrtrYIrV7?bz4kc3nM)T~6$7w6V3go@hsz?fZ2a%v zc?^NXcVlzeoV;?h77pOcJdE^?2aY`Kj93Z@1=eVyAU3qb>vg=jJ4i3A@5ca1Qar6M zTJXik^jh~9Ab$2EBgBQJ%a_4a5o8hlh9u4mQ@5w=#g>}?Z{o9 zrqt;V*xKDqe#fb}u^iedQ+ta$0nj?8yKM%t3{hHk^YeAz?ruWccwjJ!{9M_}EBIkhh@NN{#xKhXFjR zOeQ>Yk@GhaJrI4x2X9N~1STyy&PEFqoid zX}Emf>2lOs=&d<&gC0SHd!Sy?4#Af~L4i`eT7G6K+DA*<+Rhm~@1a?;zh z*JY5TO_z0i&w^_d+);F5EYa^t+tfm+3p#Hs>rh(KMb)gZg$>2;OZ1qhY^2+Kf zr9o?_0(#H@DAa?e&EKG%M?xash?IF;RH?HrVj24Nfy6V-Po;hR5AdunlovE0JiN3> z&@92dL>ncym~ZBOKcoOS#L{Na+df$2K}{m_^YCG=L8do{6AhMGVqrs9`#n?)CSm$? 
zx^4VviX*$X3pS5xyyv)RJ)^yLGf&Vo(nrDFy|oFSJR}Q%0T8>+ZBC#0&}#ymJCXL9 zcuo7OkaZ1s8~Ya-zt17O7!WJ3;`Xr;U-)`2*jndc<1l~c*xX5SeTp> zYh+M#LKA-*`%OCQ{@-%uT0?tGxolr+8Z=*T&6O3x+5#Fq4MIXFYn+?OFSeFHJE}Dp z6N&OWg!`FhU6)unl2RI1fpS1~kR_$N4?6#5l1D%Ygy=(erp`i}_)m52L%4XU*oAbr z<%bS+JmK%}!f#yoz#5U^d>g@vebwi*4!Gwu<`KLD$i8-TDY@C6if5eSe=EgETT~SIp{liz4j^S*q)2=^WlV5KJ zAadPD_a2kaKSqD$V5D^Yre9k!>l*es*{-_7Js`70Kf(y;^-0h+HFHc%gO2reYAG>P zs29*2Ntyi_Qsb1w*^0sMUNN`-^u|d;SWBtumGQ#a;gs*4qCpK(wxl^_)(D|81XJu< zF&5{_#0m`5VAW(d)(8dFA#-x+tVmgD_zObBn@1IslV;E{JHBMj9CMLxpD2=U28#!b z@mSn$N@V^<%-NW=tv^mq+^x~Py}xlI zD6q&kXSt+xvFRw4fE!}zCG?;=kUyNc4;CYm7cCf5gc_!yC0gSc;WDwmFs1468tp0)Bg88v@#87JRyxt}c&aFo&ADtfO~E!Lxs6|GHqbc*D1 zD^vio3Fzpe!wu#Arcd*`bLt425fP<(j&+U3?fVG9gTa8R0>`o>y6r3jAaOS6ze;0G z6{tf3u_Lrra-nm3Z|}co#3halN$;t~dmUO;{roie`1k-^VpUR3?|to-ch2Hsp@_^9 z3Cze2ME!Fu{6ERL&c36WMiH}DXm$Yu3D=F-BK??mciCU>ZO)$S1uSp$GY%~GnF={M z1q9Y2nIO}-cr%5dlmvNdPlJNZfa{uQ-hZBf-IzO{{=xwCry*Lwb2wf=si*HeWGWD;3sp3dV=BX%W zcB>KauVRRs2Am=|PP)~(wSSk>L;(ST#{8-roHW~#|5XM>Bk!pPus{#h)kNK>LjtA>`(en0|9Ff@o_FrE@c(Tgnxd?K--y=L{Z}s( zqrhGGSGFPsBWmKmzDi3;JO0=H1aE-;ry1kDiRUWn>TMzWo0BlZ{GaJ{pQVEY=jT6Cu0b= zEU>?~UqRBqH@J83o%8pH*acUke`g+!|I5RGTL9?yuot$`+K`$PVVvMjf$kK zgqWI#!Epzo3y%86&2#3d_o}=35-xsYtZyJyrZ24#A_YccHBI^3cao9`lKt=TO>y4g z!Y2}{NfD^vsJ+9L6vM>8WgqTjMWmoW3;;7TyqT7%W?Fto5t2dR} zt8=^ht1Xv4_o%TL~2u1=7nr8Y5{Ywl+>h1pvq=-tOdYwLm>gwvv zj_d!8WeT!7f4)yx-FD>9QWO8jH&GZ=uhj){d)q=;O$}@^5WP7Vm)9Vb6u-Ug;{0Gk z_-{;8-$0b(%?XK#>v!iqk|3mun4Ek%@}h{C^{XLqKK*L)f`OYg=z1odNIhJUgdFB0{NI~$%rgWVDC=Fc`F-K6K~&pJ4)q0&Dm#&wCc)4j-JuqE`%4b32xTl>B8R&@fA7f@&Aa<$ zk3%jP`Q2n7;ePmjWc#mVw-&nDO5!Y(!^6XI_vwVyLy;|z9CNPTjAbvxeC-2BK02P{ z-wDwNkZQ7@8s&Oi?qembmowQvHA72& z38Vd!A$i70>vXyGT|n|PWl`egN0_A3IPDYDR{dY$f`VvqYnqjv{!>r%K7X8t+&y0* zJ{f3y{1H<+wRzXE#aH?nagyH^CpjD~0-=PTzHjFZOV(Tn@_X-~xg$x*m$w=GehAwG z=o?oxaCE}8_yr-iKeu9I(*r^FZa+O%9q3llL*l*t#MV!;RRF^y+aq3s4_wrr)eZW$ z5-nWJw&ei3)Vs1Ud2mTD(Qwx)MYM~TG|LP}$^^wGh6gsg+|ZSRp?U)PFoxvK27c4y z0#&`<;md3^;J&~NJMV{ 
z=Z!EaxQZl)!4nBcIrPhnR2IA}84E$keQnAr_1G4lPkzn!tb*sZ5Nqb6FH9Dd{jQ`F z$nW14p*wW%iv!?!@@!Y75>B2!Pde=12XH#8{}86V?|(E9*09#a{` z?j@7p9ou_Cz2agFdVGFr5EY2-2P=ST=Lj=H@GC-M5&F+uTh=io&l0-%pv$7$(4+qQ zT2esQy3c+UpXLwR3uJ}}e_-DbIT-50ns3fIw&l>1y1BZ3bRNa$eWaVg-!5h8d648 zF;>e*z>A1(@K=in$vg~Zjgq0SI)E((xOT%*@+9dL>gTdm&OI(`A-0Py7)M$zD6^RtK7k^rSum9z}3C>afh(G=945!qY9* z{3j{OEPftyn!6WbMz{2U7LInRe6Xk`fKKc*G^Fi8AO2Urey-$8t?0Xy?oqKA3gvB$ zBNv?y+#5d`q{hfwpsj7}>o+T>g+vsHuHDu;D66-UCChd82W`0W`ClMsuC%oaX3ljb`1duz+ee^%I_MSUzue zL~I<7I9}|!J8l%c=AKEP-#-h|E!CpDPhagmKTX=0bAOfWUhk0WASRl3(p2J!L=i;C z1|nD;KqH`*<2mzP`ba z2`BL+$46tu_e94>>kPSsU+KGl9&xl^6)ax5L}PG!#ico6;sQSi0*q03jF2v(Vny~P+5}g zx?eM1$X+Q$Sd;w%MZ^DW++vgGG5GVdkm`oZhv0ACK07d!EYQ|!nvN3#E-n?HYIGx4 zukl!3Za$2?UO1@P>xR~ec1ClGwg+F3zeY`wY68IXd+&03xsB zLQkz$nV{ETuM0OijSnyBR=JSPR>vfUf>Y`4o)EYE zANZmxy>LP_M7}?d6d8}#hE!j6#aH_dnc6;&tJ!+lAa&l~ir*bHz}MVlW1r76q8tBy zlD*q12st~Ch_7)z!D$!u=2-Ey=U7R-#z-1Vi+mk@5nM@oP8rV_Hsrr7CwIQd)o1# zEkxzwHyoWFFWP$>X=Ee9z&!HhC`!iD{(xI4liMHbcdnBdPge^cpQ^uL@N@}b5bs<; zlE<&REyi#94=lV)W~Mu;H^RuYE^ZatuWy;!*@cmji>~?(Rr_|YD75ZwjkMV8=yrq? 
zqCC;j2QosWjA!8>XV4CQkBgO@6n3L`WW%K2;lI|+Y<-zZG4k+OqEl$GKv~gZTlHxi zT5HSMIz+ze&846`l=$nyBrD~tw-&1-+$zN)55dUfW@}TWWX<(7*okEv%8j|i zaw|&G>ut-9mQJ)s1}Tw-hKgb}5bgEv_^dm#`t#yG+LkCVasqZA5Zs8Qg9$=q510Y4YK`)+hU*y(no z8=l7Ka_2Phn*9f(XL5Yo|2`@$>-tVES`EdX^?Mo;-7moPT+56HC6eKnbg=5HqQpzO z6JIkM5&R2oOh*;%+HMQxoqZBw`Yz3vMMnMDTWfb){{lh3j6bNWxiI=9 zs0L18^Q3#3-AzU*l4K5!Uz>1ne^oV9!Eg&@7&%4YaQ5?O=WXT{ug&Y8Chxy?EnKe#`ZtK(t0a%*<;3U` z+KgzoXs@>T{3g1CC>Z-S8n~X$&UfL3BtdMw~diOsL}Pk?j>B7lH5t`mgb>E&4;VcdX@Q6S(0Iqu(S8?k@dDN8~ef z%XOt#YwiC=G^(571v{`<{YY0BUboQOXd23MXFsfecR%FD_XI*f>mmuKd_@^}NmjVq zClvL0b$N5KBVqh_MCbFUz&HClvU|r8Aqz6Vy4cDSC#Kt3x5D!ngwtp^sem94rFnj_Ikv9GE(EQ@zq2MpX z=k?8bfB%J@KZVVlu77vu%hjMQ3O0z-MJ@UV1!eHh)!4U#dw+;Cy>C9r#h|zx%n&;; zX_uBKV>UnTcf(bH&99>)Tm@9)Aw?*E=kmK=+~19hY>Qgme#^<-MyJNzQx=}q*IQIA z(VF*uzQgd|+35YgawT>eojTNQC&={E^t*S}IdJ7X>Ez%}wv}vJO(JRm9}7kCR~+oe)PZCmCsG z^nPB@Xyi~Ybh6|8Y_BuL=k}ulkstwj!Nr-XJoDo5hoZ{@yPM~RREcKLyvgp;| zLC{$0avYQyHEe%5cB%~jBvnXKo3+L7gvQ)0u2LB){qS6d_`+>W;zEAvaeBSc_16V#E_J_>lol?1QfnSv%?VmBo9DI^-+e3%8ayyw)=<&ru z)xoVz%CUp+_nPracyE+auFchcTv==j8SC?h2w5(>M@qVNzr9 znZeInRzAjMgjDJhhuw7hWWXi5Ve;!|vXG6E-LKLXgry^N10Itc`Q z-bkUxyYmg9^cQ2!4-OH6k8%RpB!<4AU=gNpO6;q~CuRov@r%dtJFOLh9>25`Q^nc!y^xte|ZyfD2LwZ?qR1Kp}#YN+O_lBHE!nb;*gH?C!$Oz@|u7 z@-!uPgO-Uwm;k1~gYu9I`u{8FN0U?JPG&XKc*yi=vLt%p90R+bM)dAZ|ETu!0yGL` zMcOG*g;5zFUJ>~4Qe&kT4y?CH$DdtOB5DazliAQxuZ=tgL6?#vXKraswuOhZcti;0 zPmjUWx9*T!#C?xa|93Pl7ybEUIVowlA&7)V&4R)>Ey8(hwT4(y=rVnSB=@ z%dvvRf>LM)0ZT_B1gXoKp$eiD92a;eOelmYLES)O(+^jNQRO13;d7?wn4tK~C^4Yr z19Hy(AAh!o&`}Tbq*^~`;pMRQ-aJ|yuQ<@xb@xlc6bE!Q{?Mp~2_LsaASESh)J!IW zttTinBsL}s`L-BRI^Xb7kRV{h5+F?Xk-4stUh@3l~Z8+i%te=GRX<>#EBkK8J~4C9RK&IHl7Iy>zpBipF)4 zOXP*2DjI4IqTFav+0Pd)H~LY0mC0s;T6PwXUVHXs0?#`~hsO#C*Rx8bJ7;av^9BND z_1C2=ySC)&d5mOemnHYN2h~EE`(_y@qOOkz4ypsLj^%ZSU5PqPc{2`qX$Px6rd=)n zgKAsX?@pJeaQ`=siu4OP&?jg~;>$GzX#0{Ceht){6(xyq#il&l2ebJ+e4-NZWy2$1 zCJ#hEiRvFdXTLMG6%rv#2IHDw)tH)+ya)@SRmI5lmYE0|ofNAn9NP8lMXwMf&yrzt zEZug@i$x;W8mhn3tP(M|b2Q<5Hh{G)3qbIlPG2N1{ 
z7$QrlaozbB+>;b@y!40GEz?ZKV>-PCE1Fz0$ch7@?FmJERLj($Qm@YTa*mX|B5q?> zhFN90L*(46_8{gyk#&~zRdeE?yR0ozwa+)tH0{-e7ZC;xCAHvNy)SsJr4BQ8ah(`# zP){4P-0USfYc=ePHRDB@J<_{+Bc)Kd%xf_j!JqRaDbT<vQ?6QdzosAZS`(Z+6w*%Yu(dSJGeCIx1A(6aUa= z<@4-x_BG?Op$2(7{|JFSWkk3~m+o&OR(pRKQGF#J zuH>XwSA-=tXOQ9Y=THt}M4mY&%aqWs_GU;40n6TIA1f?A2)Ld`CtSJYyWsoZ;!MSJ z>S>F*cX5cV@Kn&^a2E`e%@|*9XUPxxm5n+F7j}ZBq*m_74BS7%!iu6&)QpeeoJFj(O!;Ni!y7ggW-cp1 z<166LeZh=g^zEB(Rctbj(5OB4oQ7T(UuOxluzFjxvy!~IJ~%%;yz8$2&ZOx>| z1yZRoD=vz$0`k9GzV0SUz3wkXr%e1NHi;d+Qdd&}Zane(_EUWF8@ zvb4m2!RT9_>sPcS`{{yOmFqSLQ|2cu7 zL=hjQteTdqB5hl)&IRuw=bb_kXKwV>X%^1yPSs`B0ijWFOF9=Sn}&7cc#<k& z(5xS{0?=#t(lW}0ty@1c8H`#jD6U?ObvwAek6hy>+o%k8Q(9P8q`y5J!q z&EY*e|Dc6~LY0u!3`ni@0Iz8^F6uC1Y`GlOyDtLqBV&7-1U@-zda%s+Y9 znfcP?xoP9Tt^S3SaaymN$F5v20Ggpn^xTE6)FVt7K#)20;kK~8XLVZmeB&nu(`kc z9-y_?EtkAvOeB}f=?d<0tE-rf4!(=rA()DS6Opha^f2rD+*New!7OH+6@4hirq5>H zIwtAcsyl)^s{S(2*U$PUw~pLZvU|^74Rg?@i9x*?8?0z%@k-l4W>8L6N1U%BQ?f^7 zLfS#?%?N0YXTZwyP)$iUh$HoxM+0} zB7)3UM0u#y@%ua)`r@pny1-%+6Vr%Imm7HPkig0nfdvVD(YC-QW^R(JW5>qd84mJo zo^3jYVXoSb!BWfQjng^n7o+as!+NUU z@p;T!ij-v1XnbeDxmNb4$kU|{5Rh)J;Z)`xjxX|u8&ph=jgS;DPpHzlnDs42Z~nWV z;tB9Qlfyr@CxD3Uiv(uaL~jRPVNu-{6c9j297B~O8vOhvAv}NY>=R<1kf6* z2V{bogcoymB@t%|c9jQ)RLUZp{6MM7C#Y8p1upgyctq4nWt7C}{YGYPd%naWd)%_^ zK>j_`H32$w=O8OH{gWZQBQL6Bh|LMKhQ2UqGH}}rq>jvVLDh^O1utv%%oc5{TWt%j zNy9Y`RKrlwhwg2jgKUTJui0<6F}L-FiB0)KwRy>+fhuiKEmH6?T4CcHw+z}UH_73b zh%pzQ>tZ_}RH)XNwl8T3d5Kd_r$)+d#cHvhmB|K3TnEBIQrLCyccx06o{x5Q7f5zWbuN8cs$06bxqRAg*{Q{oEy=Qo zO3t=EZnp`Vb*FQsa7(O$pzwEORpXTR27*?mcE5DTYdqUni@ko__xQ!Z%l3d;F20#Z z+IvOh7ZO@uvP&bw#?al7*(c=cmjH>5A(KZ3F$oN+#VT%^M|GQYJ{E znbx1**Q?Hgn6RAv?RlwXH9-Cz2Czo8> zo{a?xzFcNtZaZc?T|UhuICSviJIOl$9nTw;l69Hi7#!1il%UprN(y=`C&V}znMf%; z7yEFzwp&rc)aL>$+u=|M-FqGj`g$K`WcMv?prq9i!P~ zm#qruF!S`zdKy&gWO!RMe=T*_gqfI|L}CW+r-8V(uK0P$Q@tT;N9)1H9ELUD zDq1vOWf`G`tNRjO<*X)rn_ZDD{<2x?w^mUbJ&VKO5wykrs>-gGvZc=lZq?cbs9j}g zPA_fM{hc*`9G+%8XPO;IlQnmri_6Ov_~%+zhHnhQm9}{r%K}G699;z$o_P)~9ExjH 
zElMV{R5`ys&|P72O!Ic)22**joz%afU_A~RbosS~Lcz7)-P~11ezJcQ)h6g|q=3)5 z<#yh?$8w=z=O4MZ73Fu%JV|}QLnYnt$!AnQG}0<8g~|ckUoZYMA&tPW7coFqAqps= z;0^A5Pr=0hF0O<|q5w?EFhn&h{ZWJ-DGaEG^#eRg)%%e{ga~tkQ*Tf%MaJ7xTUJI% z5Hscuf!b~pFd;xgQjrVkv!Gh&@24YtSquETt>EV-ir37VOpFK)Eth}(c_WC(P0wMp zY^4PO5CMkv7no<~2h|-?D;2GoeBC8-<4310B-4AZW@__fqsqr`=5S+-jlXv)xN!3sPGyY~;U4s3Dyc6KE1Q@azNo?{(nJc+rnPT6(k9 zIsCMp%=A};SH1pd?|<0x-c{!@Shg}-(X*p<*l0Z5^N+G2aR%2_CGYHi$p?&u(>5vP zAy$vQotU_n?q>S+&kl7g##vQX(*oL>VZQnYH|bxU1g#D&Ix-WCc2>V1->6a#zPOt; zkv1V?IzBDWD{Y28KM2?h+TAy9>*-`gxE2?BEiRl`SU}#hylUtzMtgOkUOIewy`4%Z zu7k>KVY{ztv0*=hIwn|4NI$q=^%Rf2G`Gor>~}}{_3qr$f%d#3kJmYV(a2O99YyK< z)8~Ok0*1~QxpPUkANXF02M0k%G}nBTY%q{WgpqapROv0x2#Ih)qNu_%j9JPAaII zvY$$VMlUgq?%X{;HWi^h97#Go#nPj0V6YWRytq?5 zmAm>!;zySkjdM`SaIgr!FCb}4y>+grUqN!kSm*~?US!1Q;r{ZO_u&e3W_J1g3PXiK z9{HNJI;_~OXsO}U>6KUZ`3Lfrjn6^vByFIapHmx?m$wQST<^ZOr`7i#yeX3)`w2B_ zMOC=&o*>DT8aVh^NZMY{N7g%fSyo~+LlkrXby5Cvpb2^9Y6xs}Jwji@DtKGNs_N}q zQB&E*qnJr#BU^FZuu6Ng$?Y;cQ=fTMvq}a?dM=rEw3_3+aDDwfzj@&HV_INL6Y|nT zZ=&x%a9%kC-h4QC6|836J-8?z*|&4>9^EG0-L`Ee{Nv9Uecf1=eBuB(;Qd+WkcOm5 z#eK0XlU?bB?+fyopq(d%`{5fKkM#|+o0RG3`? 
z;rEwwUzv;)nptFF%EerH4@Q?r>%hHG4Ft3nxNgh}Oq^x37ohbMpaX;8n6dK_!ny>1 z3|ypOV?kioH^E583)zFxNzllzvr}*hpkM+u6aYr&li?HfF%iN3`H05~q!Uu1iZ+tA zfQiqM4z&H=q4l_C3;khT1<>As^-Oe8uB~OI*}wRON=pJl?U0Awi!**r!2up_7IKiF z#NsVJ2ih)bBpNrDBbXop`z;`!tQk-e=TP%<{O5QtYcGflab+)lD!j!EbF+^iMpN}7U-+xp^kZ&SzaEZQ@)T_r|2x<7W*er$Vlkz| zdJE{pr13}rPA~v)n1j{^M-=Uh0h2U5NOmUMglPm6f-07z%vjh_$DW?S?FEZj!R;vO z{>hnYOuo7YE1Cr*Us~F?CJYDp=(zJ`LAxW|;N~%0+1~~7$q9b6w>vjtUf)^0Xdd#E zjNd#^-{twYlh+#i#!{P7%MZ$GlqVDH1iplI8n+YRoyK&BQS9O4N0O<$m5D(8l-&D; ziuxS|(=hSNI@uY1b|<^YS=e=;p>eDXn^4~KI`sx?;U(VmGVW18V@@Z6e3%&U_H(GB zyqK0V$@k5?HQL@jX@p263BL9qs7H8xbR-5yHB&<+E_`rMUg^Ri^~y**@%qQ%AB;iT z`vch!!LNNv=(9Pd*J=$85jbg#M`csRLy%kp?ZHVFqoL>l)VmWUIaI6B!PTO=5fveM z@ufao*{ChpJx+Vg8FfBl>OPGrjqn&cA$#W@=5e4wP3N9n7`oR$m=Pe=X~j5${Gm53 zsL%#X4$6C)U}wQ_0vJtpo^g8qQq7Noofs06Q*epM9#7INpjSYA*XRpv@Cm;qQLI>E z?}Yr!myyV)reqMto!rkfI+()jmOTfC=dMw273eo3k^$`(5CW`lajC&HSniS%baDP8 zMmkx#)t|Jhf_r6ZN|bWq5`1h=#sQB_qaL*5*Z7h=g!Q|Jy&r9QNYX9-aJ@?)nkrWD z|H^%H{4qbP#kho?krC2pCko&ec|#902L*j|!2?b+_oFXwK23_}7d_!%#glDD*-vK4 zO-ZJn#J$KwT;OkS*9XXiB7Ty{XZ;T8ShKkROQ*o=AeLA$@9bmZe`^GK==9y?HKbJP zyI`~%FXrj1#nNrY@T`lS$%cM%EP4kXrhVsr`hD=k3gK`H@oiWb@O$pW0uD=I6czwG zOb|lJWJMp`5FV}}V%GU;m0oLr8ikphd+_`N`!62n7wq!Ta=a{+XPb-)*;5veBOWlt zKTDnoZgz>jkDMtTs7imeM9t?PjImN%)V; z^3%`Vf3E^f*1`*<0H;_F7bD}b83d8N&2OlBRZC9XpfpdAiquoO+d_dPcbVW;MiJmi}36M1;|EcSD!qz7$w4cr5nZNGSCGOon+-O%&>GR05uIVx)M~L0H#Ug<4vJ;{Oy2W9{wIk zC02w7VM#QG^^pj@$3`#B?1R|6@ZNcnq2xNDr@Y8R_vr*Tn(Rjp)t{EAz&nM57)yA# z(TjNiM{kd2=MRN;ERUM8b&dUtr^u#uIbMcBoVBsCtK8It*xOWeh{9So+$Scb(w?jN37pd#OP( zQS`Z}3P8);p2d$8{n$fJD>C6pQ`1-hlf0B{I~wo$8b3Me$;Zu}#aO?0xL@l4zwwa1 z&uthYb`SE}yumPBn(fBteeb!BN6($(2h{q{sceY+Ps44$VQ+O;@ZF}DDqAJ`={H?K z&jbJj)5n%mg$Oj7dQ0-?Mt0Km%(?dgnH;0el)W^?U)@JDUdN?olK-N0DKz`l<)gN) z_MIhBKA7KrvV*tl+LPJAMG(( zShx`Ut;oN1y}RFZ=oV|Hr+U{El6-TMTAeO5$d>|Xd0?&jw7tcw5gCpT5KPgb;OK`t2byDHw_{M|_u(x3=SR_mVAKLz zL^?ms?kX&C{RD-K7eY_TEHS5$DgeF{0t|1MDTC@=&bqH(vH(C^aQPC+USRrvLcbBO 
z`lG}|npFnJP`mw_MF!FRQLp+||9f`Sug1tk>uzS37*x=rv&U@V=6c8LzV|JCp$XqV zq?=L%lbFX&#U3gD%nMNueR5s9-Ay zmR<^qja^*B_9}}_#cO8VKwF{!?~I+bO)!*J#HP447l_*|kt}^^O81%IU?6apc!!LM z*#mgD^YF$&(iFyl_HRX?vpo)EyZNx-%mo1_&cf=uld7fn5)KVjKb2`e^et~AI>yCj z3^ABwnSccR+Wy)JGSogTMEi%UE|D&Yvve2lsJU|m55I6ai2zoBjyie1bEF7pR&P(M z@tUWp%%o(ZWD5&0G8%!73mdbsv(5CyiFa2L$nmW9i06IRVg`3*{({uJA3WC0DcH<& z&NaY0AN*PMc6Uo;elr8j?a^?7*jQlVzE$4f`Twp{j>~es%Kw)qO%=4ZGbiHrvHhj|AWPP!7)Q=^=E7teA!19lfDZ$L8WL1(aiA&B& zsAIn;w5-GT@|Sc_0> zP_rkJ&aeTS^e>&l`M5gZMm*zb4TLCodt|FXUGe<6dWhn&?q(sHol`tU`hmy#jh^iWDDeO2 zTGpECVk-KmuJ&c|dXOaJ?f3-G+1><}dxJxm?swgFJPKB3VE%;1TH)GkGkY<&J*pk; zR(>LYyD8Xv{f16-3G+AxFRrt>Zz~FqMsz#L{=Pl!@Og3F!H+C_&XP2k8H7#8s#{`= zP4aE)?Q`|XqC>(1SF-%)+5dsZ=aq|+aO+8Q#~-SrU6ui~6=gLI=6# z9OL^&z~R$L&+5}D7RP45BBjp+5aD+PjS>Y1U_GmO2#UrV?6o$w_o+V8I!0F<{Og^^ z8f&K=PECtgRtu(ao{Zu*-WR-&kL#g#o<!@CvhBF(?I zVE$>#Zs`~dt?z%mun&5r-0ttnObIE9CE;I!>u{D{tE=h+B`q#xi~kR3KgtiHFdyS8 zAKZY1$V3~F?q5!MXS~_7cM(eR6x9T(i@wpmxy`6GO1R~y@lK_L_V~Y z_}RRJ^6ItziYWxE-Po_v3b(JGt3j{KJC~aNmxSp7n*(3^4rYd(w=OD!Xy2>;OLu@P zkP^SKigSP@P1SfFYK*2ll59M>V1uTR{84SAy52YS|E6Kp`;!A3Bzrl`x>kJ?JoEp4 zqSmj!YG4q4fd!gI^GCIedOcpmI``!~gGR7^8Ho`s=1_$)J&y4O{?j0?Z&1rYPC7F) zGd@2dL(I5rzk5Q5_BNb&3*msbx7&bPcZ>Q+mUz7)V{l8C6odw)J`%A;>Y$#t{VKhVAuUs;o zwf{EiXZ}|YTAa6A{694*kaD2?AKzcY-Xj$JC2zEZFBcV{Z2hm9@Bb5zxr9rGAe^0I zXIG>#k|Edqyj?yor_yXUsn>TZ78&yVtz^^W7?-gvY4?A5b44rY|3t?fC8|lyT{@xg zx|MKzMxKl`q#Zt95yFm|k?A3JREF;dB_3c?P(}Sez7205g@D?gpEhYPVXc`i?$2dbuDP__JHhj1Jr${ckLQQQ%?E37$)n9CMz^M78v2Iagq4uRi4IygN z5xxb~H_&SDb!fHXerdZ~QW4luf2Q!iW-2o|o6Gau$A$7nCs=di7_pXJ339pk8xomF zWOpyNcYdVtWbPVldTIR3$?(|(GoDG8{oaXUYRiXAD`XXFkC7pK=A3-gSBgn1+hrQ=F(S9c=2{YX ztGZ}$@hZ6}h;yb)QZ(HjQjl4*iN1c49Ok2E3hV;=`C1|x1Y!Hlkw66JCCZ zHoJB{@P;xD>*HgZT8&5O1@0I z0ukA3f5V90O;l-1PC3QN526MNw^pW*;*CuOtpq5I^iA!?5sHd_LBG?oeiBi~@fU2t zl1W{E;eZ?{n?fB5Y{@M#a?d<}24wxlUS&K#fvXkMd`v4gYh><{E4laUY;lMX%r`$3 z5E8ZMw8PugW?@vnB?TSUE&J@t9 zoSODPH?0NhI{hFBHrjukF>@#R(URhCAbS=9dG<7X6%#CT<2t4qA;)j=3;F?~ZWhuE 
zjT=s8&58M1JKEFRv~40B4HAcfTt<@AogR_~##3!R>OtJ&=HX5>3Td1JK5hejoq{*0 zCYj)TvV5s7BR5GcJ?v_t5k9V~s87}hAIU%eiGp?hM*(JLH{41vCy5@cVut0FZhRFb zn40{NRrr6R{-OFrY7h_2SP8WnW?PgONKcN^9ADFtjhfNdMk5VMF5a$qcx+ApD9)N2 zr{VBr?%yIgsR|&CIN>@kLp%8#NNVYe@GYWBO(2!=Lq6!HimIx)(t1hM9DuU8l})+!&H!ff*W>!7^)m^S7an#Dk<)15a|`+Dxu-l za0I2|PU;kN_CWC(-1nwooMyC;kSHyX+WI9Isq)y!w=_j+&8$%=rQfL~N-sH^8Jwua zB3KaSRXkhBsIWuk-Cwr;f(?}C6l0f_z1G!lIYi-+*vBADQ8DS zw$=X1Kg;I1q=gwx7Y${Q#__+7T4Vr@SlWEg!v%asi85b=XPjo)(uj@N#~9F}S~*KF zYqC0b>fo$RY_y{@43~+%473W&4kdC#Nm-jhJr|+cN+SA6X$R|Gm&OEzR>3g$5G)h5 zz5cawm0=lLdy9N6=5zBL1apQdHUfVK0)$-?=^iY=j&FONJ!Ol!F#dX5#SmD#llU|t zp;gu6ML^I__(LhziLEZX`hHBFe$T-<3}+l4Px#lpf=uX65{NF3Cu{vjNZQ*ps}!cA zZ+tRCdu5E zV5h_PUl7qu36n_NzYrGV_7f~l`IlC^A@YweSiA;Yg8%1_VWrT=pZ@|3Sa|0|4r`V3 zFFrj9{OgYY<`DP#!W{Ac+)ttZi$7pt7{;yG{~w6nf@+P0gnW0uv^^|qT~z<#MYt`e zQ}2o=qSp!Y?!B>YewIYAivAm@bbwUD)lDQQ+0CitpcF8KJ6M<$hUy64@^JNQH7l+A zlFq@YPS?wpoK(fEtE)je&m*mHI7eV04^mZ+4N+%R>J#2)-#KQ%%1j9IE*u>wwQGRp z7^BY~d~W-2Z;kmAt4(8cAd*70=0@&D#g2f98&^=ncrw9O=)wtgv2X)}hY4%3K$9bP z=6HiJEiywzwPx>9yciUL2_#~LO{x_WsLr=h=*uB;5DwgPaLbO=B7hr^w#S#(dXy*| z#p&5XlfUCk-#9u9B3j;hn_Ht_gQ?47$8B-VF^Y@oUQ5Dj&F-dNJguc#jfEbZb{ntY z?1ksd6xDp)*d}*gjv83d|CH|D>3<>WPr|RU*hj(gY7u}j6$=whKD$hmvtw5-Ts$c-(I6cWOe*v}IJYT4D8u4>~_qsygeQ{C;P6^9nlo}?M&@xM4S zpBw2hL2T5-`iW)(A8&^e`I@^vz|KYn%#|cG>OJ{~_OWX$6}!uFSoB|yr?z>7CN`k3s^B|__irn{c7+` zr=}wwiAn8R%qr0i4)jVcdWXB1WHdRV8pk;7`B1ptZLt$^dRgP6TR>1|r-WZ1HLIJ5 zJ-(-~bu|L~`~z`vKXtEUbk)8Kr@Wg|&XYn^b?3_@lM4EFTWmD$%#=z;`8jaKIF@?X zW}niuCvx0O|GhCWGrns!`zJJX;J6cKqO11R=PZEt{pp|`RX?g$@YBeStaT7bTqf_Y zfkkEH=9ZT|Zf4+a^k4kq>eg>Xh|i{Us9^1KP)v!{sx_k|E|_j2T4C~R*A%0H|F~Z| z{sfH*A1?~OFEE-!bN%(cMra#rdQ+>@kFl}b(7bbtYUD^|ghVHpL?r~iLU2ImwgmBa z4`l=oz);h^{=Y4tXtXNy9|Qh_XFTh2B$(VWGmj=aES=z?Zcr%EnN0;ejDLi#2$xa7!*o5<$irx&1U6cm=bn7jrR(Id?m(?5{r_T< zAiqB+psrg($aWGt(fWwk=F+_^uya{EmeFpB8tqcUiKo4=_0OCKV#2xCB2?@f)Qpbw zr_I9H46332>?(kJxCvBFO51s>Ywn8N*!VJI!D+p?tNwncljY(fv0Ui_&Zc};wQN^i 
zCW?l>cI777t$d&)u+*&Ev;NplJL%bBCd?7a!0q;I<;FbBVaR~Ze)}GFfHH*x^I0+` z%}@EPOhnhT+&A0D+rMhs4^xU79F#NA|CF`a=k0xDa7`xq`btbYtz}{b)u&^uPA2N` z?XbydzcOxk@y26#)-dC%M z`ycnMPB)c~uW~rjaSJg@*|(*LGKRL=FpDggHD<)5wKa4qxtF;o?6{2NF4xSQ$gsJU zEQC5T3h`|&VZ-7K26@8O|TH2BA5CXcxXKZC)N6pJU4A*=)b= zXKSy&G4m#q)kw=ziepiAURRq=pHuh86_h zK6SRG?9$0EY2T`JZY7~Hm*t4mq|er|RdKktb7IF1hx-6==B)#wuLten{UjY)y`X12 z_yS+2>47aROi^z22C|3)G^W>X5m3N5=i}nD@A2SbWE5Sfh5gb)n!$Kj%m{qS2)%H9 zR%WzPFJZO!h|jVk`Q}7`XXIBQbyqt9LL)0xEA|_Yv_3VFbDmLtJxLgb;ac>a#=p3K zmKFxNIEFv2!D|_p^PA@KJmI=DF9>9LF_dRUwoefiEdCf zc`x^Z7LSmY52)o(UpQH^Kj~UacF4FlMDm=ZaaC7f-8@v>_s$6a1~HX?4)6kQAU2w9 zV}D->eKh$SiA1{cMcLORJ@Wpub+xG^>fybMZ*;T_tYfP)g@TIX_ZKvcHx>U;rCJ?a zJq$%BPel#23rsDxSXkl)Mg-xaPDe>|8}S*#V5@8YU9HKvOW6rLj~8^4qdh|BM|mrY zB;N+#nF|&2^2O6JeUo$*=e&^_NQ%b{HGUsti_e*=X|H#Mhp%zt^duGLZOZ=`VL0;q zLLDN%WF?`HirG*ft~k+?jgRh|ttdfb%IVoBT^d$zMLDdiso{PmpF~dO)A88%eS}9W z#^Jf!WWfy}JbQfvj5zidT$FoS9Y8(~iM?}nKKBfTfcEh7RGIkfG~yCUoU3VlzBYJ# z+^xhq_IGtE$@6THhAmdZSpNKECC!QJJQ~&W#H-ssGBZMStH_U5ouB=B@xd>$4sH0x zUC$1swlr}V13IXeclIq(&35b*x(W9#WZlSWht}Y*V-TLkWT0Di!ae(9g9}~uI&+op zT=&rgM?7w?>{3#@a%}9F`}|A@x?r$ztL&SJ+0+SEnAO$Na$3m)!g{@qaa8T8g3~fR z_exNH*FT-blzZ&nf4;qH5Sr~`sOD%`di_+r)_9H3$gF&~cH3Jtxyo9DeLwF0uV5UX zly>NW`i>y452X$^Pu|6$AyAQz8c+X#mP9#CNBHHLCPR6DBt-vKZh>&RGQip280 zm9OjHp9wOQw}`p(|Ij!VK{9Z8YBBK0CjCT^vX!i@zfR*%+a4&`S)c7H>M-jc;0xcBpk)2p8)%3jSyz5R)hsoOC1-1>Sjg z1dUW2I&ftgCbH{jkEhtX*5+=HDP)3Eg6Ob=Kwgr2)B=e1?#HD)eZPJt@(T zDkm4dNMjzLFPuAP>lU!Xbe*KJU;ombR#ry1B(LLw`g(G+Fr?vPqHbS?@FaoQr7O?? z->m$d9YA08B)Xpd8xs~>Eq|?1qQ*PDM2!jlz*IgK@;b-qxzr=Q;0Z2nr+uV4xJVvY zRO?I{3-lcGeJh3?ql*K?_DY$l#@*@=R0sV>fnyO*$O^R#@UAmjld!}u%TQ!~=Td7l z|7HV?Z=c@yL_eh)q}?|H^OwVDje~L6o0F9#Hxm;xUcAxH#AsS(vX_^7gKV_%4HZU| zAuAtlW%{c}i(_*qw8pcrj__NXj)Ln!CkL))Nu9F-lgEd`Y*USA9W0TmKT;?k+5*Cj zx+>Otj@mtTOU{~bWegW=Z+~V?Sq4FI7EBzeHn9w=-ivDc=uK+fwO&N8J*u5=AsEaa z)%Ke7^5wC)(jY;bitdRPswFs|;s}6-Q6zARAyGC54CUHw;onT9wd$IMrV_pzr|{Lw zPh4HpZmA^~`YvDwT0>I#X_&X#vNEfnM58Y;12i0dU`j~lAn#T{? 
zL9K7K29;Dm3LuWR9mxBYN5u&YuS^AIrx}_06ZS3R+bWdc+RI8SHgPpzrLe$#D1iH* zTDdjYBG#pE-$g}m)?X2!&7#fAnHlvm>nq*${P)6UQ5h}}U!`O)H^cAab6kTx3OfBE z;^Gg}I(C49WC5Ny!h{VUvAOITFk*h3yCP(?#!nw%@JJj6LbQq@>U=z8wx~a{GP})b ze(}~#C!pt}y*nyUk<9n`2xZjt84aDJEOXdE|HprT^J%!is5N}Fy@q0TWC39bUz4o0 zLOlRiuCJ)nDkLIaQTFDg5S zg*ujAzV>5s@V1A8F$pQDW5t84V#$6zZ0N73#-ZDk`tT*h@~gb{>B4KiJe(Cb@7GcU zFzf2iw=fM8i>`|;YzIQ&Hj&d~dU&)qo`=ib;++voY~;ilR%!wddx1QEJsa-2qI)Xn z682=2tpFl7nqZhO1#}JhgC+cd#J@8oSrLZ}9)%^Q3qvOoVy11IZBc*G`ZW#%M?>0$ zbqy#hEz5AVuXY033SV0t3HSNuDvHLBHk6IFEf=-o|2e}qz9}83OQsxK`>GygJ5NBs zM;0_w^inoV>&C&>01d{r5ZS1wELHNsfdD7&9KvKX2E#+q6=oS_eDBsPkXBUkVS(1a z1pt`Lc!ruWfk@iVY#Nnkto6Y~?4*ZgCUW1-Kgv3{1_>`-rRQ`O?n`G^a?m(c&`9)v zMnb2cU}aTKrOQwl>_ZJ6pRSVytGEEd;_ic#9vlJG$mdVHd@wC%&Lz#I2!Mqa%9}yE zizaDC^^Ym$bj;Kp!~P62gW-2>Ab1N%z$Gh3a*k_CI0_0WpG}+d4471hcA)V073`gh z<81{&Zb1V2R}tdssV0o*N#(7J1mEx8V?+duRmm;i@E(ak#ri=E<4h()T*_eIVPM}3 z@MZZ*Dl}ch&KT*&;5yfhsw`vA#&3fxxAG-Y5p&inK3ko&g<&5d$dbB??7)Ysw71M% zMgsDu?F8iaw)U!2_T7qDz6Z z8@mGY{}i(l23Qoe1(5bMknSLNY+GA87$gJkXL7eq>>?SSA)*NJ;a1#f!-?;2ebD(#hC6nqi(ylQq;NT@vS z;O>L{g72dK#&&^1LJ~p0UVA1yFzP?`kYBXb|2}TN=g7m8pN!A^IB;?ApUoP_ z_JB^_aS;)|c)IrNo!b(s$NxCoacPo+yEk0eNUJCe8SXW&Pzu$Pi(?s%6*%F%ORS6T zHNxrE_%p5Fsw)9Gp1tt7q_i>uz!GWG6v6Yu)`vfJblk#Z)LHQ%vo5VAYjaEZqzNkT zPAMX!yFpT*(=UlPYE^9J?yQS0jam2z!+Iv?g*1P(f35QkUlCKr*3#Z&LeLhx?Ds#n zrU^DEuL;rrJ=al00@Rgda<7~3?uG{}Pf(Iv4cIS&xCE`)_2A06Z`|y;Z#jZNbVAUU z{4Lzm>#Si0cbbM9i*!^>!^Qw%ZMD69_UqHAH6n%K@61`QtTc~wz$2$xZcs1% zudA<ENN3DV1tQ*#7=NI0qjMuwbf_8^xPHb$%GlRt{*XwRcY^$ zAxlxl+$Lmz8I#U%c-X%Gb#lPMPm&}x5o)Oe z;~4Ht#=^QpD4e{A$5gFcYJteyPd?9bXF(1L(dSVqXje(3MLjZSnqL>g$QsA>;F6w8 zlFZD2ApWokU2Ec{Vx7VEq~ZTl0ca$SW}w)`L2X6{Y~zdS=;>>#--vUAHwI@r_`CbG z*WiLFGylZFAp275XMc*LEL3Y?mzckH&@Y*FQk5h*iTB3GGD-4G?%c6ZKJGlv`%Q-3 zW^xd(N3I?o7{||(-oc#bF~nyV$f!yAdJ7;|eBJNg=Y_ud$K!-q?~*Xa96r-wAe2QR zG^fK74f&gj+V=u65%q3a4Jevi*A#y>dLWdeH5Y}qUga9Zi!*ok?QHXRZLk5$h|2$l 
z^KWbIgYz9kt=)v6-5!IErv^KH0V{pH$_yudWCKe|t23P6)0oa#f$%>Du_eeIs_i*`LE;w-C5>XG-$??@(`@$qtKcCmL5nn8EsJzvB&OC{x}GjKyPRhhFd4^# zXm7Wl=WQ4era<32qxnJkLHu1xPWbm*+8a`ocS->B_m`4A``+?%|FCP|$MGc|!!C>Q zdIYnG*ERM|2ghEk%t`lTSW`0$+-h|lb- zC1Gy8q%2(_yO)l{eK~BmfEZ@>ySvVC^}yQ%CIN^~wlJV4(211zni`paWn1*l!GDMU zWJ*y>lmTSn0kF5neXdd3^zP|==iiRiyX+t~5r%F6ZI#W@`5W!MnPeD0A7;XNOG$(` z@gORLFaL4m_gSC6vSBYkZM)2;yNx zX4lI7)=NJvnfXKFv$ABpyvNTv)HIxTa_WbQ{5zTpy0Ze_>aT*-b<$6eeuy%Xz!)V` zKmA2k{dEeNZvJ3^hv6BAWra5vf7E4iCymS{HfKaU1?cCS6Ttp7;bQaOIN7yyGwsR; zNg0kc*Ti@%-0z*^>){g@nXyt6c{6ba?!3faBZKg|l~8ss6d}cGYnAJ$4y4X?#aIU9 zEgtbC(^1>hUNsnMCT=chhA>YqeQNGc!$%X8&+YbRXrH0>W(M*&J5)^i{{sTXzaRjQ zW4j)4J-9j{&Pa-rOM}!E5l#F-@DM3i3Wn@Aayv^26Jc*W?R@#iooy61>^M&tp6Elx zE6azlc>NGl%Gh>JF*JRgKWWhe0n^epZKt?$=4q!9n9NJ47#-FKC}4rZUr-MBop)R> z1OwhRZn`BXG?V2pDBg3&`k>C;|E=j)|6~D)_ZQ&f$yy7-wZE?m=y8|;;?La+1#^kG zY@!_7oKcxxn|diZAGd`zudoJ$UILC4K`hQdBZ+~6vHf)7HvoE+?o4Pl^2Po3{v_u0 z)i%xVf;y>XO$_C9_w>??%Zu_Yt%Z8chyPu<``blqVb%_p+1Yysbs84Sb{)IqUHr-* zWnV47GGgP$?pGPI^8yY_kwsKy*xCX-HFv)k=ju*K59t{Y6L`^cOf5oPR~L7r@vbv`M#i$Q7mob98klsp$^*w~cmlr8eZ z$`o@u@u6qwjmGNu7WMqx!;EnzFTLYCs! 
z=a1a?KZGQ@9@38Z`5h_rq^oF2n1$ss!L-i~vv2Qc5nx7KY}H$~`WT0dn;Yu785%f* zxeO1)ePmAXTtk_1ykV07+Co_?KLV5Va9G@7V_kOnGvC>CL>SJ5QG!xna)Fhh3LV5e zNefl=vbqN7Qd;BJ28rO=rv<%gQ`~b>qKs2N$b|>$)pM$eyTlfmL->;%6WhaI^tJ1Q zU!13n|FzlN)_G-trgxt1kepNn zGu=-0AcBTjoee0USSMH zxx+=IE?;(c{v2<`y-zV2y8{(vfIV{qM?FHDQQfqe)!chy6tkjIF{(Si9=WA%_+#sZ z@1fACoS>=#O{dx)&_2bZBf|uSq{rWGE+;s9gwB^0N|G`MrMkB6P!dM&@U16~Z9;eG zf~A;g&q~>cVkHSstJ6vE>EUi^@$WW8us+xshN{9bXXRmi_{zS9r2+mHFAohCUt?n_qm@!M zG1ipiUtK6c!UT7vpf$y#+~yUuv4Pb|#-9=1)xJn*bQN6sT`w2sZshRV7l~Yvs6p4m zi~4A&>45vhPKCuVpUUD1ySdIj?+vVu$I_dzvEorCB`aHIV4}t_#21=42es}W{H9L*l2;qs z)t~+G@+18kIT_z~I;BM^g(75B(-%rg-g;Amb@_0JpE|0nBm!k^O(i7vHsNmw`iEGk zq7{R?nG=*n*>#5#JyI?^gxQ*TU=XO{ul<&&&>5Hm}H6ZWF3q9uS4i=6FzJ88YUd z9QyjXfVI`^gSj-R*K;n^JC!MG3!t3f#NqHdxA(03?JJo>59}mK&H=1!jDffFxwV?b z4vYLWo5%*J4wr`RQM$_HEkd+W_<#mANFzs*#O4p)OcAc-@ii{GyqOec(aP|Qd=k9Ux?7KCD<5SH^woD|5-b<}JJQ z6<@LJEgL+>&-PkF_Bf9DF&2=nLn?~>T&m!eUjZYL1qMExYJeh|R3Piu_OF}9Bc1q8 zK(stKk`ocWs=7Q#(|m99TEH<-qPw%GU~YdE%oM-!@)8e1qj#3U$>V4MzIF?QHwhJNZkquR;rQp5FTKKG<_S>+>m~jRa}Gp@YcVcl44g&Aq)>Wr>;Mbt?nkZUGsPvkq=j&aG|TY4O4?*%Zy8MTWgQI5HxljKnY& z;b5Wa!$EFR5%N|+?2M$)`Fw9l5{$x$*N9^)jeT;VeP(tQov+6bPnL1>9|_=yj~_Kc zw3`IgzK|e0S?Ib7Onaq+{Woyd7J~OY^pY{2p^}Y`OVVuScD@>}V5ab=lzq~PL}H#H zDm*S!p$l&YVUS_YD&w4XN-!^E?z+38*#@9)6X_w!)YEj1bl^MjQEA66vXdr9;1Y}4 zrw^_#swB_RGzkpXsn=GNE1YZS<;R`f>+D(Yv4IDPuXv-I7OTYiW%z*eVnQt4tYX&$ z{-o+lsfj%(jD5-9{P@em<3iooufU0FLux*rAbdIU2&S8clkCV8@X>;)$5Y`UoX{w8!t-5R888!OMxz3I-R)| zpq$OzY3Z(Ni(&BKme`^w;2;9BH^ZR*%x%c?t+Lb#7855qvA?i3s}E>mlfF|>Z!^)< zx8mjl@I%<=OZ`?tbb>uRVf`4Y_|sg2vWhA&+JZSjyKY&;lslM@A1}67_0K4qP^JTh zkg!p{D*rUhI4+oatn8hNoX27bdv>A5ei-1FknNT9!z*sHqNd%?m{2{d-L2G`@^IRE z5j1|f`Xd&@F{il_)8ia#2k$LjmI({YjrCz}D|c5H8^EH2>_BS+FDg_?p*7ik@&3r? 
zADE=3p>yfOAF;IWzNbyk`vHp=fz{QX5Zc6JEa+Mt6w?H zWy+bMTPsmV>as!a7e?U0l+#i=25LU%EU5e}d7WlXr z$>C3ndGTA*Gu?xvcbV{1_<>O3#$sW$;Fqv;ecq1px@m%QEswZU%Sy9hw__%IuK{v4 z!-M|*nat+Ayr-r;dH99lzO#D3;w+<@8t1f-;JWq95C(cAIPAiarV}HFJ~(^58yN<6 z#ER(|cteY6Yi_^_r}V)m<#>pWB8urbXnhY_e+TAimblM znkHMnOpe3ce!T@T3q-t5bBXGkpiKO5j&l(@C5jQoQK!=5hptlYjD6D2=h1zo#yrI} zwdsBjKJHpOkcMY_(~mcu8F744w}jliAFmo^*nlJp8o zGv=%^G)r2FJuB<)dyDn~9$-5%5=g5WovR?FDvb+F*NY`vc+%q(oCEk`XWicxg_?J! zrvmI^H!ErZ-*3P3Neg@rkM`F{g)wxts;@e|sCQa=9nQ23?z1LDam%$;+2M{D5|*SL z7uMd^A4{$X4Yt&HkkA}I{Cr(H=I;t+e|>!=dSTh;UFpe!ht&en6g%IiRkVz*}g!<8_B?3~j_?wiH5tKY*hI z5DGH9>=U;Ph_))+&?|(XSnQ8;>DH90FaE`!=QhgMZ=%B1Kg!}#H6_gb(dznQReF}< zW!0uLmJ{!>mo?49ZoW=oNH^Nq*N%b_fw=I9;F@&Li^Z|KEiLITZkw8c%7yNkRO2DP zl-_Uh@KC7E;QLF#cB$!q@8!N(wZjc@IKI{2kX)al@!^4K83MCzeR}i&Th$T{gr@WW ztHE*nAK<}>%+-*Rp(35%vkX$eMXVnk$uljl95K0zRJG&d|JaZ7k|gp8XY|`_KeSmk zFWUFsYS(9c$_&u&T?#(}jm-|v2sC09W%JUj-mt-*S$>%dNG1jOKww9}ax#)YFB;zP zvTB3-CdW4@al}FK+!nu$(%}i@e{WC{xb|Bx9ya(y%T&p_JyW~#0J>I1&fqe!VZcDX zF8Yc3<=@5hm#5^eENZ`=nSZ`HufElH3VPu7)YCxB@n#J>1aFPI5?kb7&}C`}^r&|GCe; zcL*=2#nma59wV+kk~Lu+L*{CVn!mRPO@1OBAtRaFHd0V>v&y5|R{jn1Ym{k)FtFk) z(-W^;$=KI}A&)Y@Q)T9xlb0exH;;PND%7jX(zxSU#|-b3PCR<>+QndgD?ef9V=Mfx zbhS6K>YilHWfba0EG>ya8eCdG4>ewD0gZ4)@weqp?6|Z=bS_xD!nq%z12we9j_$6F z(rCVeVf(wtAhFYfYd_hQ&5T--{$(1v#D16ODN}?c#;Y`E2F0FB>EDTZ%iy#U?>yB` zjm|6aN!iN6c0-|kCD98y8&Uym-FS}%ABQqRQEkVu@1!~D?Z_@^Jh0(}H+sp|yC2~` z-J{@MwOH&ra>#v-*CHP_^OBSCdU|y+#iFWYOU6f({CIlvPaZoc*PGdxIlz1H`E*$w zHD^Wp)Q`BG8?gj6BBT&ms%r@UkGJUWatwnqp=lDU&-D?w#+3J3f2*sPn{W_`PZCg$&Kr5diF>ldUTYwMT)v-Jetp5T6i#&(iuw-Z=*1Ekb4~&k@ zL!aZFF8_GotRg1}7VoPLq)({9_uIBK27c9gvZiVbjnV8m^zwvwm?2XAN!Tgf`~#s|B-li}=|H&9yjXqqJ~#Le8LnL;=tp(8YhkG9iNK2ktdCji z8ptP0rA2pi8Vc2dwX8(_ptor!AE0LHMAIH3LNjoy!)NZ^6%NO@hphy$DHErIj;$GtG_U(jD~RO0^aFq8 zT+@8YNACMaeI`vcHVHH1yBf{_$C+qf_Y)iSw@JK<8CGkVgr1?x&-HEIWjBGUeu|{y z^!J%48`Q>qMr8jqs&g-5e>T3>Zo{w%%OR)lGpKNJKxzKurYuIa#$0bBi!{c%Z1>rf zb1M@T;m_tX2?v>gx!2zL;#C>cG1 
zYIyJ3$I8lqurJefR;3evWVyJ>R(hOMvesgIB;F@KgDINS(;C3`dpVL6qh!j23e@xw zi#QPdEROKX33Dz1SsGy&_#cF{P-|z#E-Zzw4SWXWnB1TOM@@;_C;@Hxsl$VeB*{!3 z;m|r`Ep*+AHtRk9gFs`AJw}b15Ch)f%tUzJ4R%qqq9*+@J{o=*1x~xCpdvvs{OXtk zP}u%G>H$p1{ebZ*ys>^?@AUqMyH`MdeqD8ahaj^%X9y?j!`|bDTj58&d;1i#=ay5< zz(F5q7orfBBa8~s!9Ol6DW3}=iR!@z-OZQi`VgY;Z5~(vp6B} z*)tBD+LX%qlJ<}XK09xe>EAf4CyI3{gfM!^3RZ!gQpY_-qLWHyc6nn0w)MhPRa|ARKzJm0r#QQ(_zz(RhQ^4#k-y z43$>$ueFhD4!MrJai1eSndAq5w$IaCq1_$yOZs#>FR!7PgRW?VEF@%e;Dp#wO~OGX zTJA*mj#Ee^?liscH&&p~g-B~BLAnDJo%?7gvjREv_y`3qL2h^UeI!AikRkhh;%Fu< zjI0My&SU7y6fzM59Y3v8&fZ!mpMMwBAYX<=y+`}1YBf9cYC5r39zIBKdd#hzw;ly- z_DS(l+|zvtiIcK>&mABzzIxw3aQl%I@)2gNiU1bAG2;d!ZCPDhPx|q7Qd{~XXv{^V zUaWSxarWx`>;0C!HCy+&q?Jh>c=yo$a`?fU+R%`*lBAl7nq3tzXqA_#DIf#c@LNU( zTCMw&ci({;Pg^wuJNFxY&(DW4len0CIo@syLsRCGEeZWY*Fik-8v5 zGpC#&p=u8y)-!HSyInL%@YnUwDXpfVXWiPx#(ZV(*4&1=vdc(?Z;AOjfqv;Y(J1aK zlQ0ls!ETHn$>x8RYG7Q9*K39X9ayk*_jYqVfB-%XKSz9M*#eJ74bBtE^Qin%#rnzB zB3}9;%BbT8j2_8q`BcXGTm^#$$+_7$lx>}fo~hY8crze%vS!#J2$fO8x%GOvb}L#Z zOyaxwIbu~D4!RFL8G?VZ&PEJNEsFdh@*Txsr@)A?F&oL}z*Ca!-_8^?N@dO69hG+& z4>886qaNok|3I7;^%MGh-WolLge`(A{D%0+Qq~%=$APd3Eyz~S=RgVaiJRP#qy;~< z7{=TASO$x#=QH7?&%7E-UdL#Z287l>E;}q@ZTzTX+1IBk-Q9CP1Jbu3b*n)B5w!lqrGlu+jmu72>yDG)gYSz(jot)!29NPckC#CzQ>@5&32u-L}Y(%X4d{bcw9KkM8d_3 z`5%?v4=*_x5G#ruI)e+|VbAOL$_?J*!dLFOMJ$g*Rf`*wDel@v;oad8ox=-2NXQEkbRT-!|2(Kaol3N6;Tu!yE9Nm5{BOx$Nb5l3f&hj zsuyO>gYpOmvw6-Vv22i?){?6B*1(B@-AvBhXcMR&5+FfF(MbK=@044bOl3-2uXr1H z#91$C-n(FdfDk#z9Hy<(EFy6R#Gv`yp64uQ{iD)$PC9D|L8P6&=Xg0D#2{txty#dw znJ?@2yNdGrjA6v|wZ$%}y@cRCL`!k=4Z&e%&M{>ppi8%txQX~hQC%lJq|m!x5mGQ& z#G>Z9tXeAM+<}34G3GGyFQ6Ey;!auPu&eXx!a*-5U{i&KsY|NJmT=bZumkPYQ_wZp zjH9JW`P^-0UgyWABu3P0ZyQ3aibiA$1(60hw*8n4nw2lD@)y;U-xa_StQSoilw}6$oLVX$4#1>D+}td zqqPo`UT?bCWukkXyzRguQxnZ}+~vNoVkP!f+fHKAzH_|~_UFBE{z+NfXiPZBIw;fo z*Tw#vs2jPlG-E7oz2OBjqDPYX`k9IYjSh!}nWWg<`UJ9iVFJ;q7f*Hc!>&(E0EPJ> z_uih_a9W!R79i#0LhXi3St0Bmu@-wq@zg^v<|+ph{Ga6ye3OeR7~0no{pRn3!b#ij zj>&nNjgPzi1C!lOYxTL?DPE+=UT}1l?AZb{j=O%GJ5VVfRRDe0D>lrxZMv^LmY2jX 
zEOE$JUjr|kF|B6REw0{5uK?5yzB#DW&pRYR61sgmSd{UWUvVo-F1WV%@6CN#;X*L71(CTRmo2lLa zworFdC59lv;!;}FOdf4xm`OkuLyM_=d_fpKa_rB$hf>H-trt!Yg{Or=8#U-l{>;k^ z60n3e;H+Ce0H`~Xq?@ridvKs0KS-J*o#Viv3E$D#QQO|siLt~D%WFN!nLv}tcV;u} z=XWV2)l75kpboM83Gt-77VV2v|FeKs!Jm zk!5hX1%B1OV}%Ajmkhy zS840$QL_AN`w*w1Wo8he%GD-G9T+g%q8za2t{ar^9ST@n*u7q386u}~3G+o(5RunN z0>`laWU<8`gu)r@+|KjD3A)KyRxEFBpaKHKIp}nwr5RK<>l|^v6-BhRvG=o5jZMBU z%^`R52sm`EdyJ)p_eO{6%A4wZk6CJ~_t%{3}?KQ)xw>QIZ>M)-b8xYnN`Dm`no%jf1{J!vuIVeIzP#?yPrCbRZ=FCMMmuxB?nHnlD#fJnVZB;;beWMpd8gwAaq0oA= zQ=C4!y1HXNn6Qu586vBcU7P__LwBy#+jBB|YuJ|ktFhxF!=Q+{qXOU8kC`W4>{PPU z-k7y$1ubksR{GYEnxO`$x-z3)!z1|pMjD%Xx z5kf4=I_g)d0k0Lqz~wv=SkFfBVQm6x_0hK9v*iiyAYLLd~Q8qd= ziV=pRY72ZuTp60&v6A_BXA$|VDN+8CGBplC;mMir)%C)e_=`XNE6SaDS~1`h>W0(H z>VkupnIF%ZOL)`;RV1=9$|B#bSHTejs$=7gEezvz<5?XJ@2DMJxv1tr(cFVn>?4z~4c)pW7lO%l>SsiubM?jG zJCft&V8FQhYnYv0oC}SY)X%MTdU9-Qivj?^LpSSh-@`+DjYMhXT$OOpVp0~jP_HlBKHi#>kaK8YVJPlcclkcxi=UY%tS8H znqo(F-oVlGot=MH8E(O&RTVcYpy_kZTPg%et@@sd>{fl2SB)$AHrJbT4+3Ezdb_0D z)|(%)%M0HJzSmQxkB|};P<3uUcd~Kr>+5;Yq94J7RS0~>2H=}gfFlN-T`#lU`?0<( zCd+XB`|h+U-I63#$FQ<6ffqs&lXPMhV62N&c3ohS2RGeZXc4I4)2RIjiWvH_SB2AJjQPLuA-wRdnL#!FcfJyki*c{;9x~`MTAF^)^Oi4vSm5qm z68I0!$2Q?>SGll^IhOwD`O&gXW^KLB9oh|SDRRKoDUM#P(S^WJ1Qk3v&V7P}dDpuc z!0axFGuw4ez3}`gdVQ;DV;`TTyhrG>tW(+I!RnEPWD%j;xi5V1)@tQqLP8e~S>1@_ z41JtgYY|6-Swtgu29t;D#;sD^S|Ju_@sVns7{+im@9J@@<;VBSGaiN->1+Q`p`kXE zcKnHZe%1cmmg&ye{1}?E=R??G3i7qEmXOdf(LxU~#^}-An@9O=K(}7zNCyzfqRi!@ zcHa5&nj^f98#p*~7X!3cTEs#JXcAwkGXnx0X89!S3acCKv^%skR=g6rUwE>Z}-?6Q@#r&e# zq$n@NOM)`4eNz$b9$x2l1@89c0Ws=SfV$g`k~ZSyxhD^x@>>q_b$f)q8sEMoIN=y$ z9qnv|@KcB0LgXE&jnljQGv{TwR5LchUdoaZq-5u zh$kY}T4N*u?kq;_Yvcnu<4U)=z|bmN_=qHe}BZ*{?WHi?Gx#`I>=$nXU9th=gTjznNi8a+7L|84MT~02k&w$?&Y6fuM#G^Kgbf|cn(z>{~ ziK@jT+Q{9y44q(8kG?FL{7|q&ZT041Dl-kq1fsClHDMugS&L&e^h+NuvqseT@%Wp38g@J8(7(DEZR@|TaDJ1DxK98jH4 zRm_nKUl!fjuw^yw+c(UqI2T z>Am_(KoMPFQ(aU1r(3J zBTo`Oo@Y!>@YAE#)?vEx;<3ov^Si6Q+V!-T>wNLIJnSu-i!-!AxB1cht|hsW_$+af 
zuuty7w-R$V8lNrM796b9&5ud4EiD5o20;+jZ6=O5_)QUJ{T2XOoo!?gATbSV=%1H= zDp*w7DE_a7hn963jPWAec)izMW-xG z)%P7i)cV-?XaxM}F5AF4dFv)TEbi$C$wHleBB^1x;gck(?=Ob$h>0fxP~MIbsYNaQ zp^`B<662mZ>4XiaR)==3_mu@RK5Q)ruLx?(7Ui*&qY z1%TfS-{39DhePQzPuENZ>0svi^7!UG`Q{P>3v@pL2s`~>E!3L}hG~=fq1%?BbE`*p z7G&V}>a<216kwNX2+6SSc<5^g-1)>qosY%&T@U>xrsb3M*qd#y(K?pzl^wSUSJtcm z&vCf8bku%9L1|i3;La$1VsaPXBuPZsRG6>%k?Tw1UphBM+?ew7Y1Cr}>hH{2dcI1W zwmRJgstbNBI&}f{X>5HCEtRo&dn0o^OecYw=hg zXzlLq9Q;Mo|HzqJ)_QJhSQDRI)?i>SpYMH5LmPBE=$f@b&JVD1xIVY4-K6_P{;7U( z-wl90cY`$WfZh9$xhDmK2&uUsR};eCNNtHA+gtQ4wphvY`ud5}jTQi8Rf=}0ocSX3^@LoUvpImO7e=0to=8(g z`v*81{2b(`)#~Xq1gvheEDY}7?Sly{;4is9`#DXR{h&nEs?W+?O|=!I@GqK_Qd{eM3`5wBRs zVj7$WXN8UZ-#^@Vq0#IFLS-h+{#VF*vgnkVKae*Ep6CBJm=2E)2~v9;Mw9=)YEYpo zA%?tolmG5_UHxSd{*id_FQfXeV6XrGmw(N~|JUZ^*d6xlEzk5~%8;T6t}lJ`pVjKf ze}JCU-&{J4#m|4d)*V*ezPrDba;ji`n9IH7GwZZ2b?BMCcHn*7ke;@McV3t~@5|7T zCgDz{@CAuRTo*-LPd-?rTdwfi)c3I_dU{C>Gx2|w%sc8iq6~kvdeRD@JQ^O!$tlK2 zl2{y{tdx$*)7sm0MqKwy01H0Y2snS0&&yICppWT%8Lbg;Ketxpq>8-K9c-E4+zY+dyiI=Vm z@1M~Bn=skeg;;ev0jFrcFcCP*s&*QKc)4>fe@G_S5N)38qiU4LgN*C@GSL%( z0QbH$XuuC;haYg0UTGqrhi21BN<>VAmI`B#dkLH~_fyqwlGw-byd;3kM#nq8d|wuImssXvhqEFs?~$)tj}c}~ zp8D5jeieMk6tUUr95~m82%wVmfhrbk=b}`V!_3qf^Zv-s4u3P^jJ~Y zvKjeH^c!CK{+b#F{?`(z83;;HW-cX9e^GBlm4*={ObtB4tw1^2cWQ2k5{2)PRmsCgsLy25*rsMth+x?V-CBMv;M|#QF312|&IM z(Ty8ei0EFE8!dd>odF2Mc&YXqeXGSyh}RPC!m>6* z(buKH&m5|GuV%5dm2rt#@N6Y>yvQvd{j6a?6}%7AcymJKT~3V2ADP}tQs7~tu^VbeQI#aL zc6Wl_JrUr#mednaB^UTA2o|PW)jM_4_sFWsiY`%eJQ#IT!<{UK1u zkqAA_J>kywm^GFB87yKRb}GN7^AIIq3cF-cU6x?S16oM9jHel19BlhZB)9GJ*}FbZ zdnqqrO_p}Vd@3oL@!%S9eMF9!`C~w*do1#G=jB50*_(fJUl0K{I~B^k-_5bNap5m! 
z?Q01>{@b8M9{41yBg@*0t$k#vt~hREUsUpBsjX9c1Y<0uAY6Vs?Bqa(`ak>d%=n6h zKWq%pWAPVXMlO!Pmh@2g6Fqz1_44Zbzm6E2hP8-hda`yBr#nl`&GS}I^(a0X%BUVxt^ehP*l`t6_@jz0mEaI`Y%%IjwI_sbfYddLv-Hm&3 zUE~Y_x%l3P9<3~D?W6ZXBF2uU$k+xKxQH@}+oQyMBJZ8Z5QdK#6CZJs=Z@X^vw*J_d+|(gB<@o%bcf{REjLh%%F}5g&@KN#RO`eS3;)q> z4bdExRRJuL{xtu?45M<%{IogX4smux)LOs(jk$EfQKHjO&9lVX-h$T|=M3uc z8&Fq@!s5h{#z3_QKuJ+M+x^?@exICB7&@b$nEMG=X?8pVR{&P18*b9>lx&3212*1- z@}GKu2NWArXV`yw{iaM)Oy^pLI_gzhEK%9$I~-O3ddDXu-}$vkj#(z);E0F;$POdz zC~5Y=JiOj+OBzHQS;tle1TjMjUXwEQ9hSC^CRy14(?UMFcy5E=Ikn*z>lUm~7)RxX zqvpm%uC28ZOf1;LJ8C6!>|$`h{kO2xS$FZ$RTWp0$Z*kGFZ_^Ycg7_mqdsiQcmplB zR5u}cj{oplFk;p`LTfkXTkG5A`cy~U)vb&*=+}3$W0H*gc@09pnk3?gi9bYJd2`Cu zh_Z?If=q1>aA{dKD)8?XkTc<@5!3hO>&y%$ee;(vw{VrLeHP#%sOg*D!~Mco{rqiC z?!KCtjK~lXSoBT7PN^M2G^cWp1?HOi+ccDBV`!O}V9bW! z6e-Yn#Gz^1B(37NUy#30ahM}qD_Kp<0lk;Hr`nlCy(ZOPgS*ly{`giMXL~4Rtwwvo z#Ll3dHC8h_zG-+@%2>8t{-N#&u4IOJInKQDg2Qp09yvYl%wV6b&eUh$9CDnqeyVzQ zQ*;x65<{LUZ!;M;ETq1e*h@;17&=SiDlrL`#M5*4!5^OC?|-2*7desu!8~l%;4P`7 zD;9rChPN*!C&Po}e8g_`O(nNYyqE9aT(9u7UU>Wx>!Y?w#_aDG{-Om4u&v8fCQFh# zU0k*wrA~qLKW&aG&^(Xu{lE)NIG&xCc^Z4>)p!W1vG8+Tv0?+NGWRClvuoxq1z8cl z!le7AcrY6Q*MhTSML16Ueq>=#DAA7d{oBit!G5%%D12Cs5krIZ7Py{HKKXXyl-@B> zKbg29B;jsf3W%N!Rn;|1epSb=kTE(xEkUBCKrJ!`7QE~DnQNbg7&16ERlPpAG4nkA9C|84^vA z_fwq09dKpT>DFn!zn#jt*^=p(A`D+6fA9CYo%b#fzJ_(G_*xtLaIa!=x6PmFw_@E* zeDwxp)nNfa_|u}PwCE`Dvl)ZRzhi^fz^++&0U~ zJ8P?DGI-|Pl$u@A^n;3{Tyy_?U%5TZ=j<;X=$BDRljh!>sV(V6N(;trX;eD}c*9ig z%%f}KCT}^`h5|O+cXw2v%D~W>DTwzsGdtm5lW}bI1uBx5CbTM@!v0DQpPIe8t*S=Z zCtDKc=6b*I)vUbXxn}vFzFv>}c26m1*cy*hjU&Tjsv;injsh|sKK4d$q#5MeoZcg8 zK5a%hDWXoPpper0EPt;kz}OISA^Fxa^KgnKpE)@yaOF7%QaA5EYMgpsRHKvz z1E(K5_&Z<`%F+EG%2)}BPO|516)k}&Q^#?>AM{OkD^6WqY&tv6ke~(utK%Mwvi6T+ ze&)B);E|x+=MIJR-^e%QQbiCKBuM?(QSNEHwCLu(T%KPgRhzPQzRSWtn&25P@k{S> zO}AE*c5=`DQdy{U3NvYb>t?d4>@&$pJJ%QubofE|7Mjfqm>QytZ)&2WW+N{%^G@64z;@>p$FgGww-aIkTj_Z70QE9EQNfOG zvMQ^W`Mo}W{)U|C-zS>gZeTZMk}saRd%FCH$!{Kq!2xyrZW(d{@X$4ANk-U63w*%f 
z&rFYeD+#6Z4vo)<$cv&Q01#%?nZ@W3lWlEtk1A`g$){20R!;axFyqCI7KPz4Gy0BN zso4$JAvt=UzL+`L>?z3pZbXkk8BajNoKbN%7_-0P6`~J&W}w15;g&h~#?qvGok(73 zWs3Xvuz#@p##X*gU7=1cLoYi|ma+mQN%=MVX`N~^Jy!sA zX9CB<;Vm50@FY{}H8=ubNnIBW}XNf&(ta&w^D6 z{*Ad@lZe~yk`8k<&CL=MO;?;=&_WqCL81%^_fLl(M}l-&jarSb%47~8Kl(4UO;=1H z*^o-)Sm3!eg@G!FUt(r^uPL)_vPv|Uc=bD1iA9J;zN$ha73(~GP2jrnwdj>VG1IF2 z4Eeg&!L;iWf#psq>?ES)d}rjQz&Yp+71hMwrJl8@LG-3gmfmvmR3Q{||A|oV=bUcN ze6RbPfrgqY_O@Lk?AbgLSvaO<;HROfFL|qQsJNPRRoeWI-1mK2j(I%L^Qf4Nc1hX! zd#COG47)PJwWzV?T}^qAn~0r)cD?^Y+JA~JF3)LMZ_j`H{_|b$S9?^#TY%x8&i|Yz zEgHWndoI8lsf4b2;-|Pn<8%D=usrhG2XmW6eT5m`bGhT%H&ie?PYcJMv(5yDZ=t+C zoEg4&9Jz9T11p?5Zdn*Dcdth7H#x~%d<5uMKsSpeP`5t z@g~v(GoR;LIDsvG|fWo?JebF2%~38(#Pj+b-8(7UOk!t z#93_V(m3g_{?3&M92L(Gv-MJtAjMcde)tjl70tNgK0{tSfjt9yEagvrI*Bh4uQ)i2 zh%k6$5~8Z4>Iu>GIjtHt8u0jh^wS?q!9VE{&A3=e{13H>gPSISX}QGR&OraSOy>DS zi4Kv`1Du&(u>x5#O!nIJKGeIFGJC6MaS4+blULdn+d}MS9V~tsl8g7&j>&%bKUn=u zz79;5fuGJOeyB@!Lk5sdsH2+&oxXFam5mQR#phnhxrtn;NjICy%iJ1Jq)p zno^|;2uQywNC%ZBHA|AlhMJkV zJ?u*_*~=le+%%$kyfD#=_j@r;aR85ZamwyHwZ%1s`5MnBxdyr-5Wd(0_vo0my zWip&}Xoj}RTY6sns&9ba@DA0vffG%|{(&hk(-n9c*!g93gwqWOrq8Xn#%<$sZ8%O#Gf6N0Sf}QO#^s1AsG7AiakJPN`3< ziR;&sB_t}R7z=eOA$B{;soeyTyoDHBYs#y*RzgAT7ljY5z7gk>LH zq`L8aDyBg!U2rrJ6H!|%o$faqnQJk}ow7TlmG~#qXx)|Op-(L>Q-+8fbpum9v|=-~ zqrB|~S9O9eOvp7Z;@T$;TKsTUUUGPHGtnZkIC@GR#F5(_cTi2>n_!&cm`=S9sPqs@ zb%f^mIi+sai81|YlS!?^E%tPtmh0kUsyiAv2k~Nzu6c0Ol#9g|y>p(yCN=wycgIe0 zFT^le26BHQbT&!At(@*ak@bATLFa)QC8fZDub;K6Y-?E9j?XjlX=8W9!=6H1xp0rP zdZ$}tj(n(q+1M}t_{mo^-d&5Yv@c1T;9tPo;=`;*Q7U57zPPtsd4`?6-&rPel$IX{ zsxBkv>aa4gvopeZ9t^u`D6=MH(w^zmRDa^)Dk>|?BGh8?diqtJ%;IXlPg+-UNcW$2 zG1$>gkz`MV3oUe;W2Te2Ew+pGvkG(vWkme1cc<6#JqR+`QQ7Kz(N3r+x5a((z(N*{ z?D$0WA4=0YrQdLTTQZciApHHiDq|Ulg}e%xOuFlZS_%${nl}BDC5CwDBWLhlM$~SG z(+oWtNSarEqPrh{)GUVUlo;I3BF+?dzWd?TuIRNBhgi#d!i_ii^V0g1 z82FAsy}RiN-XO^1gn|0`!1aFg9fxp1Gg#iyU64WgAEwd*DnzG5&pvs;DcIwlS6}ee z)(r4tdpQ|%yFb9D$qkjPT#;7}Hj-2`qIJ?rz_3wBN0us8Wkx#SxaLty9=}A-!INh= 
zX{S1xZwU#a@Axir7<~x@WkJrr2u5EV8g*cUUe`D5y(8gYmyOVitqys|Q_Tt)^zR;J zU&k4MJ#$&VA6M<@j-wKXD>IkgHnd`Y%uJi4&o; z3{#xv0l4zdPcscIJ|^_<$nDGL!8AnGU;ftI zq}t9*9G8lFzW>ky@*qI<3Qo@XDEMy8UADi8^6Y~?D40BT(Js(0L*!NBC9Z+()m2o} zfm#2)rDDd~pWRZ6_7dCfItn=Hd-_2Ny zeeDd`Wq;_zDKY12H`L~jU$9p(50*`AKlwMyoGV{fbqNAogdwrM;@5rmXH^aKY4H!3 ztwtvSUwz-9|HmbQ&y8R4@|X7v{-DJxOzb$`PL?8$uH+Ie_|i5V#kXTMP^6OW z{TJi3w8B3Kavmr)5+Rw4)mWaU4hyeI1g$3u$Zz9|M7n|}eZ?lcr8X8-u5<=VpqHjY z(5@#tM^YrVO9RU`i|gy`61r2?dRZYQjB7oVcNf^Trojpz9l#pBE%oDM6j=6Y=ggOTCy{X^WM z7n{OWM3C;XT65Nma_^4}Q#tUQqp#dM=9y=?s+SI)+a=-sDH9O4kz($RbvBS#z`-f$ zrw=U-)(aqmU%m#^%(cFb$j+iTZmd8*&$l-dcHCSjpT;Hi&p^Bnn0Xh7OP}~RYoJ75 zQC(JE_8e80yon6H=pqWd^4I3MVb#Gn<)5*`4bOG{(7a4=OZW3syKS^Ql_R5*D?Qif zKXP8gZi7ivOgQEbof@hl>7uv1gl@#9w}p1a#+1d3xF?)!6de+q#SevtG4?fu;3M-g zpw}r>HLTBPvvo}iDLen#9$ueQTulv*?R;jW%2nx2?mC(V7Iy*2~+o z>Xe8QYSnE>=qO_A#h()7!?U-HQ&}NvjOe9El7^9t)INiwAEd|auH1A}4QIK+hvHPB z^XbdZ4{tY>EV$a;R?}AjsvNcAAQjWdnxb$_Q)lJ_1l-0^F7>(ND@#I8=) z@l@+*cSCC%khq#a17l5wnfJcsM@ghq(HnrXP~5Onn|ep+Kzq+NSZ%~lJWlPNV{qNA zk-gB|owtFnKBl3$>CU2Bru0ou*AULv za?PNW8}OR-{cXry6qw zB>E>7XD*rOY^2RXvg#_D3D>7GR8zmxLBg31)VS*NtlhAnoDaUKw?P6aV!vD z^DXxW;VaLAW&NZo1>)19CiZ4)yH5(xbs4GD;%GOiFS)8M#~9V$g)&xM)^O4I{ya`s zk8~=wdL?JX+qRByVe{-7Zid_Q$aj(vE^YotbeJys_eQpS*=Od6vyh7mIu^D@IKAlK>f{|kHONPqBi>Mn^0Q(jAg@) zMAD;-vet}8eX+CJby0WF;`NgVk*WE8Ab{3rMDfci~ z6~^n6yQ)p<&JdIdifXQ?%~KN=WnJ)UBdj`N1?IwXK=g<+Qg; zDMlRtT6JPMn#{s!5NR)jwSyj4>!KAEi3{aNAr)3sv$Ys`Y$nuBx4{$Z^E6j}NN{tH zvkX@-zV+v=g+X_t)A-qp5kZw6Z1QNFOcSND7!5cRvfqAQ;@SU+f4R+Z&KEEVHsW%0 z>su0|+H#Ue)-s$JRL0u$N!Gk_5QY`gKDg!TwzlZvFLamr^36MQHJViTT6t z)aRLRE3Ud}S9sLXL2>KJ4+iT27v0|In$i*j`i}$|w%_SlC9D|)^fr%b8tEIJ1~jw` zD>f|#3w%hQIy@AU=KFa(R~<6( z_{ojVA|N(fRj73#Nc1*+NpZ*4pDC$_N4$DwZ9Vq_?Nc{t(w_~FE??q$I8U~vfimdWC7xg9JqSdY-?4KUK=GH>uP z_v`cMY?4Z`UJK4Usa9EZdUPMppueS5!E!S83ah7cSC7pV^&%53IaAq@3zVG0u)B=xE$sTLe#28JGHa5PB!Tsc?RzWs!K|hH~ zw&;vgHn{6{WtD%Id4tI?Oc`U;>;ku+gMJs(3WxHzppr-W{PBJPl(>2Bfm;$8Se z`3u`S1#*InQ*+rBQ3VxRHQjNYz0*|@R?wj2o0=}C6CaB%&3%Ilk2{Ivy>@8!gw0Pt 
zTGu4IJYT%-Hbx6ipG+U$D{9$#rV&r>a-HPNBIx*L3g6m}MY zljCw9D^?5a$10CJq;z_)7M}0*vwuGf^L^aPpKNmXq2=T+FxsrHJgpNB5Wg7@ z1N_KjUd|DK;^2Qi`R;k@?C8(q*t?}d)Zz~-Pj+*=ZX|Yg^q{vZgHU+E_6pWzyIc85 zqm+}vdcWL8R#@1w!5>j!Ka9-CLT`|fjQ4lN8uii5_1Kwx#Fc9L$w+6}Se~X5Gewc< zx+zLQ^12Z0KIU>M+}11{s4DT}^Igw2nE95*?IwB4A+=wqo)a(U-qJ{gMt{A;t&z>W z`~A_+OHVY@0rnOnr0H~|!Ten8ug1dbK;ucsAf>J1iEgz&8Nm5;AO5Q1`t=Kdxt8`+ zT_-%CwN+-Xzbl3RE%n#$LN7B+brd{!vZ7({|AVqGpuc)<*T>90)MkZ-eoIHwYxKl9 zWG^-EEIa?|efay|cEX95rvCw}p6CEPpuev8Kh#_ZAQ^K|hKr7yzH6inchc^;dw`yzoU`* z=R|=Uor$jJXwv#G^Z&*PQ*~kd_gy2ucL*>{{k!c?PPzXX@;@Z`May%2`}>85E}-@d9(CAm{sYuW{oJLWSi&fdj7=Rg zgE!_~|Fly$^&q$|=w9$YnZu$*$sF)kA42w9o*T(yZ<2@!n@^9R95Z$NifKzcm5o)a zwcVy;yys<0yBLdLo!zytMUy^IStD%Fb+z#DDdgx~;Mmw#o@!aldK`Oi9S>SChC#N4U`6>sasfY}^K{z30N?c{p!JgGmUaFU*p6k@Gk=j1-{=Qhn$|uLb zBLULrId9VDs9A!1=Bno(pvEoQs*Qn}uQF-!5WTaoJL0>vD?&YnHbn% z7>Ha-^CVf`%VarIlIPwoacwg`%Mb?7hHX}T#=~dw6?9=cCK08b?;uT-_c~8gMNVXh z>sBTyxmd|IG-7Y%fzzu6HL#i!`(u}B(a1gZm6;+j{e%)kG_|i7X0?35r2HU_WgjO& znv<&l+mp8X3jBOT02`oFndF(@qYbX%@}G|qsA3n$5P;J)unthP3pM^pZi|n{JKi5g zG>yL;F-0>4?pv$Lg>}HZO%p(ZM_pC}lFXXmE{OUt9*; z60M;&Rh55^R3c@7>d1Ro-MvQiCyj)4t+k>kb9FJqz0eq0$8SG9g1>*i&);Yk=H0if%{AoPQGcW-gJi3 z5F;RBp1p={eKLq{Ps-HimFx6iTc8+fK}5NhGqu90hH@XK+y06Uk3l(_D4KeiWsEzF z#$1I$B-DthOrW^S%R9NF>)?I$T%_j=amJiiv;@)ISUe+{{&p1PK{JYVsCc!V`gyN8 z87A*xn*Io#3(C}YWSVi^E@3*P=hOP{qz(m2WqZ&tx|$FAuLG6uP^~nx-H7H+otVv| zgIMQ@opO@2Hw9!e&I_dOFZGtls1=P4mMmZ+3{&D{#?E zvcahhN_WZM>8dEuMx%w{%gG0-9+~Oi8PXySY5k>=uW1DY@imqXEqR+=kMe~TyiNbq zQoIt=&vY8uIKeOl7o+aB=#$E{`$Ytt0Lmc3tCvt<6QZM5+|=mviLu_IZKa|hjD9}k zFl&Zt3qgY!=H|HSkVXFJ9lY267;~LSrYSTzh6W+c##Y0Yvi=Z9MES`d`Tf6e8L)J6 z2Gew%GI(yr4y)5*7Rwu$b(Zr(2tm2;a&lo@W>G}=J&^&#u8Ye=sex1sWx;@IR?dH+ zF1&@^X(txCERHP=Wr*M(UZLEHf7;E!Yi!s-k!Xz1jot_nxvc%9{Pua(weNcqnc0Le zLjwVYU32kH&7e;sN-L5?C){5(P<0oRQA@1@KNPylv=z=Yj0p@hlT4*4s+$JtP8gPg zTf@L0@N+oY5Mlnwx3zuFT0cqB#1!V&h@_LV0Zn<5*4KssTKQ3XUWvTYO!8z3vWwA6 z{Erd<{%QG3#avW5d?zK|mxc&osU! 
zT%gS!JOO!ViP~Av*v9sZSuIzLEx|zn+)}C16%+stQb(|ybeuwlphUc!+(02?tcFtm zn&a1pfHv?l7m%WzpwbW#8EX|Y*7y*qFn8#xj~ZKXGI!sl*v^UOh6tIXs0|k%+KHPQ zF6dE9ipk)1E9HA`&)brPdulFd1u5ZgHcXO_o0AsD$#nG zb6wmr7j;X@t2m9D*ypFVXLJOW(Kd_8T(=W%Jqp|AIm zv;65w-yR3v)&hOaq`gO-eCkiCO2*INiV(fF%{%rWkG!i}+|>^FKjK`Ct{A6=dtS4E)v)DiKV>+EIXTo}gKOMfrG0+jow8m;Tjhfcur$m_!xn_Ncg zL@-_~-ObL<$2sF28h1Wu!^64`|H=$7q~J`DO75--glDP!Fl>1lZ}n%|t#?0`fDMC) znNwv0@~S4O4Zg*lZzN-Q?w$mfuD$ff@n#Qpo zacBe&y$2+I&Xl6{o60mm>zM@bftjc5gacP@R#81$?SwO)LbL*F7Xi(>(@NuirCRB3k)n)7%E8h99uHkDMdvS();IRo3zTu)4d*6r5ko* z1V-Q%ob|B8#c*G|7uBPpZn{^Yu$J_E^@0<`ka)HNjDd_gqOpK7cr{tNQmTS3E{xc! zEk4AK66AISlCS&v6uoP<@ztNv?b-IjBo?}{G>$j2s(GsY^K^X9V-c|JsTe=uF8KMe zCCT(*J#7D0iSn4$n#n-6i82srk1~T^?pq5fB6Wu;Ihnhd1#TVWMCvB8Ku0RrVuL`W zs3m90TY6XrqbKt8rIh_q5o9d8e5ESlV!R!HWC=FA=~~y`?T~o`R{O-+mR3s^jZz}GTPefjG7C)NR(pc|gQF!+Uku9xF}wC(hQ%qFZTo%hY8CtXDubhj zWl^K)=OpHT?(TbkD%}TYhJSuBFK5Y+zLP=+@4Se`(EjuM5qx9*e@u@5MYR7az5j0R@p@!P!hm!*2%K17_1Ax)_uD4g^t_(q)HqOWK&$cp-o&i>VZJ8(OCX*J z{$;*y(;#viZ}uaa#CiX#Vfi(*w|PClF=G=bxAh~z`iBwzviR?)---YGBBa0E2>-jt zt{q@?^J=I47fRg-y}SKR_J48+kBRrcOQ1YX7DE}9F7pqTH%RHkKyeK%{mRC0BEymj z@Ex_NirSl*JATE|i4z|K0JNhL(7MbAT%35d7U(noba;G442#{wjO^6UAYptojd$pA ztK5%)9xwrY8u9GMSHyA(q5Z>5ehl|=J7%``PoOn5?|2}KRSuvpeEawc6}N0_|4o;l zF3?8`3{%&R`*toJ4|L89=o5JyczlIwRHfr4tJ6;>d6!DC>;s_HIpfDbSrI@VcUScB z6`jZHY8Lu&+rLPvF1BxhR=_s)*VmuyYm7BOVfZiqXKZJM`P%NonvQM=yz%_;&75ln zCO?v2Jp#GR3y?B)QaXs{4Oe`1s*YDD^L!6+6td*3e-_mcLE8Iyhs~YJtok1uH2`7; z(9k-;&VBQKVM6``fHyM!!v0*n`|F_?@B?{=KpA$nt{YA1d`)Ic$G6n}JA84L7Ju%z zA>k}gxse$tAuFcp5;wc|$FH#ZN1#45I@uCF=XdkfoPws7a(8_NfwxqR!iZej^&i1Z zXWtjy2>(;+`65Ud{~H*f6*k1$|72#>X3~S8>T*6gJJ*4`ZYV zI#6ZKqTcL{7=;B2Ri>e@Jji4waWs{PG&uU0%tuWuE_|v+esg;x`3Tak;gjhZusH(b zKpVZ_%#Gwkr<1DBRn-(a!0ZkbSqEgQs+dir(OvB`PI^zHUK!Y?Q$&7p)Svndw0K@6 zB$KL3@?r>80>qK^;pJ^w=;m5Wgvi<~564D$ii?|tjb^#v@$$0p~-T_2&(0DkQqjuk2jPj$CJ@H7nH)23o_rx zmO>Sko-9L9$4=qPbLH@oK0fNB-@6+fzeHM2KJ{rBg=p> ze4G)_;f$(@RJbx))MV84B0`};{xzh2TtQ@z2dF=Gfr@4r=o`9tpjnZz?g(?DA#y$l 
zvW!rrv{W_k_qoD(O%;(pHu8z3B0QfuM^FW?actaEo^PG$-y3z0#goj%o#gQFtVh4| zq;Z)l_xoWj>-6X4bN}d@Kb$x$x0ZE#p0iXUWRo&l|s~Bb+mDo`h z3oq6SS--XA?m;t|!hN3>Zx4pa{J!+~VG!fE*N$=7`IgGmkiN3f|8NLUq*9pciUk8p z0fQTDXH4~v$yz83W$cPyR8I+EK?QO#cuIi|FD z2v^i_xUu;j4)&HdsXOm%S-ZedBS(1-R^b6wHx_=bJ?NnP$;HyovF+BH!BQ;xqvk-K zQ$72hm^;TKo;`{;N^)}aGx;)6+Pqv5DgQ$ezTDP#4VI06al7*z+F^-%Q7>un;ffX!xL5=pxGdwpb_NdJ&AH1T3YsFg@kwfw(iSC& zX_&P4y3K{8|5(i=6j;q9>P`yFkgL`@^QTNkbK8&g9xZ`z<{wm1M9K6ZWl$kw2~WMC z@QihLT^1&r4Eq?r14d1Tv;V z-b%#iS3l@yf-03zH)G0okM-mMXSt%N=RAm~G_b405+-yR8g8VmVwFC+6l{&d2vd(v zb&YY29bSBD+pX{r{U886@I!q80x|to8LU^~%~*nr9hUpbXWX>8Fn3f$wNlQ66qOvh z=u^g{L{6L)7RE*-)e=M)_v@3q7^aV!>|svHmlVegoLZMCXr=+^pA{~EoN*^z}`ey|976u4%{S;UL?2GAd{N0Jagk%1O zV4hfH_@Cxu=J+3){M;h{!w|pd@P98uyzizVR$VBlTj04SZufaNDe=z*9LcTuJ)<>_ zZ^zln#Yo{rfF7QOd|p3Ps?*ZH2c_PZidvj8ae4v^dVrsHnXEs1E^VfSzAH&|!Fp>3 zwS4(w$E1ST9`!V~4YgVpmtFAX(9Vd=IYRB(@eH&@bj6oRb>bq3g>2=OH|-GD5M>O0 z*Le%eGemI9nC8Jh;~gf1Sz@Zh@lerq7D#i_8&W~ytLqg@1^9<|G%_6^LPhGuC-ox& zKiey{KYs1JZzj?Liq1nA+F%{_+U6iV=I3$Fx2>=FqoX34SnJ%`v>gBlQ?Hu7_g&;w z_lPN@iu>kNpO-SDD1U2FTD(W7SQ8}sp3@j^PUSr}wji(MUM|(DK9ZHuQN>foRM0_IR7wwqLUW z>FZ(6`uX7`At_I(L12t)t5Ss_U>1q}P&DEG2Em7i^JbG7D^8@d$$CK#Bw=pACMpuL zT082Se%f5x)|_^I;$!>df|Y9+$vduN#DZ-3U6JV=^VsP;*V%($b5Z`g7&B9-}y-uG=gW(f*JGH@Nbd@XMA)%$?QC-n3h$<*D~Wm&SI3%UD9dnLebR{o>+v^;Zq z*?nOVt1y@DQ2TxijrY`6^BY2cSD;xUEv4TtB z*YZ9d6b_UTT#U>!v(}9%l+bG>P?3>t3`9EVD5~MK%|@?pbt6Cvd35I6Vd4C!MFahC zZwb{zUq0the6R%h$HFC6Yw9&}qifWvBAjeNNE~G3%4nG;60)FG_!%oH!gU)c6(cR} z*)hL4zWx{HTCTZ{|0m_M=VV^~Hn4f5NU3>Y#pSa=E{rLozRvo6<)p3Y^J|IAzl9fG z>>XXPso{Am8(95?&G@A0(JpiInRfd}Qv{#Lam!Z@J@3|Zg50%6ULRE*EtU*0GxlJd zs7F3EbF!$#kG?fu{b3@cX-paMYvi;vJ<*T=pczZ67>6e(F-YAbs_bog!#x*J>AP^d^*=j)y449RQQR%8h7icIzhtM zYj>8Oz1u8$*mgu8dNz%op)E(>`E8qH{uQ`_6iVVZrqL0*m#EA@%d2xw&%diJvJga#?AM7UX~B9Mn6hx?pg@sTQWIH zkukv9FR~3erI84>cdnqY3#V#y8;)?+aM@iERIkeLURyv-)&xq%V^FS2W8I#ooaVfZoIE* zr=@BGTfXy1-$S6Z*4Fj)XoB?DTllcG9mo>-FSG8g#M*{~Q-7_?M>GUBOSeg?8~RMk 
zHxXpev#^CHxCJfwH*ckF*N!-cH|+-eDn<-n%tGf_ESfAy^xb8$;>c*VTa|413RliqBN>-ukJ>WyfG_pgc z8wnK*`%co%Y~~$SHOoEh!j`3!M_&+YbUO@uU^PSD^F2t4p4qK**M32IvM?PO zB^!4jiX|1MSq`>?Wu2}aaP~*(f~ajDF{+QUj$}r*up{1IMEL7%uCWgnI9$|8fyWlI z2;%PIBrj!O|9lB0iQAu!DQ{>ui&lXN-9eZ=go-RC$4-JP*{Zg8ZXzX_?)G{>CfY8| zz&(o=ypyM}@xEV<1{X!{n2_DA*7}4jzmcK1gqc|Dv?$p=_)$WmZI_;y(Lv3Lhml@_)Gd&fBC6f6Eg&J~(|gI+V^#*oSV_jqH3 zvAK7o(NgnK9MK0km+G=q>_0sZ-cCyXezd*S(2K2TgzY)UUbjl^JDM%tS;=QWuP2Z;d>O4b zLaej;-t$0*5}*HWt4IkK9lBDisImsFVkE#L6Ugg(JfIUP5zN?+cixnqk4uXiPNLnh z21l4^*Y-M(rK3s;-NkvsXPOV31r`;m9Lp|5sYW+CAz|8Xy;%Po)TQT>W$$RXHiZO2^aWIaw@9*O+8V&4hX2Klk6%;EPJ2#;r)#lJBH;jvKX z3QC%#w2y_bZ-5C>Z`(c4>^bSv=l&~rer?f&9AS&o=SGpTz$;}KqHn^Ggpbp*=2vuF zqA0FMbQUYm%L)~bY671Yn>cw&c%vZ5qgHfu)q0~P9pzzBak^zx<5yok2Y=7g?}**W zuu~O`R%M8i<#9GjN9lZRU!kR1iMM7})QIG&jA47wJx6lVEl+!9CnAyE)Ba1gqNRo9 zti>H;XZXSPsrlTrC+V;6qxG9)%IUzLM=_Q;N}wg^^lN*{LIfH;ctbZ$VHrQ0erv|p zxqJE*a>Py+zGL;&h+wa1-ep%Y=kAhd+_-ZhU)p9Pmq@q`36)FFCSNzQ=1recSr>iB zhd#GJ6vWBn{ebsZ9qmwklEITpy$#bg5bjCvi{6!^r3acd`8!>c;fVcpjrV+-Graqe zG8N!Bhs6vPW6qinl_a{J(WCO+m6<>?`5AAY!~?|66YI2C_XW#+jV{jeCsqE$+KG73 z$C8OgIiZPCisk0`!l%RSvqjJyml?oW0la7O;);bK{q>VN_aso0%a&fuNtCR3{E6Yr z+Y;wnf}YeDUw?I-3*xW+y=C2FFza=T|8?gA-^`Nt=AQSX*DfeSv|l=XpgqabUQkkA zE^+5$Z9l)R$L9;hedR&i;|P6GYc|%Y$}W!v_M=|T2iJJk?5kznh#{$8Q@OO@N(~AV zlqvZ#X-vLU3s-GV>-(8Zm#3!-1D6{{Uwx`sy#UwHzv4F?BXir6@dz&VV4}t|M;IxU z+n_2V)KkcK4%%t$OR@FCte|?cy=<3@&Za4)(Y*}c{<`SeM5!*G44D+BilUMCJmFc+ zVTw<)7>yXuhSOuyk?;It7#Z7ankZ)7 zMmVmwTE2H${T1D9;$i^o`quzimtYCSF@DbgBkk_wC}(Ed(nA-tVfXCk{)ggtf5#vz z9Gam^?%|=R)jC;rz4}ZQy0q#*zO}ABEv$7iiXGF~&=<}!)^%DyUvY)%cEjCYA6DS0 zg3fiK_d~x!uPaE@`*dI4SLDrttS-paYbBaYgZeZlj0KxtG)%0crt#mmuA8jH;0hgC zCyd}_jqUH6k(()F1{^C%oC9^|r>&a_rRvK)q(bK}khx1w%FV%wbCoP2*6@k1NB$r= z*!lN)!W(X*X5-G3oJ@1js)ZmwGE!H3iBUVG4f=YF2fRYN;(QQ#CWyL7Fv^PH)y)_P z@wtCr7)Z`eJ$hQ6SVT5XQMgr5Qf=u`mC@nTX|egx=o<1Q{`=V)GXS~d!h9BZp_C;3 z_4ma?jW|1WDFeo9P(wUJT}~yJ4>P{d?ocs+E$*lh)U7MOy>z|$XsFSiS(lSOf8oaW z*2Z;qXZirI+Mip>I$NpNP`&MFR2X?-8lJFsAa#oJFt}@XY%L0;0m}BFsHAM&o8QWS 
zAM0Y2WHstxU)X@?J43ip`=-%kot?AYWVnrNDC-0oOd|aWGi~eKka|D{vh0_}Ds0gA zYZz794wWk@)^}%mdhU#hW`O`p#JWp`VEBe5WI|_??h(-mjCl{@QMo8*AjW1=)~=kq zbQx9BcT=8z#dL(Lnlo$D;?J#nl;qzaDS&)lw2n2E(MD8tQSwEPH5<8;V;NooG-`o)8O;u$bUl%X|wuH5sf%Rf( z-Y{+3q3I5})mbhbJVG@9TUIu{_Bcwn_%L6^x5+W2@#*yXlu+LHbQZ5Y##bh}TPOXU zsdTzGCY~ZnI!JboAI(w^D7>C-QZS!#r)dA*mK_*-JB=vhFLhdP=r}6gbBm;L!G!$U;Llq42| z>`;CiJ=Ey8qF&Wgz#u_uot9!aHcI0i(0I{1;}+L_DIfHuG23L8aZ z8%yg*XfC~avEqY^$t+CFO>_s=nvMBvHh^82}3hOqZQ95 zybS~DJlpTL6050r9fg*76ft6KCNO4!>p8j{D4LHGY2LykrNC@o4cgXf$og_^Q({}J z?!9`MNYF=Fh?<9S+`bnpo4)|Sz`jeD`%3z<00uSz7?@7+kCCmti)&Tq4brT^@B6`L zM%6u%PKgeXuRW^V@n+8V6OybyWwu`Nj^q7Ib-=SXJE-u#VpnU=ogBQ+%9BhBynCKWq1MDf5H*=I1vj zbBg+LlviY)nQ|)!1<0hqA#rk0kEA3?OZHur=vVhC#;2|&BiiF(2#~h?U2s1O-OTKO zba>ufcp4!LTG3#mL=<{xb)A_wKOB^(KMEmE`s`MKAtnLI?cwsFnh=%fMvF_66K^gQ zxCc7zpSRWCtA(!m3hcSFG%0Ynli%7AFW4GkM7>H(QhH#f4>4koZKRBI!EeFrdq;G(v2|eiIst&D8%B z)K(ykW@SBWAAKCY6N1oElQQy|pNu6>?Cmjq$Z@J*(HM2px`lmojb|*gE+ZB}t(L4lsJe04fbDOxK)#;(!C~AZ$Vo$CwN0OFx}-JpkV(#9n&SC$_02 zA1IR7VC%p;YB@ev+V0ohST&sjsR{*C0=c`mcYW2+%asG0d3_+Ib?N3U^Yx0e7O1-? 
zS0=}py_2yPRM%NZxE=nhc3|$)lrn_H1pX-AP!6H+8O2>0@!MVc6(fB`D>06}moCir zsf<%`*`*_iFX%%O|5RO_y$^79pYLDa?)!d~d-S}Ui~L&R@T~i-X&JTLBF05^TZQzJ z?Qz8dNT-WiMfmh+i1w`j3ms6XYxndwd-es+9#+$Lr0^HBwtvmNR(xH|dj^)Sg#9}B zoZqNRxXf&V1#^qI_8agnnC5d5`F1Xw&z#Nk84MPlfCzzFkn8)x=CuT2-| z`wT-7b_}UNx|mJ9+%h%Xq;2oKSPGl)A} z*6*-oG{!D&1gO1egh-C!HmTu>_sl;RhwU@dSyMPHuN3jf6B(wTOlfd=Bb&L3K9ARf z%Cm#9U+rxjX!ms#oMT@&N^CFql(zJTJMDElMt1nNXrYC3hkb=-r~h2u##~)nzU4@9%(X<7PgD3{ zKCXo1e&OzwD>J$=xDMEJAp4pamoP6UGjMADsPg7=-r{e|d3U)_bE=)zdek?nlJ1;U zoSxf`4wIhC>`i_xInpU)z5(a)xIf3}e;^h)M@jo8_U0|~dHj@iL2k*P6C~Aq({HDP zP89^sFOVm!4rxbzGAy`myBeK)>TQg4(i5KQ2BjjT<rnRe% zT*TJ2(4w2YMk~W5(sta6i|u`Z94?!3^gPCtDV)F-<9%`ZJjiWp?TlBoZx#2tCfE>W za{MqX$&n5uxT?6iJxcY$@&tZav6O|1vtZQHddQmx7&WWEYqZ~?=1dbIIF@PoU zr75=&BD;iFWeiafm1*1IY>H)`iwQv)5 z8NE=v`w5G-yEb*|dvh~ag#%uzO%|N3Co<)}<8y0MQ)~Z%Zq{T|L8AmU&nGo}$g;}?|;Mx`{q9d-|0n3m@~1+y#xBx&k5coE@`+}%4< zevdMT=J2n<{C%431}O`lI93{~4>a$ZpIVel12qFLI5{OUW13n<&-z`*@VVWM8Ac;c z+QnPm9NuJGQqB7rXtpSokQApee}W7DN=ju)?RyC?BAgppMm(0CV$o^i;_+GU$u5c6 zjKMX{Xeoqq-{|E*kCtt_NeX2tAfoh-ve*;))&RR`c(VE{33q1+x7Ot^E3@!88whr8gXkyQ+PhaNrG}pw zz@y3YstpF~hOW!cGe-N)z7<&*)+B}WJ>K$Me6Qu``H6kk-Q@dIoz)6{K!}8EQeYK}5R*va8X07hAOpJ!A(~JE{_p3?<3I*49PagbE(b(H^ z@?Fr5v#Kk)r|^ADWopH0!x^VZ`hUoK&!{H5?`=>-K~#vyqaam9K&6B9_Fw@if{Ie4 zgMb2|3IRe0iV8@x(5r%gNQr=uKu7}8lomxGKoY7ULP#Pl0Ybt={r&&%`>vTa-)7!f zYxu&KB=@<`IeTAwU*|e|+ornVe2|8gKit(sS`Xj9+6xwREF!HcChD9K~;| zJ3CaH)--oH&$x4EyQj^~W9XV)Tm|%;Fm<TYC4GSGQiHYWKh7Z(s9(#j%SBgIy`hJRK`WeJXDW~1k@h&x-!OVAVJJmKsdZt-V z|8%wi!rZ30^|6XDs#C8`Y_%rqLH)cDQ?7lr4a0d7TI&xM4!pW25e^>+dH@aj71Nmz z#*)%n{r87Zkfz$LqMda!6rlfY)hz`tqCIm1p#d)%x6AQ-Jb|I-< zS0pRA$HX<76ZM8VMGIaem`LE!TBD=7;TOzQTjy=wWea`3>KwUJR9|EuhOnR&dQbXW zC0Mt3w;8JGQKbma&KaPJXX=-P)-mj&&==81i2V2}?G5hbJcTw1pOweXBjhD+wH9^y z8LOa|q44(+# zm>Rw(F;Y4N{i+W_l~eRw)yBbBTPYDQLRN1y1ZOyv#M#~}qG9j%eu1+=v-0&){Uq@4THs`RYhpxZD?qM#0f?P^Cna{`1b({N>i9yVN62`}jK{(N{d zNX+2PSDZc@Oe_nd|Crl{Cyn19Q%zXke;Kc2qDuOresitDCZCEWC&IMvXL(hb~%1Hjz1-VrF5>k!k 
zpx(Pj5qJG2%1wt0U`z?|xY}e`GE+<%V$v>2G+Rv5z)H`1#Srbt@G~HO_ zZ4|36J?(c}zM}{o^j+GD(fT{V=)1ma=!Odqv3}kA6OJt?33|V0?TO`#OIy^V)FPMp zGa{BHylueiz(GoePTg}>5X#y0BxU6}jIfd08}0VcLB3+zo(#tw0_sEXX+VPomFmq! zpvz+Kf5q6cm}F>VfchmtO9m+$&BxN!AvL=m zHU8$=mo3MtJwdKe6S6s+n!E5Noa9H0_` z!Br-m>waaUh>I{Vu+zTL-D&c70F>`gv9#?OxT61A0%^V~VS%r>`^DXnqIU@z#Hztb zi1zVn#`bq-)`L+-_Ovzf@XOq_N)^`UMlnfw_}iQn`6oIh^H==o)-*YNZl)_SMUr`mrZ zV>Y9D{}=}hD2E}pYZlsxT{$F=qCEO7U<&vK2Qk(2+(le#3*g3t&Y`pol#o4uS5%8qOy5_W6_RhH{TPHj{ zp%F!t^zcUxD%)%FpLq}I^mWWR6sX`kN^W4?BGjceYb@S>A6GPbvCo_EJLuYedk(od zr290eIz{1_^qEqb=H@WE9{EgWl;Qqd1AC<}x0nhR%_Z7y*#L2wEQ4##An71nDW9}J zZyT!?8fw!uk{+D3=xfaS27h-L{*ZYzBAJ~{it`z}$MN%qNJ)dGxvC>c@a<_RZyK}z8|&cj!{Dj)zRNK>49VA{N@ z?7HA7m+{69`L=F>_9=2{%P?Y>W|Fi1aVUp%5CChn=$TTt5WL*V%W~V4)H(@;6yq?h z?w?0d_vwklIXAXD7^L*4S7r22r$&P1nlk3eBP~s4wDs*HL^{y$XchTh%jA-L5K^-^ z;ruQ0$ZTNENd3|84&CrCx#4EPi5IHsFVFJah*45EZK}S&!qH;{mp}y(#TKPY@6{r| zM`(4lrcVhGMMX{e^`>#u4dR!#<9*{h4_b7uUENz5TS|WSlHIdEqOdx=78*-7ylSX{ zPN6pr%UgT-)Hs_z^Zw!n3hiz1Qof`m|7)}nZxdM+_u17p-*{(r)+~>f_Ttvol!0y2 zi^KGc^hD{brbJb93L`ABrY7&YA^KJxLGw~tOD8Wp6R6WneHSKPw4gw2UA~!Cc|Iil zBjbhuB}u}$(%(U#?cCYZ^FC`nS=6Yr-fhlP2+cb(DI0o31I|u~-H`hIR}Wk(VNY(l z?)IGZlQ6f&+(_o=vp1gC!jh;aZ9uC$-m9)c*^!0O5;xOsXfUo7TT_i71}hIxrAvJi zL8B!UcP(9*FA)BIpzlkfZY-&^$Dt9`dcpf3gnc6O1JG=8=SrZF-%o52TO~HzfKkl0 z{2)`G6}qqw4$9$Pg-56+69aw*cMZLKUgheir?L&^{5}}4;g;0{U(K9&vsdi;&=WI{ z^XW?q)*v57zy?O$)&8;Z&(#?k=YLAr?);n%&pAKx-4WFvCIOFv-@Ba3-R;{&- zp6K+$l6>zEf}4o&&)axUt}q!%1ZHGxmEtg5|L|QeUo^_)pMxx>1oiT((}3QyN9nJ#qQ9GK3`gW z0&q!Cr=96AQZoz0Je&>wd6X~&J8EfM$@eZuRXt-aY2u(2mtHLYRH#2q#?~n zk%WLj!|UCQ$4f6#_vySJJ|gErFpj%-)My*0edcKObDa|ZnNx+bkHh)TCu84SI6H51 zl#r$Kk|mhB=Thd3woXua@%GVoH)Z8Jh>f9Wr7^(mU($72p;5MVLO(j)?%x@^6Sav) z1ys{%)Z#R)u<6t%4a4AfPF7`x3x)$fBs6G#Q=R5!6%4`t9}(nJHcJ=h&1d;*AN?N6 zNqLt^2g9?9&d&gwl5jO5@2DD~FuNk(LIdKN5dAqn`d6kgh!9m^J<*o~5K~GpUrY%Xe+6=dKN>+V>qQ(sn{JPAufe>tOvnw{ z@*oju2~T^b*#O~ts-_p_x?hd?78}@cPs;3+PecR_X<+etD 
z@uo7gl)x3hRC4&c@oS^xYne^Qg1x~$%|ZQ~Z3E=(id2+Ekb9ehNDTobIr-=rueC=iNVgVY0ZLYErSa|snvhVqfnRc_f(+Td)C0%YG zh(Bdp#U`$X(JIA5}+^_cdX01BZ%HhkVx4 zbiORZ2QlGqob&yxU-D~Lm}Gk~Wslm!oB!p-J|YA z$1DDLD5I3ptjc|A`aY0cSCK8Bha|V(`yXxdXyQJQ9#_o=AKl;eTyAx{U`F~mkBI=y zRQ-oQ5Tvs34$VC61+o$BUiD@6$=$Itmv%V3D-F$!FTVcQlUN?yYgY2}xk}BD7cZpg?qh*_Ses9nCC2o-z$)R}eMLBMVEK_w4(E!?hyTxtIjX;ZELX0P2KACw@ zXFAfX`vUzsTTRlYdrj#MjY-r5{gd^h|*Tv#~X(&$_EAkT!KFU;sS0tcNiFjEH2 zJDy;^+8TZ^t`Yp+iCnaBDx@s9#aa3tUe2ib-2NDYxA&*cv=EgfI6E^Q1mbZ&J!meE z+j2X5Qcn*NiY9;(qc^9xt1vDza`k)f0$m_xa^3d+AZw(VXaKw2YMhRVUyRm7>=bD! z-t{{rBHGC2+;v45{;_(YoFbq{fXuR+aoY`FuFkTmG5B+?v%;4-I5rQ{b8c*l)S0G1 zT*m%%mbWCjF;N^`uoxI_%@u}4A1Q4D4&KWIsN`axgp;86I1H+$vuy_C6lq-AH^bG+ zd3?I{Pklt3C45vsBd7*B-6}~J7`bvi*!)8wmpFH$6~OBewbJ>6u=)N~ z)A=%A=dZW&r z^lK?|^=}!r%p6~q;E){sCeG84Owi~skl)bzGMpak z^Zk3X(shOJI*|oW9t~%Ls~}1r_d{sw@CXPeX`=T>c!_=M;2FgT&iY2q`(5|t4bbdb zy6yY8Z|Cp3KVnD5m)9y}HG$BANs^)>K9N@N2YM8NCQ69GhLosWx&OR+L@7&2;?+)? zt8o$Z7%AH8W;;8tP(SE*LdLgBNBOopje2P(TD4{zn|O17Z}GpwwfaymHw~$S`j>m`K}CXqKC1tWR?Mosz>mA>)rWYDPTG~wm645C zAVXbqgw&3XO|F(iwbARPW>jnN6Xc))0GV)_(oV!nYi+jpxs|omiAs z+SXxwpS|S=a++e?BU-8-wz>5Rax;9E>i8$ofTc&C5hwmJv4VFC z*c1%I_$eQwPQ7+DMms3O_|~T#`XTOuKZ^8;imoW)a2-5)X!BpBzX`2bjIJ4Rb0nZQ zMd4>kCjM^yZfpMpT!1UL5YRx$#fBC$5ZcsVerrV>FUw0GXu*i;HzB4YV8`H;CG!yq z9IMWLHdT^Kc{R=kW$OSV8(Q~(-)eL!jqhdEdaSOCGBWfak6@~!kYYO-9lhjdrRzS6XU_=AA7f>7zXw+N^g5}2NNDrEU%O3r zgt7{~w@;30%R;`s70=VCG8KCIlFHVv_;|3`e0y|AzE-WD{u0o(J$Nfoy%54EdUcR` z&nu*E^rG`fL_|K3Q5=@i5s~ zxn(fZerdi`dUR<~@He`St#{jN6{yc@q z>q*R8x@JMlDbSV)pKAPq`)!wP{QK2z2 z_LRhxs0X!Dp1fV@;O8*D!d6W4(OtyiVVr8kAY*O26w2ntVPa=56>t?Be64=Kd+|P7 z&BPN?kNL|)@A$Mzf89PRM4QoTT_)*}t4((ibM5nY5|twPoZobWBmvcELb+w2xQT2d zgf$>N$gZDH)y7f-B)>K>PC#E7&ebcVg4a8*10&&AuWwkW}N zt#fu`C!C^;43ox6VS0f1Lv7dty9S>|wsOr$5m4ekH1xr&$~vp23{K?hrr_(m%h!o< zA1=C&+noKF(GSdc&-1}wl%D_v$LK7XqFVQy4}BqCwbt*CA8k%89zW|k%UBLXi z^6M1!xRFy1nL^|;N_Q(*$Djq+o7uj^=^MS^pShGf)o`R)KCcDhZ1C=B;FaXKVmL6{ z`=d#3=8INTxnV{oacFOES$30<_cHOUVkanH%Iy@q8hE%c1Jx`>(^Mi)4UnXf8QslC 
zoN&VZQppS9Rwb;Xlh((x8FLztf8Pd;=skF9EV`ZkP>Ef7dl-G@W7BwZaa|43M|u7P zov`2zdcRV1?F5kU@%u;6Jd!0Wj=S&kk$KBoMbXNrX!5+G^QDanWAL*(G`>;$gGbY~ z@SAGQQ*9i@uksD+q>90u&GG{Gx69Sh1GjXCF1o+Z1E!y;tQ8;yQuT<4iQ!1eZ^lg+ zZ^9;LNU^|FOy=#9)vy~TR~uhKNr_`la~lG@KFqCXwW>lc?a=?Atd{WNdP8q`NS^qG zV|=RScgxYT1I^1?po338#a|bk3`{_)QJ!2M=)QG#BETOnE4s1xptk7s*fGt`TM3)s z9ad@VhnosPqw#|`uNsiwX{;#x@ZztDq{VIJWC)c4VG&wCP3y#!&`#%9j{7bVpr&NC z4HlbktG#V5l<_Sx$I=l&tBLqQ>;Gw9n%w*$q{ux8a*J(%c8&I#xO={{7QUI1%GC*b z4S;fa?`zaak4#`5kWqQ4r=q=brspEaqGdzk~H;%L=X>1f~bWpQslZ!raafKd9ijhC~Vb~DSTxO-v+Xl#R6u8tY_2V9uP@Th{q4cYF#J^|Rpp21LH zZy*d~OC^ixpfM?m65Xk{4^rP^=DC#k(e40~ZfZtA<04FTdm74(uODjs2oC|Fyy9Z$ z12W@|xnr+COQl{$x9B`d5Y-H^IH>t6eA*AAyK)H7Bc(2C_rhneU2CUt>_Xw9mJ+XW zqRvT%U=Vd}b*mHZTF+}w%I7$Fm5nGDssgOVRN*k?%eTz3(Rvl2$X48dWEOQmWMyycX3`0~D`5qrQ& z#0;<+|49IATCObNFtp)$gOsqzsCThe z$wWUtqvr7I)?kN{6Ee*j8!1(ljpld#<_|^(+N2T6()&Nfewx+ZWq+LSOx?(?QU&3p zbd2EwIVcu=Nm5g8++8%vKRc_aC8f3GK2r+di};3oXc^9TD-@Lfgg@h1;|?$*zhx%y4c`~v ziE$pvN?KTnP1sNV#sSB(8CnR^c$?aT#Egg|QqWUh!5(FX_TlgX7aJ|Q=Pv*2Zau(8 z!o7ul?+BV?9#s%vMj1NHKyBSEGI)HG^F!N(OvCVRdjTH}xK)t#+AL@;V>h;iBs=kA z4+Jg`{!e@2+RIjZ=Yo;hf)riG#q;dYq7fnBf9;AD$~joNGRcD&H)sz!zQE?qn?b{d z226p;H@_&uAbi`CdqMcq->*3~*Y1+2arSapSqo~Xitnxl?#-ajeRo|q*G(kAp!l~` zB7^!{Dsg~=O*N7)g9I&v*}m)CY(8SdDa2ljpjq9+I%WQ>tch4|Qz)FVnNYnWN42V= z9tZ+&H$>tJXuiB9T`NUb^nix9(B)mU@G(Yl_@=h|z<({dl;7VauYp9aBJ}DA-pz0sE>?J-egilN@301*H@)fp za(lb{@`O`Q^K5G*1!3|xHW_^}!}wXb3H!>+zgx85_m>~)xu%JT($0f{owwS)`XZPq z2mcXr>Y>42iDS~K23{%^r~C6uMstc&)ccBJj&#tL8Ox1dR|aQdAOTYF#vSLVcOjbedp zJJ{yZ5cu=0yFy>Zf(kP7_I}!Ve8Cag9Fqh~xR}S9ygldlwA<}!xk;S}-t)&N_mqHu z&^d-loDsLO$Q}wKfaa$l4ZhP?Vkt^q9lD-f0ZTzzsR=r>&Z$A%@_`g+v;)WRW~9L( zy8D9u$6re-gy8XtbI<pyq|OLU?@o%jv{4*&SxY)Homu~ho2`K-BruHfK!$0Q_*Nn z8H?CuaVtx!U4N?GQA?LTFm39w;H@acAao?H{rmhERn7k-Oyy zktB&${5dJ?6rCeY*y2xWeppM=vTCU)P(<$XUS}o5SVJ6_h9EQP{@%5%gn8BCPp2b_ zFF3r8_87jm%X_cTAmF7;?mtnVVrRU{q7;08nVPE)TuIzG6{on~mNBc}Gaxo6 z$3}^wN;ckD^EB5YJ41oM0<+E1{#ly$suZ^o{BQ8iiW{xZh+t_k8*5pTAzT9EC{89X 
z(KW5=#ll5AH{hGF1q*3@SgT;Zt9t+hHUV%aZe#gDtlgaqeb=2;N96wi_M)Xxpkf)a4fm#Z(Nt# za+mJ^9Iyi%j#0th9IK#Cq^Gzsbq~}yLnAkHlTzt{efQ>PSCu;q9F9S+dEC8Ecj|Gm zf5SM+JZYpkB=|A9smQ4((PmW@i1JzL7nzJry$Y^gP`CROnyuea)MVs5B>OEioUnB! z4%{y_-%9eM(2s7vw#)!nGUS}<+~+0(5g5g1BN3SYvFiM$aO=zou9R*}yU)vr=`yo8 zM)UZ=NEY!Sj0-f3Z+%%%iAHq{^hQ>TKiK5Ap0dOB{?7KFnWzW*=c)n-O-gEFs2aYY z`H@!h=^L^Uz9<77?a%AU-J}{(=uo`swMWkNL0B^+#pzGYMxvL!p(p z@rmKHjS4@v$O{I%$eBHZhcic4;kIr!b|z{jOD4EiWW08p^E=4*no43&0TRvny!y;p zZ4Nfz`8m)#YO%|d6oIsG`QLU(EB}2v39O$cJ?n)(Cyv% z_BG65(ZKCF8&wIVoYpV%q{z{V)89oLDW}jvrJ|84c+O(8OJ({){?mYiDd_1J+lc0- zYn}Ngwfn16u-!3uaf!DRjII5D_MI!6+r7J$ozI#Wg9tTdm3@c()H4aT>wSVD`6*ew zazU7GeD5|!-5wh=I?uF8msjs>V}#SY>cd!A34-xw1BJ_zpij-`*edjcCFTuqP1SP| z1=hj0`r%5iB=Er z=QaZM0Qp51IG<`KTjL&OH`Txh-0E z(&dTsK9L7%iQ`4*dn^H`G@d43Iv{tkHR+N~(&xtsif;$Qi#N~#{m#y;Q2-@k8Mhtl zvEmOyF{T1w%qAshQ(L5d7kSIWL&6$Cfg2yS>KT44Pg9cd8vi(xSzx+C-wm!I1E^5f z3`L5DTP($PB?m1dow7R3nA)bVz<1xI-{5>;&hOr$kpV0BnKB1LnsNw<9=OA0G9nV-M_yb-yZV%fkb_W}s z5wRS1D^7PPfW85kovj&Ep!_4<^^UotTJpmr-?^k(hCMpHjs~Qut~RnWOrWbr20FlO zEORc!=Hv(q@C&*NM{F@kM!TUq)B;u_K?ZIt+Zo3WaP!k$Fqr<1W_qrJzp?JXy__7YEmRd9+%653H|I0tmtuN#8Kv(aAB>b>c-cgF zub(`9&Ais<2;)18Np_p&<(nU0)9f@wt1>3(YZw+CQ@-l=j?6Y(cZ7c9XmaK^EjU}~ zt#9s;&|Q5lP7_wH<3L`98bS*PL_CTq!SFTIiLpuv)&qYoCM zAnagS9)@eeM#q~xPmD3YDTG3RWBf1}bYw@oAaeGwvnauCqqiVpT^Unr`Dzn*P(C%mi ziw8#$6R=YxpEb<<*e=O0we0@bIFY_3-X1>d=dx!;>`2HZapQRM5x^8r&(hXDf>thN zEBK3~$Sw!ArTO&4j`_EmH*IUDO50ODVG(=ULr=~uh#^IF+;=+}mtr~n^ufS~WD@HT zg5C||1Jv(a+KKekj$7~C6-(h38j?RJX~lHQz;F_Gr7`|S%TzcSYV63rWxl&rG6&uA z-luxx_n>=I;tRZ&-ITHuiJ6Ht?4=G`;^v;|0=nu9$8M+DxUkwVH}KalBM&HG_ejbg zFVI&F=#bbta$`nsRa0e8NSpP)?Sg+wl^_NF*7(W2y7zBSM){ZVe|o3HC!hIS=Evjo z|1UlQww9jy(&3WixK(Pu?uaP{s`%ks|IAVQcae~cW7Ll*y7$HSmGIL;|0Q+ zEy2iZLBT%U)V`R-ADw1ia=8$7ArL&9(E6tf%#2+4@?P3e-y9F5bGmq#$?bM_MAuGB z(kmx;jLBSNc2;2cdtz3v6Fjrm2l=GU?0#dUqL)70)+KHH{opgxc8DA{kJO>lboBQ! 
z&&Wk^f6R9w5}vtT7L^~*nl;$S#RgoNM{eggWAXRbe#Uhz*0}E=?BANU7e!RN;|~7* zT71rU{UHSF6SYFJ`};mZ2JK4WXYZjlSH&2moN}`|AABo{8rpSH+MoajJiM3YemQXL zPYulYoV*i+Cc}&q1i80vtU1iIbN$S8^NRfoRP!beigXJ{W%aG?aL;Sw?CyBbB-{cTr%dUvF*G_x{VUsVysEN|V#6zc!XG!ox&BPSiip@><$a8&T<4U(ZU) z8f%?QGnX+Ra8K8Dn)K0&R-aq0O{#4*{L&nJ+8zcP%Ij4|!8O%Hxd+PJzuy0B(f`ec z+YDr5lU|WeA2+Evwm4g~^IYk24esx*5PcHQIuW%V-L=}3wCM13HpI>bb-bS816%z{+SYT|C-jlG>^gkbO+@RG4c zAq1|@$7I*>ulhsCwB5*zf=l%LMc__UZ_}#Y2?sTH4KM9bG;N~oPv6*fPXR>lk8k|K zm7=GI+D>}b{$0?h390^g76^k8HrZOl&iw$`3cB?(Zq&MQT|(-(;g11ot;rRa&)&90 z5`15L?!RiG2m_=?`9>qZ7LuXQ-LWGZC|kEwNB`P9Dfi0*zK{uc*$+K_W3eDuY%%#x zEe=&k4yFevmkkRcWC(+3(ncS{Lm;~V>GwY4aE$>u$QKE+Vok32OyNh9s@5W)=%EG1 zwnGHeDx6r$682jfU)B}`scm$59aNtm$1q^Y?m)(sFE*+8?q4M7QG&>3lxYFw(kP$8{5dj9vNBAv-)o=RKoU0^8<9L{#*sa87?gS79 z_5`!!y^NWIXjk}m-Mut(IsmT{cS}(}-?%o;Npbk(3@!DK`}q~7nSjie9h~84%vlTg zJDbwu_;?uQNrSD#QNd7izOnnHFj~Oaw>k#blRjES?WHv^KqaOh3Yo)V=Iun6wMf^x zyE!p+hiaR8`N?`+n}3Y?6;_D}`?RyIGx=q+6J?5190LcHq!$}!J5MoVKL5BC;Jf~vk3fxh z&tI%DQyg7w|B-;sW;K)q)IN{fx;duYGn9B~0oU(@0}-eGHS7&Lg+<~n$=kXR+0QNh zTDPR?URvn(d9T?f&*$LPn{g0LFKS%2fpCh>>4mUqf5jQ%z_v&^m6VjlJRRhR=ki9a z3IwCTcGpi!VYDipr)1X{vIWGQI}zsX5P6m+yI_``xu%>QkiBj5ZbuFFo$%fNegU)T zS4vW&++lhP?KM3bV@2YuxdcS)mM!<+YBqV{-TVL)HK>u^NN!|zOC}Bl@89>QNmNh;QChYOAL}gV7U$%z^NSF8t_vd}P zs=_A5B{>L~G*clLiaxglUL&2Q!#S-2l} zy2AYnk6YBu|G4f8Rp_K#k-rF}x`&6UYm`17r8(DiG27wvg2U--x>*e|*|F^fK1I0N zeWY16uNIKV*z~j9FQI>aT}*+8DOT$)Gc;0+JXXJfu$LAA&=1ogs33T)%Me6Q z`?odknLjfl{mNYCa-mz2Zg{1=q~_nqYlao6#7moIbIf*rRGOr`GBMPOQpTG%#mMo8 zt9z|I*M!44|Ad~(Fp3BgJCaj=ZK2NG@TXD;O6nwe3+EtPF$947)seI?{d`5+c9;Pw z>}hWh%L5TiN-=~M2UpYyyG;hgY6cG1nM9w%$7$3ABcHQ;3r?C~wF>a%?DUt+uj?(& zDtAqqNsu~Ue8sea>H-Ie#4&^FS|Uv5tf}rTe_E5yZ0i5tIBvNSlw|#?TNjljT&=Ta zD7TCjtDmxrF8HwdX(MT3bW+>Bf2FdK7AcSnm8#F)fYZ$eM#2L>@-oFkRPW;RrVr!m zi0^Uz#x-Ou8_sF0^Uw=+9(Er3oHd57>vb^LOKW4M4d-Odw&e8Hm&fYIj{9!K%-*Dd zQDc75Ukg>~^ouQK+2zaOtl|p&fJi`Hv{t`_-f_9FfBMX(qr62!bTr|}@)46J{gka` zRs`$hlDZuZgtbq{$_^E#=%Vhx?LYDVc+s@*YfS`Sh-)r7p 
z{~J5dA;@#KwNFP4vZuy-7bexZT&V-;L5Dw0|MuZXf#(j!uw$cT%)|}kr;R{|?T@mB zLB1i=FnpS=Y(--n?(ll=gf_*NIap!r-;To7FejOeD51Tytkyrf@A<$?9qFnj0+Xt1 zcX1j{VelN@yKE)Q1)*wzREISJmFmh9CRhu4LeY9KEqoc*j2x?@1vm>nBH8E~Wyd4k z8YMN~e}t(c7Ebm2!RV+hCoUqC8Q%+A8I>GR6n2gPo9v~{YLSDCoeGf7+IJd9S#^J! zF-ivipB#5*Jo9ybrsTckmkkRKh;fU1X~Cx#8rh~Y1LyvRimiVUi`=IOl)!Ea<$c~njd0?(Gb6rD*qWUH=twa z&UFxzw)%w?taM%alO0Z1z%JK7?eMpqWy}hJO-9k7VvL}sPt>_50L1s6!6ZAm4?}EC zxWCWJS+B*M&OL39C;Cf9w&(f@pp7q~KF|}DSwl)ue=+5&+~4Q?@6A&SlAFTxOxkgj zKT&4LG8aoo36cX(xTI<0VGGfWEV6+s9)^2QHvOBV^cKW}UHq>aGVQq@!f7hS@lJBCo=FYnISW1- z&+2MX3chUkezYvGP0!H$9E69tKuoG?Fo9`-lwn{6SV;sIUWeSi;22G;5p)h`fbc#7 z?1VI`Oh+BeZ%R-@SXUQ|{);I>GJt-H8%Be`ZVSoZjk59{cHFFvRp z6|FFLSr2E~)k*g!d5)q1+iU(^J@a>_$&E>Qf%tXvzXLY>jq@jtg;Wm06S+R9>X_C{ zv8%*WT%|(*B%>A?9oXy?7Z8;$#RD?phR$rihxDpNR;_20Q4UVhN0xQx(>5tc0D)R+G;RFv23kl<99i9((&cglj)U9^X%p8&K(pgiGrEn3A25e%#};n9)iULKMJQ zzSm4zbq;&wn1GMAHuKU*4R1d;5j*H_nzlJ-t)k=aPIs#61p6SxdzgHLi5DGVN~DhL zXcHgh)zMMUe+@T$fp%OA&uXK$C6bs8!0bktAhzXYZ;@UJa7iy4YE)AKg-g3|KVsP1 zce7ZL)tC+Jk3Jr!9ykqA%_q6|uI5%yXY!+M18RM|^6QJS6qVg7`#(j?RSW;y+sjxe zo(VGKhHDW%V()1O{O~-WR<@b6v+p&~7+QsgmMreUn%4(AGBu3{3a=Gcb}XYYCgmaxFUzhvXg4@Uq~Z&DYd-EIPq1e8;U^52QIpDLS$7 z`*H0l-P|0&uwM%SS`l~-=5>Blh5$^58eOSMkM{}1JDJ-tDa&j1ZBh78EriE-9Kc)*6Q7^&s zyNbjH{+_l{r#O}Tka?gvdJVOUf*C()w#9NVOX+{e+jg4PfQwsD}RWAlX_5P2Ag zVUKc~Vg?4~s4p9zY0&F^!0=HsYhr&IE%N?LivAtIjZ>i|H+Vqj8!PYtP+;m|M0FyQ z>u8g8Z|_|Q)HzF*txz?u;k0Or`(&VVoF;toUe-;;BR4g?7X7Pb$^p^oSpW*hza}6A znoVYrLRzwq24a6~;4tFN6SLerq%Efl%eckJdNJlcKIHMewy`h$Ej9Q4y)z9{U74mErgr2c&ynAEt6u| z9%AQV=?7lp>U@!0PDEDZmjc)3vm<{(t`8ojEm{e4k%@GG8;FXl7XbezW9=!{?d-!! 
zs$(9E(5|^^;c!smhO1n=(Y++Y^muT~V;p!N?Hbf??DTP=Z*adWe}QkAbr6`)c6zwR{mE|evGuRRj|p?JfqYat{&@#td!!>H~Um`ebliXrE%D!Gtspx;ar z!e^l~4o=HJUb5;g7b>wDtx33QCwM1+Cq&WxvLWIX4(x+~Eam7)PaiJ$NU?S6YbJ)j zrN*F;7|tnGDqPO{K`-sM*{M%R1{MqbX_B1lK3rb2LQ9oy!Y-6M3K0j&jm$ zQ=JLgHaL(=*`7`*cPFx{d6*ZaP1XvMngZcm{<(GE$V435rt6FQNO+s5y1Vp;Jl2@L zWzF3U;2&{{qTN+9X5bDvOdm8sAh$H^8 zAip7WaDWpp%jfwIptu+n zdtCjMdkF;kA;4JxW_1L!vls$)XfI+$z9IDXsqLAze?%IFM<%)O(nN5P&5$TRM@#^5 zW!ox#3q*<3`gti^$~5vtZ?SU62CT%TQ4a*Cma&k~j<2bt&;Nm=}g!h{lwYT)9#~X0jMCwq(xrA1wYaRqa zgbd2W&|D-Q4n(8YcJP{rMySD$`tMFATslu88s;-d|E^~POfY_Maka<8x?hW;tIQ?c zwDzn00jga7YN>|y1RP-y!Aqk~;J}#;3ygmS5pcLfi&4P_1dvY9@;1zBoq$u!dbx~s zOj^eZIBTqh-@E1JA2W|n_W8}!xAB1llfj*%6(4=C(#!LNQ2|bA>iY>-z$hwy>rS@d zeOSNI`5CuXT%PIFc|gI5rTN1M@Z|Ap(6J@)$#UwgDRXxWU%A#=tXg(az@K+`^?RkS z2s9St5l9h|fLwx$1YeyMWY#}>bQ-|q(v^U5E5Ux%*z54`2wqw(va@y6_VmJP6EY}D zCnPA_ebKq}ZjL%1LBVX_H#p3PwfbS8)pblyrm4^(=P$$SCd5S~X=E?|0mp!QM&Z># zl_VY6W^}cLGS>k_iW$@5k&A01-2_~t@n&877RB=jn!?SSdc;C<&{~Zj;Shbv{;IKj zqD1vZK9T<>R4ju^B6TVlIY$En(6d~)!J#R+-R+EKhbGs9}zhv=yks zC9>{~{~%@QAi|XAxb>O=LstSvmM(31=te5Kj{t?1FP>mhBD`JFzmtDw@bP|@?M6Wv zvf9%rx?s7vkrvMncOFeF65QB`=EeGrpswUVgK*ioNNEhig7~7>+J`*?Yox^$%UZ<> ztAniferl@jTtSBDV9>`_UXRK2F(pG_E_HOUZOd~=-Wlwf8Dp~&_unC2vGQ-i?3v?E zx7ugJ=yP7PzFm`GVJn^Df%amzF1H)0WXRfEUWKT5)iVyp|71(w5v!}VQyG?Ncej|=immV^T-G1srR!U@C&Da0@C0d+MgJy>lVb31XzuraT^}D zBBRzVq@m@;2=)G3PBLvyYshbpKb82&I8b8duH9qlQi2vXRL25-p!3=m&`h^j(NH8L zuqCb-RJE4!(DcKka@nIX$yXDoN%&q`A~VczsmZqOXsCAYhhV9ns~*^fHZvztR10PU z##Q`D>dp*>#)jyy*6yHevhB7aoPyi%l^x`}U3wtMaYpO3A*iI*gt}v$$s%r6paH~C z?Dl9)L0v!Xu(9{ni3ztDKMYfLBQ6f0E{&g!)|1B+S3&ghn5`G)px1w|`>)mi^%N&L zSM?zg9|4w8^eLj1dpfpkEQSCVj>hcHY$yb1K{;E$B4%%n!N(W5|3G!`7^2MPFy2v}#KFai)05YBxQ3i56&t&p7%q$zT!( zc0x#po`;PRm`_^WwkMbY$oEQeno%=;#W2J1#`TpzDsz%&?5pwhcD63-1>LlT6UYb% zDN?f9I&l_P+}9fUj&*^D`Htm^aKPUl^qafe?D_?)40Ko>KYBc8JE+~224kOa9yHNU zFxIi6FEiubx?Ya<##VX#^eL}amqAZV*aEfqUrfUqyhk^~|N5E3$?#;GhL(y$X-3XVcp z3jwkKg0h5=VM`zjL?JK`k^mtIB!c%Fym!vI=iGCDn=^mG_sRJ_-{*OMUhmgANzQo# 
z+lrcc=!k(ni2EY9C+B%bd;06VTvWgwa7N=<(_7A zXPL%)M_9q^*q{BBQ<(EYH1b+I3qU%Tz4xIk{z;HU{i;N(78SJ?Vy;v`<;BXQ<^0Z= zCX0uy{JqvDcP8Vj3ucL>jdNd+JhQhgyojud<$A2ME*hI}0?mk%MnCLz0hOyPE(?Ra84 zwia(M)MQ+{p}}Q;X)i(8of>MAq1TcL1oqae>~!lI=`7TJYs6LFGF}#VWAoB$Z!o~> zU+<4|t}2#OMo{!IhT|TQ#=jzbiW5Rt_*l~KbBBQ^_reaBPI`Z-Hh&Cd9 z$>p5~{=jsp$I6zx;{{wOq7uIf-KvNLU*g<`K1$zKWdzG9V`i;9qriafgf<$F6A*D+ z?nOLTKw)<*)y%}q(#$w}oxKmCfGzg%#{GPA0%v75qwI}V0=kwd(QyA=_mJNd2Rw2a zHnyrP+acm~sxfYNYp4U}lj^_qLd!gd&J5FXo4B}FbLOVxZ%8>BJ+5HRWX@`+7u!Sd^{Lm zEcd!q%rD@`sG~9)sUzEML1%s0Obe#fpX~|Nl>ZKg^=j=WX=@2t82yC_<+t#_@cF){ z`cv!q15HwqoEx$4B6qsc2tS}z#u8>`*_|ipe6Sig%P5lwf8KY6+8f4r4d|q;d4o+`-knT3kg2Cn_<`U*7o_AZW%rC|kC5B#Zi~ ze#t%Uv}BW6uLD84{PK*c)KVO}?T_w#|fBuQ1syQ+UzRmHOCMqAb`xp@*SlBw!(a?M9Hw@LuIgYt(BK$w>gu0H|>!Vd^)(iS&15hsr> z)h7g&A(VHz4j89`>gv9Tx71Ra$y0unLEAX1)Q_HxM1K^ZW7pppXXwMR zZtpy}E7J&|3Re}2)m1STH=)_Lf~axAx64>l8WLc?Mo_X0zDhAIBJF858B!iwk(1qO z;)z68)<8@|(J+!H9|4_4@*^lWtEEX&zIzYU$sB)%LGr+pU%XFol<6Poj9|dN+Nys6bnPy~+1s9C?02xW+P>&aatq>|>2Xypww($J=sezY4)>(RIEQ zqbtl}?uHb1bD)kK$7R&3N)3@n(rieR4rsc+*ZPwYRFi)s;v!@L`gz?B9=-$nFT(~N zu9s?M{7eXn6%baA05zJpW>E_l@A@VcJi$^ff=jO65@X?cETsmJ7{ z44Clm0G8_yv`BnE#o&S~_fbE%oPNo*&J8*qmsg+dx#^&dMFP2Hs<4B3vN$esM^Hx6 z50yJS7RwK`5EAy?tp>!NpK7cSQS0x#x?;^;;rv0xADXe-f1C5nQt~j@nJwlDFx_$=CS73d&8YOUJGS`K^ZPZ;(%(|X*)7(91no?AK{Q1sd^C)jk&R*vwWQ;F>1l@)r1Ve|6gS zT=Z;I2;awk^GnD8|8LlDY>KkD{UX~}=3|j~;xRnt+vgqiRz`c8L8??aj2m|?4RDoA zMwQW3Td)P61)1j-@Nd-|y*minlfC;fyH!xmLjE&H{Ay$WGHIJc_!m%E^s+=3T}-<~r*h%$LK zTVF%}{k5}c6nh9{l_h+8dcFG17pN$AUxK*p!|5$p*nEVoOiu{;$LKbl{y`6?5o2@W zsS`32N&s4K3;s`M>$wrVIX;2}279afETA}T{_H)#$qpQ(V$#)tQmzh7zc6r20!cQM z+adbVqRvFJk;%T;?S*XXBV64BNX75|^q=K{kMrHT_9%K<_nud}c(VprIe7o^sphQC z6*}O}(p?`$)$GZ;eZoW*psQ_)JNILIiB9nr!?K05W7=*U+MjHfZZEcNgKXZ0J@2Oo5nX}28{8k=@ZKj>7?l`>B%ZzwEQ9ND2}e6UKWFJCb%dvPzI z8(;Y>3Y&{Ib?~)9U?Vk7P0$P%T+2fmtnZ-Ex=S=az+3++v~E{&LoOPTYsL{TCe;w> zJt`Y%-E*&hl*lWViz_LRT{lhkqCiFpY)T;!cd3{4{p`}?u*m4>fW3%luuh;YazI^_H@8$c$_v>=8na9j_zuykm>-BcK-4pIxm>%al&3EX~ 
zq2qUM-+XxJ&{4{vLr3IzxY)nZ*cG;6-yIHoXnNyN<$%;5_5%)2BXgrehpN-~_gy*J zk9qxX+XWsvB>eK=`|#{j+0a9WSVebk8a)biUY&_=xbnDuheeS6iwhYkXBr(rVlzuVFNRoFeK5xA>uN-`5kdY1M*&nCT6ckiM&j`VRY%6h@mY)2dm>L3QRHyeYWk;8CckrIMe5aWUi1v&NejJz z#^upURYxj#91i;MbdnKl%G|{Q1Z?)w%F@Q6e?<=)bOBkkaggHVU^tWisl)g!*1OXH zgzZ{LpCEMiGYF5SEGqkXr)>=apv???&D&FZ@~s?R5NI^@zWuAgLS9?K5(DgKuc-| z*MWWUY^9Z7*E@~6Ng^mw5&l-f*KyKjbrZWzn?;CI+&=ZX($^25vk_c~;mD!R)ajm3oR z^2a&w1wJ%|tp6)YP{ucO0TnlnnM=VVA#2`1IK=qhofWZ_FjCh4sC70mHFA9`4zs_g zTpJSnI|)b|t|C|Xo;1Q(Ff%wALY9#|p#w=hWGIUs0BlPwr4}cO?Br>9Rn>MunEHMZ zKeOeiGm|x5^~}C&7*(!bnY|KIL9u;G@&B%v;y)QDD8=xAZjGVMoI&M<(y>83Mzv+{i(tF!}k+s|5=&fAU#cSOVn=-==$jmk|U_il3$Zj9>SdSi5Hb2lgPgl zSbbLq!Z{LVGsVw0+`QFb?T_jDeHrSmoG+f(3*75jmAvX^!%uTd)Awy$i);YaPXD?U zo{BxekqyQuleLzzCN)>TGr6T2*K+b`b>}@D{#`|z3yNn+j27{;0jL#%|2^YbJDvhz z-N$;D`_eH){R^wyfZevsE*XIX`XRgRVi>K&{2lS|+22n)?F9h95>7~Y)%V* zPvks3y+z(NFy05Bu!rU~U^-Zm>_76}XhqE`L%S5Q;AhmPKmC5%l_eGd`4Ew# z#>T_i74e{o$9lJY_38B|)=l+SCOgy-kQor%il^UH@9zV*`|f;1Qr3V(ac(3ntMt<> z`=WmL=)8x#5$jEdtOmX&0|8~!fr!Sqm7`3;$`?&pvJ-s%J=+|RgAt30z+4b2S(q~Q zrRd?hrNQ32$;^sBTR~T%;Pd)q2%!;VhEwis2`4#RZTtnBgeEnuM1>H7^DF^>CH}AN zpK-=DBbs756@lB!lVd5aiyvQ=F9`KgwJDJc>3u6k&p66PH}cpP#*H^1@@{Se0vrVI zTQL`wSs@V?GK5+PhXVcU4gCkhq!lzx39*PyW;R+$&dYLOzh~1$ysNQP$3O<#rZ3_Ev zIRqLq`eWNKbYZh$EIg->N}wz?95XdoC;`D5du9Z`5q$>h?QY>3EK6A=tbqaE&2)d? 
zVT~9`&+EO5>s1Ns%2ZV)7gNn}D`Hm9{;C1%qROM|Yjn`;aGOS*0n8_5+?R`>EqkymWL;g4kudU6ys-EhYPdlKjF;lm0wQk zY0hq4!mka#zG6gTeE^|mGl zC9L*}cQ)U&=7qs#i4`&7gI^oRK>^H_NkJxpTVj@cSWwCa>9D`=;sMlKs!oIdXQwNF z0uLVxns1A&k=TDZS99e&Y0!Wk-XWveIOM;968M19O+i0|e*nL6prWaa$+2y>B7zl4+H=veQ?IRXc*cnu{V6~DcdP+~ zH??P-@`GYk7w9(?4o^RX&3={N1eh=kzTJa`VH$vfLD|Az*#BOGE<+P$#t>U5LF3P4 zV_W^?jmXME0Kbc1iA?h*>PT3FO(<9Nw`Dw!b@R$K143;X`BS`0LdN!#pF((%2*pgl z)$+}%&N$v}dTAH1KPQM?`!@xQK4EX*8sAe!`NXXaJU&QD{^wTJ)HApPAsEAUkUAJM zCvflsXeQ5EY7DZjzjl;KyG|0OXifzp*zqiE+uK=p{hB3dE(ihbXdLYKt)}|)dN}66 zn7-5rF#eSx_FQ=)9)TJC#DN;!%`M_Uk^fdTEBH24Ih7Vq(-S(snYTu7f%QID83cA` z?yQEbZ{-2>cB|r7@9VMh4MN@@UDs){1b%KDG=62=eM8!+s$|1CJUn53@+g5e*3dAz zVnAowCMCKcsOX_Y%DNU%Y869UN{%)?B=sbvu6c@&N!^P^HBqw;PElGy-yD zR}Gf;8Ub121;Z;K8s|tWuFsM<7#c&7oP6N2=^Z=Eu!Dq6r&tHa=F7Q{x@Hu@=`A1v zfe$=*t3)IyDgnq?wy6z4`gsm2U)DP|q<&8Xux$-*BDS%vH4PTLXitzEKzo?7Ar~g4 z!Df0PkDDEyI%MPIWHvv82t|#t9hNd<*JMVb82_TbvVLD>5}x}_y?ob7mnBF$?h(;s zSE7byP&(RGd4iHUS@Q=Ld7}>K44XYUu~yi!r{pIsd#2I)Lx>>bDIcwV{gdC2b)-^j zxM27$JMlCO)@+QfgtpN{mDaBjAo`~E`)$raO)v5w;@pNS@XxvxS#d3zqSuds zLT96;_#xYKX%pbV{mT;#uc@VkPtXIjHe=BHw-E~n!0$CdF8&wf`FFAH4&3r9R(a~u zrE=k`y$4BuhoCa+;E1)Q=uU0l*Aq1HZc^i+(VnT7H6EZP+7BHn&M#)h2zRol6M20H zeA$hT-}KfWH(D}cFe6ya6s7u_t0>ldl5!r%DHXo|Th8qpYA}4qxB_j-3kHB}0V20Y znu|One8F1|sqhEWY5(#2ix=yG(1XxeZ~*F04=Qp)*T(?ps zuiCNXwA`>vlYfEQmMyPQ+Kwl2KaS21r2I42vNeDY1|TvXVTvL139>TKyxz#pN4*C~ ze%c~y7qfVfyt9VM5KAY_?|<$-CHpM)oFVD&n$Lh)ujTZ^n!}6m<#8U=pO@-SuO5a} zB2X!9tmRf?!hVh;t8>@sEQyL51;rhJUVM+6EHx&eSBcK-Ut0qkqJ=B?nR z;`9IFrtIDx9JCfzGuhUV=+C4Cd1(l{m21qg^Xw3Udo)gUu19NX#!$|uqql(K9T%w z8$^}-xk22nO}8}gn6{ij&1WE{uC9D4%Hy8XCC=3zK$ieav;dkqVf{Z@%Vm)R!oO*x zMVD7P;(YpVjb+>WOTH{CgT2ztyp8><)SX5az1b74pEy)HN_aBK;M?9`^Mi&*h-d%$ z0c8GtKdIZCnKEmPfUh==BTo^zywObco_o)eWBRY92iZO|l5MZK z4QM#6T!HH1s~-vUkV%W6xFJtY`ig{Z7|ZB1sWEru9Cx;SAM=Es!|T&!1RA|W9DADK zB`5Vt2Ajzk=$DRDdA@{5zl?=c-ALrWFFglLU<(#wh96M`AHd=}%T(CDC7Wcg=1RY< z*ocvnQDw6*Kbcy3rfcm;xH@*xz;ATVGH-YD(+9Gx*QOI<+Na 
zcLm7O$q8I<>M#*w1n&&&>~FzYIgEx3S;j85Lkzv!HCa~G6S3kzkU{VD`=zj0bAnKD zCZ!mV!ism*qE5q)$=(`Ptz7M2Xn#88)ZPo^PT64-NZX`5cl_y%E|O^1DUc+ zl4*w>VQ*k{^vS@)lk`YNHlfdXO$WW7fjB1S=-;+Fx)g9g{v(LZgk03e!2`u-?%-y1 z)Wh-Znpm(*;4f3a&gQ0H?*Y@DJ7C`I*E*mK!e9@`n=<{UKtz3@ACcCd=Bz z`gecu96+w;9f&jWeh~}VwKip(&HmE?Si$!`AFd%TC;yCG%_0L>d&+&z75&Q4ytN9u%8nhVSMmQL>61!3dDrbt_4edD z@UzU+$uif_!El1+fxy)73=-g~$%hSK61B*R!{VTK2M}*=a!?z*&)T#Mo!?-`fB$`` z$lN_u156XCKpJ*mGD=9pLVAVQQtycrcF~>sAEArAq_euoe-y%JC`%4Bs&*}^A>NoF zYs12Hgb{N!+0^+UcFtKhG|jAzUhiHXp(-gbJ}cV>Ea>~Gh1ZLm#IBJySaC#s7H7u+ zxd9acu=4^TEk8riT?caB<`$%ha8yPM@1{t@gQ(36_qqI%@EfI85%|Ebkij;?5# zStD10a%$Sp1mKC#an$U#siaygCronMYw(Wl{n!+p^yy8JU$3PHCUGyvBegx@>}P3_8?)v)yJ6c*c<{j&EXJy=Htq^N%;KQN82ud~Ve zRi{+|r!Ix<7hmz5)`^yp%Rf7g%IyAnPy%JF6tnYXywZg#a2?syfL6UWWq0OxgH-!SHk1TZYasFKOG-ntV@=x2 z_OYWOB@WE716Hucmy0We-b=|q>+K7reS|y-C&@0n*AKob+OY9wFybkj*Q~2C1%rCy zGB!NPdpN4WZ?qf6CVn*@srBc)tTgpN9rhsthhjkbH4-5JQl1+?j3elt#+a6y4d-bu z^_^N%fQC(7QQfDer30ecxX%8d+kQPveE!EUF)rdD#fDSW1X}1ug{mb5s`uXM0GmVxC zBH02%$mL61ShXzFQq0P2rgHRgcnDMd&q`>#K4pV#sYa#-OIabQrcL8G`Zj>5vH;=R$$Shh_JFao5`5 zJ6tTycapar$v#(&(~?wE>d8ESHl{_W+vLC&ZCrQpB8LgUmmZgVLrf| za|3Y2iM$`T3`+ui26a}-n^LC->Imn`Hvo+9p2xkNvB#aUi+t z`B{X!`k1FI_TmjhmBaTGZ%IzLU1f|E1FnUu!>O|2tNyq)Y94CJECs8u^v#&edNtYn z{*zw-2XJd@JOwc-M$D`dbr)&1*+;Z6kNZhc9b#x!M20OK2Dyg8{4!CVWy3#E=92x)IZ5zwuv?LmZlVlP&ic21|Y%-o)qJKL=~fZ_LS~VXtG<-bmnsa zlun%ru>4$0HS2%7ZNJwphJ5<)`Q0QH+G$oINS@>4b^}st0OPdy91n6+mOZE~(51i_ zP+$uA%(3G{Z%_*+Cx+6Ke`(=n&{%Us86YAG^px>s}TI043=R%uz zi&?r~0^QGZa^J;mrSzpoI1&&9c@N?~S!YrMeNA@zpk}h0hcO^LX+y5X!_F@=EkY)t zEw@TZ3t!90CA!8cGwvQ5)?3F6vcJRINSu8R=Q@AStwU4RN*B$-fB*--yh z;Vq6y>+Ta5Nt{U|f>_hfBS?*@;z6O`QsZigu?(|v>N&&fkMdjHTUFF-v3{yPm!8nG zN#n4n=CL4VV7zK;sBSPO_tVNIWT5C$8oKW!<%j|r_-gR;=d5=Uk4`4kunQBlPOdEu z;z7j6MtYNnq%IQ51hlNBF1Ni4cEp<=it~C|!4yLwcvBawQw5FI8bP!VqaT{3{Ku=S zl&#P0E87~s%P_M2hYEm(eJoT;pv;m{*~jiek!t(?M>a6d?I~(`tb+$fS7w(P)mQ*J zk#*;RzfQu)fr7aC5kPMm?4NBqR;IQ5eNULfTZBG|v0S)5?tK&%=Ipw##GBZ|bJ+~n 
zz%NO%nT}AV-+cY?=sr;KXH7z#-6z2YBY7Wd)}PXq%@Co^Ql!;Z+eU`>tvC3A#+_LiE8R-KA0(Aj7}3N54ZJ!L8UZl<^lLr>!F zwdKzosB(8^ZwneAQZ=N!H&l@?SA*|TYY5y>Rq%e1mrvB)GU*kpY^vpec(i26j-4m* zqh~&NM!M|-q3U8h%w1U;0x}k^*bkm$Bg{wOcLGzU%R%DvUlsiMatWUwbo<+B@twPE zrDB`3p03_B+G8kk1b2qiU{l>*B`VS;w*elBN_cz$`gqcbgJCbeF}yH#mzOr%G{QuV z_k{A&b}~;|aLH9$-*l-7{aq+~?R3R#z=e;W7ql%DNfOr>@6W4vcuBZ%`6`JwmO7Yo zRP)q(lpJa=P`gOtg&f(g`)MJY6gYRPL~|jT=Xq}%RmZ)Vx6-AhD9+6ccd|ccOq6Rs zAPqY2)oAX*^mMzKSfx%LM*{tx^h9+}^N<9e-+WSeJ1r4IfU)h|CDgm~i6IH@#=rJT=J2i4{FE@z9p8G19%b|LW&b zj&@8M~gGT0y!DkeGpWk3tQw2psp;arnH6$M;BcE(l zLUaYt*-DxL=OB@|Dv-Ixm!1@4X~xJq>G1A zq}%e*Rnh|lLMc`8*VPhF=LZ}xj)|4+Jt7>ycgpS<2Rv4oRV!bE6dn39jki&WTU!PR zI6NW{Mc?kwy)$0E&G#FkI{Eo+Wr|Q>?y$8ZL)}l30@M3dVgoO}`|=B5`@xy3eXzb? zaSCFSFUY53Yu9y)lX0t?Fwj2m3v%CnGqwy*?tO%Q-C;>={1Dv3WseX_X*lwY*1Rg= zc9TVP`;&BWcJEMx^JHYr-&TuBF>hY*mXMIMiR>TxS>3s;?G_P@w*X%mh46)Ib)Kjt z$u+)UZzDwY@xO(6+pE=wMdISqCjh^1$>Clp413;mcDO0?Scm%|xU=Ql#WK&g`9z=h zht>B2D)FO7b5lsZNnWRq;{9wKJJbejO}>`7RmSpUAC^zNsvrBy4{#SJbm@`~S1X76 z2_YC)Qk47sG6#7kJ9FT~zsD#Xtvp(0d9;;UA|^daJt@L~=p}!yJLSJ2AssZAb1FT# zZNNqRm9^T6K6Uk!6c?Uyw7n=@9aj@j2 znTb@oM16zX7r_jMi*M}|N!yrUUMQPCjIjn!zRUUvFXs59Q}XydyG~;Z0(c9JxTY(( zeHVXGxq7(ps*Ycu65oZ&g8cDfj#>BQs^d)JNY#6$SVpkSr61^b>EGDaKc9IN*)ieF zwYC~Dskf70&zB+M=x|LgS*A#-A2NI{rpZSEW;%FXjd! 
z{rNFztD>Zd0S6&5Q09xTkB&xd%zsuj8~&S+gBE{t+WUC{G5-ah*2l-ocal>b-pUSO zN8HEVHdMx}ymrUha@&J+#$W7?w}&Oz-VsR`_IZ`J;bLhX-x^ZCIzmKyxliR z%_dwcDBay27|JFrpCb;2EoML$`mB`)a1k#yoqj%AY1 zaN&QvklEruwfTkf*0Rg9JqV48@B}j)|I11E0uN&blI_uW?@H{J^`I{QQKBAx(^X0y zoqqIqTU=>)!CkE@RM^(9%H8?J_P} z4omSnQi7dKgaZ3>U>-Q-1nxt1hC*;+H6tZtg1`6|lus9$%p!V8M zwN&2=u1W1+^I!QsXI!f?A@1B#`0yMXVsB-G;NO>doiUN?r#W=RJTm)Ex?AIjoNb(m z+a9~2{4tN6?s(EayQH%s>x4yp>FEhe!lPB9wRSeRr|7A=y2HB;w) zIA^$0%arqqfNHlgGoAdej4OxEinUkE{KfuQm-gC9KmyB7T(lwq2FjLxn82I~UaoG* zZuAuvr$Z4&*9HwU%mNP)6?dCz8O1Jl%g|yO&iB3wGGZ&rsqAR8dnMU?p#Tw577XKu z%>8bTTu8#L6Uax^HXzoolK%eKKZtt7Psf*KUIu?o%m`}3220`{Ng)?WMu(~atMxwe z2LC{o+9J0c@Ov1w%N!ml=Qn=T&x~9X(#v*zjwbgOY)$Y4-A(R-JLCATKkqG9{nNm9 z5bCbDd^#CZx>a7DVNH4QjBXosHM_ZdELQu#n>A`!U_ zo83nMN1~6WgalU%5OzUFosYn5?2QDwt+_q!Nr(^NTIQWoMGJ0FZhm`z=j~h;j5J=w zE~R($O+LDJ`{I0s^9V%rn6d6B^RtW|YU7ukNZm#Qa=!!b6|=hSC;of|R-P-d?*?^l znJ24%mNBUG`@2v6NjetWOD)q*QX~OLfGQr>S0$YInxznliG|>ua^dh>i;#)JFi72Z{U3)Pb(uHCL?}YS)RY z3aR0dRI3X#*X)`#y@+_`)4wEH3$H6ww@3Q^jrCn>MaM6n7PG!qr>pDs_ZLTCpA+KF zv^IbF_Y-QRk8}zR3y=?A>!;_l)~sN8(QEpUQCsy*q~lu{+C*QRr9^wC8^@^*rnj*D z=X`jH?6oZ%*Unn*y@hr&qsiAbl8hexO<#2cqf|M_l`!^nJsGmfsjtPddJ>oi842-0 z^ugQ1{;=ww*dsWvO@+D0NQrITtp{eS^$(r<9=_S`{7(9W?Wkq0q@*(}ufuKMKXT%f zTfVhvO8O1SY~>UJ-;rn6kajMkp_>>tzT0%5oqJ)gbwFgUHy){PMey^Tn`laVe3m1{ zn^h(_uCvXEkQ(_jA3QLk1`5c&WbEQWm(vMx%*S)qM1EW&0C7Q$wM1SNOlL z=)$BJ&))@%@h#@A0b=)}Dgu1B_*nTOeV2{M&7%mH1aB4Rsjcf{Bk^J3LLA8p;>HpE zrN^S>BQOii!3BpG)At-oPU82KIAVB$wZq0XwE zyEvbq^8|j}t#`8CI$5N<6ipc11I;A~PiFIC(xErMZ^I%#yrcJ@hU!L(__YnfUW@rx zvfc|(y4ACRT~cKN`=EvfyO`MW6CX5FolM}a4K4vGmAJ^|G-ao#jvK)ro2pjRHh<}@ zjc5LK!y9}iRH>ifC_i*lB#MuDij|x;d-ciaCIzaIqvXZOu&3f2MmEBt1OgG{^m=Ti zwwy%l%M{2yq9j|fKHqQden5dZR|Y{;@wq~oib{}1bjH@_YVF6#K%n@U35GAj6jz2K(a)D@=t^myoMc|PI{x2LnT&D-HZr}EE*Qh~Q3?AXbD zAP4O3$=+HC6C9Mm|uOlAXT3E-;f4Sg+N^SA*A*%!V!)AHgLf+jvg79iHEU_Iy!+_n86{=}6^Rga>vrusj< z%Y|YYIm+@ol5N{zI$uYRVQ~V-(2vKNRGXsw>TA{u4)L=S!}0fynDBycIAU7uVb1Up 
zbYK_{_=f3nPJ7tnv~By=^ul6;#4uQ-Cms=x3g~A*@~!-Da@d16?SpGC&4vX>uaRw( zA`lRKJasJ@GJVWw@TxFBhn2UihwCJ!=E71gQIBE=X_&HPT{Yqw$P{rJzhAZDi#4p! z(2cjoN!mQbwGBOu7IQUe;0SVc9P9YR1A8$t);@i{jNgZ*1~8e51jJ)RZXY=zW%TZT z9=pS>eb<`f#v0KNlwC8rBuTGmC?UdJG80^NRll;)poPeMqYD`eblUrFYP#u670%A4 z94(87edOGn(D}h8IecsQG;X&B^PkpGc3#JHmKe;7aZapTC-+U%G?n=KJz-Co4x|>ySO^1$8mKM*UpCu zJks0~*DM3;2{Hc!r8_3+fW%O{q1j5Lm0d{@O13I9STr)@WhhR0Y2>5sm@sAa&B|Jm zk23KOd4yiUD5%~Yhh;zR^ByqD){ZyXklt&fRbDxU;j%tl8CJxh-`qj=3Ie8GIp+3G z`pW``SfYWUq2Q?AoW{Ap`{6U)MvA1#2m_1$YdgwDe$8N^Q}V|Pcp{wtaH70iD{6%M zlhGqZqFjaQSvj@?)(JkaywE4FXy+8Zy0W{|Mj6&PA$Wbk`O~1-OgAUNX#1EbN9mlc zPQy{YR~AI6vBYIz^&Ri%V_Rpu)^tTK{A$by_$Wu>HN?|LSA`3@^h|LV3fJsk3t)Y$ zKq1z+lP8eger22xd7;_|Mt((x7aq;!YjGJ3o_PJF`BGdFRHKz`__~^mxLVh9>o(Es zAZDDSG;relSS9I`2qovrK>Xn2Wd~)_CvwKwsml+U(hZjYw+2@d4Nrz7Ul-C{JmzYK zbJ(uD^EFG3gpcf$_l|Z~^Qr@y8y>6D0S!fZdvx7vYYdFxT?6x^M0O>pOEgxZ{c|{2 z_7kc>Uk1f4lFVP{94}{!U(wGw|NK2cv5s5*#*sGShFZG$z0d0>eup?Qwvz+L?C z3x7~sli#(@*SWz(R(KtvH7?txKN@zv^K{z+OM#)Mf03m9Vq2xf8dsaPb;iB^&ZNww z*1-GpZ?BzkC+9ODg$5JXSSyN^^+B(T{nvIJdd2*eQj4lYRwGcCs%Fqu2>in!L;pKJ zmPmAiU`M*cvmMr>x#Cl>cMp9{janAzt!N=rpOgiDrng^^cZ-ex(d;28jvvfS+$+!@ zXCti>IZ8O+C0jf0izMmZ=>e=i-Yn_Nv1aT*)3%w@y}kE*4&9Yh+fz@D=D?Zuc2KpP z=Z>dP`C|94b65X~xO^kZ#cOTk76;1H^u(5dBI%Awe{J#9Hx9iCga&D$ImHB_`f(;qk0CY44lq-)qBwKr~VPcl;GVzhtvHb`aRCOtwR#^aWRwVUn zuc_NSTN5!ay~j)Y>bWr9bP5)2^W11K!2P~Madyc=)gVDtxzw}XR&Q|vQX-hQ@8|{v zPcD|9%hZuj?TT{t_LvFE!aZNKkQ<(q@ZizYir4t67fU`RfB6IGZi?0VpDa6;K;OwT_@gDbH4}p~(A)3xp^7kE|o4AuxT+e{wky*PEeq3&L zX1KdS_A$Jpk>1+xD)PC{kAHE^n0wBtAFZDHVN!gG$GQkFVsWxsL=19j{g2zUc~*Tz z{9zC$ib2~n53X>&D-8es$Icq%RrnOX0^MS=NsXw&6>HI^w>Q8yq4~sKO zWZEyPt!qoC8r9tuds+xKMRK4v|LA&m&HXiN5^Gd_7HC`1Srw)3`YqGi9dr|i7>%@g z^y+hv`%>H6b!jM1jP@<<4@n#l%niWB){t(4fs+(k-mq=XfAqgT%7p3v!}Flq5U1GY?yngA2d*Zga6PUS?D64{ZH>nxrplxgo5(A4Czy=&_E&TEvq2u{>R*p;R5p052vMf&e;G;h zkDa2u4t@?nwkzMOw`_g?#A%yn>!hTvUyuwcUoLgBJG0jTXQ?Fc0d8*uJSaGDbt^RY z17|%6tSR3}$GbRPIkc-oM4WP&0O?|IEdfj9gXJ;2RT1y=HOb#?ug59rR3Ykdow6H{ 
z3hDgla-0K)8rRel0P*n=M_>b6ng}-1)iQM}$(6ejvuTbm7o^u?y*U}7fPGy;z0!(d z^&9+@;`hrWPW^0lnmPPTfk9v;MnZ~2Db_Re_>gC}natfwTvl$sQ27MUVh42g*6A9{7f#G{*BEGDB+-5NC)L;`JpbXFE+(xPtb;6)Yn5`;PtKh> z5_E~X;aqk6^1bTZt*Y|;kmE63;Gs&XTs9bpm-ZLvev&K^$AN~ zer2xmqYQ2!dGjeR-6|NGH`7?afogJJj>_CxEnKOEEJx)|eeGOkH+GgBiY5ohcqq=% zLEdb~^FcWL)Y{Lvg?|?;V4_&kUufNlPiEIfkDQ4ynR!0e9{CZ<*>1mk+UF+x&d@MF zEli%e93|}?eY6|3(s0=|)_iCTx#xgC6dsBi601XmCT-qAoUaogtcZ znC7{6b4d?0G+?rQ0?Fo=-$%AC)rsNVBd?mGCz6@b&bYv(m5{C^6IdQ_DSXeYGm0}- zn3AEy`G;k-EeJzbtdz3X6MmM4;{(65MBc`URpgWHtZ@#XU#y9Kpal=dEstwmc5uvU zwr36R206uQw`*sgcX3`ls>aswp}bw0Ca_sO?9yJdki8MvzF%<545&Fl+Qr{f2&~(V z8W*PMFP#gtpS%zm9eZ}GU9avWVFu*%c%oF85^);RB3pYVayxcb!0Ybz55!ER&!$+8 z&EA$5r#_(?<~!B1y>%z~Ea>+Zh!td!O4PUSY3GGLl0ET?g#AW408wciC{-)KVG`6N zZYYDD8}Dh=GCE$ns3SXfN9g(-D6XgXY4ffo3Mes3j^gnS?aU`vhdY0a{kqvW68KEY zTHd>$76!ca?Ux~mCn-jzI3EgI)zb>g5gqsa=Sgyq0TuJ-4OcX;#BisULXM6_^JD%Y zxl;c{+f`o}TFR@t?G*w(;?Lwd9q?<5}PIp|ma_>x@>{z~lwsK=YOn6Uan_(%1(x@J<uCMK`su8Bz|PJ?R~P_>A#u5YXKnZ6Wc)S;%x`J=hB+nXABqgiFnpSfuXq; zI^LC!dH6w(40VK>oz|bu#@L6B0osgDIP`>#4Vdt*@?938d63;t<9+W|>x@^zzFAA$ zOm!%#ILTwimJB2@jGP<$;3_LiMN-N&w8doT7BAhySM;6NWHR$8Rvu~!P^(Nn-af6~ zXmG^%nVom+Y@@$Hs^Hd>mEyV+)i=ZT-a&>M!i_AGPqF2Y>>sTAW>RE6BQkv#svbfi z0T_Xz^@AMawHVYjlz6keB*|O%EpT*ZSP5D000OD(0mkET( zzAMIxr19Q?WIz=ajj2h{fYF{haMVnMqQ3@H-ZQQjE9Bi(xk%a*k7-7}%<~Z3znItm zVG6lw!q1BGT2!f;cQ@Lelu>1)@-G-sL(3VR7b3U(@x&Du!Iqmz9ZTo>vQ>%m)Zow|xm5j1Ig4 zK?3gpW7_h!#9IeYi2`oIWxS*6e;hMn^TlH-ZkPXX3lX9$ceI{+w~Dv;`>;*R5~+Cc z5Jia1m+zB>(vlx&!&XEn=endkPp0zP$d<@G_V&Itec_6^IVB}fu4i1Q$ZGW}OyY21A@y8mzpxsWlE{=Ir-G1T&t#Z8?A6{2G6ufXK; z;HrxFGruo8K&1dx#_%D_uj`-SI$|0zs3DoDkBlLNqIt$b=a+Z> ziX0cUJEx{{uB0?cTgIZlch?1eV2u{)JR?`++<=}NfB^BD%d__g%7?HCi0eU(GTSRU z6Ww|2=HmPuZ4=fS@kpRx{4kMMvpuf=%w?W2j}o$CAFq#vZsgglR_!%%u`efT^U8PZ zpG)(}au7%QX7oOA4I{xQ#w~Rgpn>fVN^D1Djh3D%4MW|RT`Fd3-6yO4 zb7ktD2-;JbOqTk*ra_eMC!8Ecii2DrIVwC+!w&Qr_D9-#aRC}gYA~iNv?S{MeRi|8} z-9R{r{WgtiXEq~{K`?dXk1ue;qL9g}^Y%wu-9hKBsdyhzX$>;#ezqL(i}xFsbo9zM 
zds7$v63KKB1~%D_TbYKu3=g>#e!JO6y5t7EF0Q^S#@N5XV)&MdcronerHHl1iW*Yt zH=+!GOIaig&PX#l+?8BZ7ge@xK_7&P*25`9)~{4PCzoxy?i*&?gPz7n+;B5HW!o&0 zbNyuc=Ck4FWDJb_%%)s?eW$ps^?~q}uiTGCWbB{TJj3qaVpHG*q4{ros>6KXlPdAW zA_>UJG&j4pQ|T!Q?`jo8?NYPpO%ck?auww&f11XG6KmdcNR3t`6jdk-{BufO09^4@ ziQn|C+f%jRh;fIKEd#Bldp!*Bl%E~t?81Zlem!TeOx)k87F=E*$O@^Wj)dlV%9>`g z=blryTWTSgkj_2;LrHJ>XU^?6{uCO%ASZgWeXL#l#G^BHU|qA9lKdhni^l?SJ)0Lu z9F1!WhhHu|KJ1i=#5nV}j%e5DOO=~(JM%+s;_|Ia6GR9$Ta{baBDJ%Y7-0m0fi_`T ziH_6BKp86~brIsr zcB?$@<4-(NQ=@NlDgU(Jk`{a%PLVapVB)8qK|Z>SXjD4sHyCKYy?FkgwVe>>%>Hw1 z&Uo$Wn1V>>O!=nV3D@li6pzebP5MFxp$vqNuN}U(Khkdd#0Cr*#>3k~-Nvt2oMQJ@ z_$uBtmi!Bn!iH?{D~VU7tlsRkhs`=(Jkc8O<+pj0mj+(HF|*?$WOBwFvrwFA?!PY5 zc#XP|J9Vh)+pl6|WP++m@ehrXLD9-4NMjw-v&H6DBL`~hq@L)zHAqhX!JORHoT$q; z38D!X6)mJDHDHfkukTRJmaB#znI8#)J0zW&kV%gcm?#Wvj4uo-SeAdkrE7 zVX0#=jkg{sujVh|!C37%6s@lQwERuL!|>OF0*`cMv`S#-mdmZ@m)ZoxSrw}8En+Fk zw>@eVK=Xc757RH}@eazV=P%pt3Y>ff)z7h8Xz|yFe3Zr3Hat-I(a1#}jk&mcF8bLr z%_QIarAjHdF$uhGGnOA&?)<~|SuWP&#H!lf$JHr%t+pfMvq>K{6+2J|Y@8pG8I=_? z^78H|%`nHF=~3|VJ{B5^(~~M(FV)zY$QH#ed>8VfN$P@f>=lgHD%`V- z=%4=+09p;@FSdQtE%$2PhMrDxsCVG^u_{-C_Y!o7QwO2qa*<18L` zqWU81S$ZkO3po)sV9Zcn9$#ujD%+f?5s}$ZREeKq){PFE%^WWMU(&?Z(33wH7U zJ9hyU;c4M}8dr6VeVoGooIRmo z>3?Sso*MuRh&k1(&m%XYWsebdyEQ!#8fokf!}yMaNwV-D2@Gu%5jPmQ9=O1I@A%@! 
zZb`>DeBM~dSQhHq+ZwzL=et~KOtiL@2Hch+fHTO*6JUjopFyfSz&oUa6D zEv$}q4|%2mm`6Jf&Y+mgTx|LU zzc#KXI%`U^lzk(6QSk>e>+GKY-CiJ5P2L9{ND0I~j9{N5Ams~TH%jn4CV*O?fJcgDBxJBFl5Ca3y=j$$JZdd-=%af|?c zmWC{>l7BtusZS>z&(OI2;%7GD#qK^4!#=S9zT)VYw$}N7*n9JDsQ*9yyF|9KWbK_O zEkcqOOZG~NqR2WJOCtM{b&Sa%g{XH#*+r6NtYe)SOH2`?!eB6#7{*I01-|mxI%K#sHp9UT6n}?M`IK*yBT2^!sMsBuD<()~)#~ zfgdg@L)rVT{czKDTePwez_1U1#=l}y%>XYtx9*=jm$7?meeE*XF%@w!X74n}MfmH3 z*z$lauaZNgxjI2R#?|N}(A{5Y!4;=s{VjtA0szPN(E}HiI;TKME`PLj%n0biNMusE z3GnfaIL$Fv_8M&?n~gl~B?vY6-L0XqV%+ElQeC9|6QJrezSl+wrS(SaXT4qr2V zUL>=VI9xIlHk~o9HM%fPAHLw||MhLQ*679r`{)w7v1vw!DG;K)y4*f3&Hk#g{d+x1 zFw#8~Kysk%{m-{;+7fpNlZ}AH+8SZezX1XPw&v~In}FBz*ScZ7t1CE-eC6G(nWXVJ z)A$n=V+7k{bok?s>jH|1;&Xt7ta}MEwcA zM=An8IplnqOWQDAGTjQqj-YwNBQ`R~|A5{MbeX#KomoF6b5K2>h_4A)qIF0>wWJRK z7d-3YCTW46e?c&AIq)s7Zp(h?f|sxzmcpeR@iUmjyw2l&kZ~R2)}4Z8YcVwplZg=v zq-8bGlsq=OEsRZ$=>LdkLta1>y_Fc}xq-!TxjG1>&gZ&lLA}G8z zDVTEwOXLfWda*!W?QAziY*nva%@lla{ifeH8Cp8Ncl>ClV@&uC4TaFYR%NJ54+Bis z6kovLCF$s@b^q`w4-?H&V(9<*2Nb|dJvfL*=E%K$ww?I74h zJ=oUYE%AoX^%F}c<8-~s>(B9oFZGd5yM9zm^YjN{I3%_!fZqsqadmKV$;$y8n}SKI z>yyc${o4qceKx1(5Niq0#NZCHL9BHNoAl$DH~t;1kU{Q;rQIC@xC?urXa3xI(y#Ju z4`cGFA-Prl{cauBk09b)d_18W?;39H&`b7myuHh^NoyP~NY=Q8J0ib{YFn{7g9@El z99#O?JDUo+ZKkD`*`P`PXCzRuyybr!slBImYbXN=U*Gwl)~ijgYJ|5YIA}?Q(xW>y z>i9ITD9_2I4~{7FjPTw4Au`E9s=CZ*dwdO5Ob^!-2G-Tmj#LvspQVBC0&R4FuCx>M z$f!Hfox9?5#GI~Mt>IYegO>J_Y@@e1M>Z|7dhSOIFXpXy=nEGA*x++35Zj!%AMT6s z>s7yNo&&TNEIK_Mn2qgp9FC>%m`kQ$0_|rBsq~1=*!HNqmwM!yCcoimd?3U(As!CV zhx(-$bGl%VSWf<^BHU(Mf=Ht!HK5YrKiHj*mk&qfHjPc^Ro=lV_Ur;7}z1n&0*oM=?l&w5jfc!2fPyY9eD6tJip+azG<@< zC%;q49*v7;BBk!DIH7InBLjGaycO%3SjTTg4f%L>$C+4@mpDE1bglH-X*IO7p88{T zBvCjghiGS#?8PtAOvt}LGgjvSZNLpDiafd06_MZ~lJD(wIW-cNF&!S3+OcQm29CLzRIaHtA~b+v!7KWY7C&jN3=v~j9Bgryd}Ww z4PJlbq}d|xLLoq}cimfa8nC(_v*Is{pC`l=u*)vk08ThJ-f-DhH%hZs{0Y&9Zx(_% zd&9-(g__DH2wpe<_yOz}0tOfL-iH(1_}c9Gc0;htu?IsoJ2YG2&7Vc0l#OgU)^`_r z-%}Xg#R5?m{-ef$mOj>Endr6XVM#oDTlBN#$&ItRa<2;gr&>{CNQ 
zx9tBpw{4kMbP0MQT916>z8R-+y1tmfDaDwJD=JWVBYk%dK-(dSTl!bj3mKtulP4ds?e|yw({j zWbK0M|K2~8ab%Mtha)C_LC9_mrw7zmx%?7?HZZIoaSqi^KzFK&hg0*@gr++=tYlMM zpug#ELB7Q1>=kCcs~q5IwpPB=kW)BX7rM_}#+&k}Hfit+Pq^$$ktYM6_$V(oS{o)` z1Wai}OI=XhVnw&B+hpcph1@&BIkn%k?XM2E5lXr_pe2>oZm3mjU1e%4b(0a3qc;X+-GcUAuRc;oRM;U)4z*_Xt7^WjeK`8fZqff@$T6cF8t}9*?SV) z<4k?e{e6bnxx^&p$nVB(5ao6jE}PIN%k#&VKCk1<6I0!Q=7ah#gG=>s zo|i4hS3A{SDKe-d-n^FIq3q{sNiKJ{?t0>a#qpN9BUnG3OIkyw*WVvs!?j79zH-w) z6Lom%n*uFb5cUl{6&e$X2~H0C$rsLJ%j3ug=$p)PjK-YrZm!Tb_9r%3_SNxxO}XE1 z_%giIst@vjn4{$xEvW{TtfS)92g=Lb9gO=-Q+ohG?(;x%c6RE?+~NyG@nLhIb#;{3 z+&7XTukS11;weTc&DnvjeiqvV@hP1;Wy!+?C~9mSU7D-wa`qdfsZAA~PT@+}mNZXa z8Ks(SA{{Dx99m9*qaXoLHr4$*#2;?b{GAlzO-qO_OKG1+m;@=#@rw4Ub~z~obL*3f zf&+ur%NMVPABvxz(A7(q}a$2th-uFMcbxmpp(qpIRzp z`JiZ|Y)qA=$Z^hztXtt>vpF1aw#dvdFL~ax`#v)J+nn*(5wmUS>#aBHBGhW+PM%Z3 zC^NpMDfzpayxb!30)tcF~nN>Xu_9b#K!fv2c6 z650Q~-ZGc=(BZXQZ?XCK01{5M_#IOz> zol}0(Xz9;_bx}NRfIV0FD8*sR6C+Pg^2DiD)F$Q5R#=J~vdvpt!6>P9a7 z`R!Kd#bu93FD^`?(vqg{9(Y$CbgmQMG&@OpI6_PLFwr_W-4_^f^QQ*%td%nnYgtrq zNJPxC=O)|f>d&rZXGGF-c3EyM5*t(>VyLey_V6EXF<_|;?^Tv_SJGEL+}J^tmMgGE zJ)NuN_2vJH@t)eJ#Sl@VBZ;rYO>cTu;^)i{pa%*+?;QC!mQpU8z2 z0`jmrW1g=MC!dxZ_Ms@!;dER;fN_C`Y4JSgO=u!YUf;AvD0|`|@y6*AGLLmXdT{;% z&keyG536C4^N%e7O`h=L{?0Et#*$RSPAK3!lFxw;waaK=v$>;uwHgG+L(`2afwkQy zZK70>(I6JCSccStlP#=9&?l;Ba0*epHKA?eTuqTnm*`JbCC)3Pvvcm}nZ5GI+I7O8 zw?5YQs!~GU)I2rkDq8=TGD_Y!+*%w=t8q2zYcD&=*$_ByV4ocRGg*uMkuRgXhO}7> zv=TBJJ2;W%j7vbln{}^AzEkRrXZ8^xcYI`Oc)Cl*864&?`Wx(%VP4iaN9)*p(!3ghr;%`T@3dVN8 zsI@*zo?O~Z8dvLL!HY!|gg?Aw=4RKe_?)O}1%nqE-wFdW(hYWaZ|lbK@^4mc82)&iLVfS6z-c|C3Ud{KDqDR*Wj1U@;s9|GY;Bx zXTJwsm|TM%#jm)qgI?bN;VQ$e%(mO(l5ep@Q1|2X8=o#Y4y6{Sz)HRWwOO8i3#S}a z^S&E0qK~XUZOR@ULEmdoYgCXo{%Z#Y-a1crctF`D8kxWLkiPR~6b>aVc#xdWA*RU-YgOD)$m!f_DFfbvdzlL z3q%fI3r@*Ly_=*@KyUZlGqWe_97fp?Brpszs*FPuQJ^eWs5pcmi4tA$(u^bTm6)X#uPVr731DdO5@RUHs>ZtZ5 zDUHI^e|Z#d(Y%m=4CzdulhlcRJ|c-n{To*?F4JS>;B`Np4Y031a;MFbN|4)IYuQ`5{z5^(42+_cFy zo7l+t_u@Jq$$s@ONn0b3aQzkatC6{{q}&|3v|p@yv;znE)NI}pdVQ#6n@V)N{Ii*D 
zqng5VtxgC~Y*CLXs$tlLA zxS6rln!>QnDaReH0sQz4(8duPquSIR`4>&sWRMPyx9sWra{z;1t1&C>a|u?KHLfz! z=&tg3Sel%t-7nHqj0+!ohh^z&bzeWHQ`s@tC?O3zvd+Y7rTa3jt!2ypZj*WlY}Aa} zFum1kB@T`zz>9OYc6gunS~eWG#dYTEcN(c&egQY`^OBSq$cSdB@ka?5X` znTi}nxzEo1xhssZ>@n_NHqs|NK@C6!4*So)Y|PZ>=%I?nRRF5hu4`&S`k9U#rAlRf z)~Dxq&90|z_;kEFK)7R>BVB)MQ=OX)xsx>&FS?m|l{kB+rvq4;klHg+?{by3noE}9 zXDkc<5Er&izRG^?>xw?{nDcpQ@0ZI%P-YDt`ZXMwp>~S#tM*hEP}18dTd?~ht@84< z&N}Q;I&u+_qjGCZU>Y4GZ>x_=zT=|1P+Hwd1U^L><47~o%(6t=3B!pwktr6S&tDg^ zDfk*N4xlUm>Ry@bKf0@{O0uPK?sBpNR+Ik>Wg9i*T%Wl4p^?yV+(5hOYD>DM8#0hn zj_0u~ij~xp3(a$bmEPw&edI=2Zk8c0Lmu>c>T#AOz8^RFUeTvU)uBtbO4H7M)W2x_ z%yT&0*_1@NoNh|p%A)awvy9QN4@IkrNySt~jwkJh-;sGlPW=#___DaN(*!WRguE*o z<~cJ*s|%|^HW@<8&L1V_v-%Cia(??R1aaXLZO~@39sO&-%N*NtI!}8qh2+V`_Z|F5 zbOx4-gKf>{LEh7((aE`xgKz8ZG+W+KSId+yS(Sc$X6fH^6Sq@duldOeKJq;Iuy8KW zXe^NYn8SMQWR1$1xv~>R6bEI?KRjCj1?pF@c!I+n3%WKIkl@HQmm)>8XG!tH@<5kd zaTP7H{xxJQX+lfODMS4{cR$^4^b^M^tCZZd=Ra)T8X3n7Qh!`RI>;0UFLS}W^HX5& zWg<(OIuueTxZ^T&PX$w6X2}BuIF0qK+J9`^FO!L=DtU8xI%g^D)|iiD>GqSWQ7S+& zMek&JQKaA~pB=BH%g;5=tWB4adg_P$1|24dC7R} ztrj3@W^o#ba(tB7R2m8YqT57Ilic|IA|^Um_}tsQh2Yq*rDDXlz59kS$}me{TlU~8 zE%h*JvO+D|cHdeu{mCEx?cxclYwBHXv1Yf+oS0`l4_nZEYT_?VOs&-h`@`6!$gsI& zEdA7c|eyC_t5fjO1=ZI)$iCHyW4Zm(X`9Z`Z=&j z=<5U){?hT*1>Wkpy%{cT;fgnALX;)#az9@z)3i{6sQsH0&Pse$(_!m66PmCnQj=+7 zB22NQUOCz4!c{G{DdpFD&Fv92_?f|NPbJFNF!kpvo4CLr`!f~5tJJL+sT^Nv1UfIB z(;`7iQWs?lF(~_e?IAo~ej~Q(pDgb)EnXkl{E8r%NLl9BjeXk8A)m|i6x0i}&%7pI zmT7Dfl<^Nn98hT?)YC0WBLmNpycsW@&)BNo)UT%+QXj$Eu ztO_$jWVph_&^rVgwIIT2&rJXk zzK_8d9W`J$p-W^IM*6OzNmT2R&JZEz1$hi>)mHD>@goy1nzdrWL{fz~915?)iwr6J zEWgL8%4sI52wXoT>|{@KJ>+tu(YM}Rs7P3hyS7MPFke1dZIHF8+jQ{#0YF8r)pu*e z@4>O2kDQo^$`fr5fpy!qwP%n&pQlEtjWTG9d2G&{7TN5ycy+I1!J)4@4yn%Ja%o}I z&^NJhCbsM~MviHqIS=awgrHXB**gdl8|1pZLxkx=>DlE){qX|agQvY+b0_*l9G8u~ zvz$3vZ@es(w0$rU(@*$hqd(;*elxLXIe=6Bv($BwW-@wKa#J9cw|Q%fN6LvKMZ}i= zld%_U&|0=XQ%e!IEMj-CBs_*gxK}t+PQjK(I!dVR5%r`U^_AZA&DxaQx{<2`kKvU^ znyYW~q7@W6&?h9dJ)=I$ownmQ%=mWX+vFdS2dpo^`GA&M9O%RUvpn+U{%?m?GxvL+ 
zn`q@doB7gCccEi4#jHHXPjsO*u-ErgVJ|t%O56~bimi%@?<{j@EX#*x8-X!XJ6!Y;ad{GP5MkLsqEx-@+=KQM-u>*zMA-`;*w_J1Nzh$UE~9YpwgMS zg`-E{C%=M2gtSm+L?#z4>x5EHn%`ie^Pk?sEXr6jU+gwLb+vUj1*3%mU%^|ie}?QvOes?>=#9KKFEYJ5qb z(~PzFVXtH*b)PHU9_491y8Zy^&?M~VLx|e=Y5L>I?1}dxK-J*l$~Kt}`CfJ+dSxj` z!|{Thjy|aDH{4vUF~_ZcZ{;8SgJURA7=OIkkC5Da+QW;)Bigc8zey^eLw&*t%7?)YmP! zjX{3OzdYVqj}$kS=q*OM%nwDs{wb2irf)#F+_({`-&8Qr5l?+&_y2jYvy4#7Cae znkfO!^u)yLuM_6_1Rp_bZY!5xp-aQ>;mTq4MH65DiC)e@IrhueL@{an`ng>(D$)Z9 zD?y}}r5SsJl0t{!tl|))mjmXS+6oR%9v?m*ADLa_Qg@pJG?GZ=UbG?-C*7JM~xZlH}P>B%N=!YPKHO(}=*G zL3plcq(|~+$Vk;^!cLN^BQ=84UUced_R^IkJ94;+6<^}!@MWd(YJx324+hQkD9c}l zM3_L(H0M&2o~zna#QU9_LWP8g_i-fKkrQm$O{FMqm!eBlur>W|^ad{DMoambXE%79 ztb~{%;2&1NMD+hll?A#iCxkcuD_;<5;{&1@z^?$#694xv{yvERe?Jny48#U_W$-O> z1R&7B9m4I=P;tf=Fi+90DoG7<_1)5x^(l}=%#!%;Yj;WoLKprrz66>{+Ru|C7pJS0 zGy_^q5!dWi7-~|>FdUPBK0n*z8@AwA)U>DzY8o=Vhu?q_j%_GAIQ1{a5cvK%j7_)a z2ppAtG<$0kz(S+lP#NP9^&rY^muiLk;BE8cNl}hY#Nu?$(ES|%ajCA^bp=#j;5{|B z9pZkV7r*`OfPVp+*Qyh)N5!f9MmOaGY!NDA2*g)&Xp&%YR{?-eZkzkOPHhR}Ea=Nr9)k>a$)Z!eOj;GLZfc&3` zZ(OF8>+WFJK>%)WO4;A2lkz6~VRR}O;evOuj`FP|v-b-EP(MjPE>f4w%zXOGu+y`N zaTTTgpDdH-|NXr6`5^3a7y#==OdlglB}H^T_*fCq`;%}SYHY)vmSL;&EhyBDt!z!} zn2nH@1VbmH-3*%&<+Nvtr?CLPQ1db5la1<(L%eq0g|TeCEktUg?_QMPNaILk9Zc$q zgDLcHr=O`5&14++BxHEYvwq9F+CjT)x|1I+w&J?aVk}^ zFe`9`XfbrJ%j%X2Kugu~hx`SkOK*TM%wSS#Ac6IL@kP5|D!ZN$Od-Al>lS{!4U|M@ z1eK)#ATaG88y@R)jD;~NsbFNv+GQA!xbb$y>WQ!1T-Xm68C~gVO)_g4$c*_9xlUu} z&s#hZgy3~9{_nx^Se=y{y^9E?YkshOX zg}^3z4fgJao1xV6=_GbbJ+!P>_s?qFE7Ta>hiio0Es9s-2^ajcA=IQ0l&quA25FJI z05DtEbsG@XG}-U8aO+;dQbywWOcuTTyY(jXqRi$>LiKXsqrH7TX<{d%_5Dq@8t5nK zW9X&y5UgkL*Z(tMMHS(KEHL6F!_U4%YwAt%Kf2T3SC%#9Mss!+DPFL-QIHhcnUle6 z6^y(%4%-8TKihkB#Km_2N3!6-*X*!awNCEu4TN0~^VpPAbZSmp4>favdo)N3of?3k zk1l+As|yL4jMbGP)4#H&fOUzLzKpE~CGY}P|JiTeq~u<*9X?vfb2*Gpr2OV{u@1$h~PT- zX?va!sftzBQ|UkHP7 zVSfG@ji0?Qo%-)&K5183T98ZkLt8#GT?187pB7BDy|XKH0I?h1MdLB^wOYx>awTFi z!Qs*_$)a3U>gg(`PGobrlFoi97a6+svJ9v%eyv@p$w%z2$w-3fMZ?k%|D-Jv``H0t 
zVZ^UcVF+zkYSElgpA4wqXgv@mppNYi*A1kw07QJ1+kw$Z+=`aZ+t&ZRFDJUw3@7Wb zZ!;}q5)*8?-F*E8iSTdtl}`1Ju8L-f{{{u`?vwRZpDh=SU?lO)zd>HXf!s1RLuv?~nwG~sOSM8PV^LonfI z8tci+01>IvwOx9C%a_1Q$+UQmQH&bz#)_J4}UA?=|uE{q%ow=lMHR61hl#;vtG z)(Z4`O6#;aae;QJ5VMkze)fyET(2@_^VgbAh~nI@rI*&uqrSgBa@H~zDEgwz#GKxe zbr54aO)=_gLX5AB12=8E@nnLA+vsfLr_U?w!8$bCJ}iIq0BvNV1=J`mH{Ayx-dYiL z7piOT#)XyGlun}5-sM8*0y4+xt^YkaE3Ky5TsEM}tGXv@6$XmIu#}LYF`_mqYOH7K)1{!ASSAGq zZnWreCVTT#j>;Y!q(%DtFTV`T9w-_Wc@I`MCk=qs?>Plh+|(qXRon6FlF!M+G#;9t z-%3*@%VMysC)aS(%hKWR_XU}%^mR&YU^Z~wg-v0- zmso6TAfRa?dU_$;8p^VSE6*hZ@k*UU4uMMov#vt_Xa4Qle2<$F1B1S9343>t2;Xag z^5g2$2*I9>0=lZby-@=>!PofZy0&jX_3`mQ(}+rGedSsnO*>8PSc)>s}3T6x!IlMVjPf2keCAp5*gG71GRm@ zW-qv?1 zSmO%~k05J*LUWe5-~=Lei{_6Dr$(46&m9LeW}zeVMaem+O@!$@nLb!|9NhG#7v3G& z%obS%b08vka)@z&%5SY!FOCQViHz%lVzZpu_?6oJ-T!wmDJla#o3sfyyAt1vz5tk0 zziC(Q)lVnM#FjECOU8#JZep1~yiv?;RM^r&lJgJ@u=&A1bpaA~ZRS>>FI%*i7~kMY z8#C3Kx--3(jYW)yQ4WLbWIQ*~W|HoQQ(ee)9ZrGa29!I-*%k=onMkn~1s35#v)`!r z|2YYPfnVfUxr?l3+hpckVPD<#t#a;hxTd~sXg;g9d?RNXW>LD_WXP@q4p^(;F-o1* zPw-_n!?h0)6hgJ72ydxNzpwX+G%a67$?kJx11`xM4C&q1aY;k~6yl)(~I_p2Z9_EF=rWgZyOf6WY5TdRgil;7;f7#N$2uRIW=jV{}RxcaV`;%%5kk$ z9L8XB0jlf0+iRK`vA)gN-5|)AXsxxooR|dUSJw{S+$|W3(~bDGnvU0r94(^ME3GsU z5f3!~H&=&frGRy1#-MNv70dm5$zxOvcUfp_11aIT2C7@P$Xn{;z1dd(z1hWN$^G9L z06;|Y|4}kRyTE@~{&dU=0GRIlE|+uZKMM5^1TauWoiZBehI&T<74p{MfKF1b9mL63 z0w@2F!+_Y7{CMSj+=2$s*LaK?BuD_t_eo^NN;3v0()1{bvTzTkAH|O z5$v^O#1FJrvg$T#1)&Qh*r{RHdM5vdNepa(O5b)so4zny(xDy)fILP1lk@m)W*a=% z4G!P>*(-BxGG@&;IWQbp)C45#H%Y_qmj0{uSJY*!CC4d8_)k-g1S?UpLdI9Y0k%t4 z_hoLKNwd3@iP+nzw(8EqmD4XP{Z$Nux?12r+Xdo?jx!mTT%~g~Y5kK{fS~}eAY1^B z0Fwo8buK;O58>*hg0~*PY_ND9R(%v)>@!N^Q1GP%7fn{`2jpX;4(D-eW|+* zZ4V%*dmX%Iei^gX0W7%Oc?HobOG zZZ}-A<4&o%-2FG8Qg`=f@?+GjOZhUzaC#P87aCb9H0ROG$j+kxfZJts`A5CAGJ_2U zAzX#OeFtU-NV%D-kNe{hj`rSY;UVnbWI`(li<=aI+nLKC?+ya79Y1|3I~ri{<$tXJ zPkzYEmMF&S?)htI7RdnG=HaGCIsreFBj=4tyY@lr<8%ySm)*vXz((uVDNahr1pd$r zp=z3H%)I?*8fZ`%fW#jB=Vxe`>K4$KF2m2+3rj_yZakl^ll(1yV2NBFP&*06>UecW 
zIMArXL!b`Y4fWjr^)5Y&PR`rZk5wGcU1ZKK8taVx4;|ma31+(aW@}|eF zguL32I!jp_Q|b(o?(9I=vDgAY(5NNF#vZdcgBr3d2Lk>6lRkygkTzHEg$rQFLrE6o z%sAK>Q|Duz{a5f7Kj{yQ#>D}K)1Q^6X)JvBqVVf_@QQ2IC$sja$V=kN$dO$BoKt1Zx7*>1N~}hy*DO!Hlx}}fQo-oL@q}QNH@Uu=cG-75e-j-jlYeB zu@5|_W7tCt9o84Ki-ewDwuEu(-Tz-U!eToXXej=Q_0GbBDFUki?LByldTGdsVmQQ; z?=9Ne58T#?0HZK)65LN+R;NJ4U0j>PpgImrNJyH=1}?FvNk!-m6&Tkq7D4ljW{P~2 zt54``bH+ICcEBLUjNM*^t(>W6P2=9n zQdb0*^e)PSfp_g-;0>DiRa>`p2nNb0woZ}Zahl^#yxLuV>U1v4ZYzUk5d6xFOsbNC zZsFnO4VS?WYQ%)FWia$&%fqwjVOT3a^oka6Dfg-?6MOVG=us77y3?T?slwb$TJ3oC z6Rdv>zpK>%*0IiT1ziWf>Roa82y9o8r((5TRfJ!LSU(tq-cTc`Z~XArDpB0eN7NQv z*>EYk7zZZGihGE{AA>ak(k>;)_Wta2Rm6+UqbGI zxtl{5|7PE+)?XjhqRdeo%HMp;ikm-n!pnBZ5*rCMo(3PN1|J9=#fp(_EHa4VzhuGk(Bh#96m9K{u!?o z98qRXb4UR7^zDDcC`&rPpiiF#X!pC|{KuzPKqt&`j-X-!SOprjwCfHkufqqEbZGO0 zA3B(ixfIbkip3rs2MrRPp?&!5?v%)Bz-JPucrjv@3z*$#;>`yMfsbEH4uc2s>C?Rr zG!`_;M~@qEu)~22R)HK(760^ocMo}{hV7Rn7s;R%;}Ui7^y7FhM30gsX;XabncG0R{Z?)) zYmTHyyKDvn(_>pZQyzV9mH6dNe&m+!M2kNea6K=7Az`7+^Z3N$pPg}$XU4T|zOL0j znV$xa1QI41%l=w-bjjSwKeWA4_U+>iE&Fxw2}`-}n@N>FHcVkWJ`(!p0Na-}E$5)@%sP;35<8C1rI1Zz{OvU0O@4 z2MAk$=Dy{CjMg5RKb-aG@`7M8d{pE)7$E}u9O(qHRkiaiKe{WZT-u~O@;Q!Uy<{m1 z5@-?Y-D*C>`oPwEv4I(CEp9W(3TDh#n;v$li1)Qim*Fzx+8z>*wVvf4T!@s~M2a#V_%u?8%Ub{5n3o=?5#!1(!n)LTO!o z>e)|Kk_=ssoEss=Kv2w7ACuQBls_Vnv+gdYkU$gl`m@*JP97G5oSPu%yPj-YXrGl5 zIee?YHNFOQZ>!fJr^K595sXW&um~9$qc0vxtIJJ$U-2$lSCCjH7$`kXTc#(z4II8> z!YX$@h5lk`cyEq@P2DGpXEVz8Idh?g3NZABfyK9wVKHR3kkDP*gGdLcFWw4TD*1&e zo%G@klq?VFj&i=7iTXtv*|RN5Z`{iUN}3@bUp(Zru&>_d;XN(cx^s+jLMofSE0M@X zahsf@yo?@fA1)0u6@RwjzEr$VHV%}EY+`2rbdRqi#PBEQnAqNI8uDuhe|?2MBTMf^ z-~jo@@B3E0QTsd|m?A3~+A4LQfA9;v=kB>GwNPR|ZTm6jGD;{0fP@|M&GeLmC;nb& zwljtNP%ifc9ArP#;6_zD5b{zqO=;F^%1}kl))hqz^2`joTk#{^YJ309Tl$aQyYL6* zsjjnTM+~-pV}Iqn5}|!^zzl{^wH^s*1BF-8mTmE~cfmkBrS#Pi+YQt;Hz+4%J0D<| z$d6z!w9@*wf8L^od`ZcDA8rcQKXLc| z8z`k-Twusd=Ob&g7>LTS+KlVDi<5Zm?3Ex1Udy4#d30ZYW>@h!HY>;W zP`{w-=w>>^R-ZiePGkQu+dC5jT4iuG()fjT;5vI0aUgj^H3(V}b#tgp;ZT)8GoVmb 
zG&=awy-b!tcz^MYP4UU9mmLaU?`jIyi<_w9lkflWdVk)Y-0XZkCocEZwR0Z%rJ}2@ z!jA5*=4B<_0WZ=|!k+Z5POP_*1EVS}8Cfr7Id3f0fDHVkb5w?Sd-1Nu>Y<6|6qvv~ zSXA64pYD-Ad6UFg=8q;%eXw*+u5}#vva_0l3@{8#(eFN)&6rzGT8t;8dDyFJ7(_gi-e!vZ) zS;FIEao;Sb>|r+`s=STvQoYj(-+)K1Li}W0sHyJ(j^><(mmKs4Nce=b~8I2SnCl zu>ImeP49sb$=@k4d)~!L_>bR(y+O1>>bkMknusWEQJd{47<7=M7v2=B)6W-EeaNlq+ZUKY&Y?*Fp4qn1{b zOIi6q;XGyQ6=Kpmu-l~rajKIYC*XWkK9gSa1%J=CNC?Znq{<&S|1kH}i4}TmuGuiT zO-dHh-)zBetb-!+J9=^lH7`21J+1&kT}%98Caa#%%sMovXYHc(3g4ZSzuBptbJpVE z%afxD*G;6k4$H+Hc=YUI=2=GDqgfs2TlPg4ZA7$W9n*Z?OJ2EF^g%@a-i^~$9WB4V zt=cD{4v3(WN-p;1yPf4G-Y1dnay~G4;GA`*v4FQQ=m=FS=nCn0@&PXcM`=#D=Dg%w zV#dbR&*6tu$&ibU<+1+f^;iBG;KyeO(GZYNmOgwUFTb7Pw)8)qM5q`_R^X_EvgU$EST>sqDvaoeI*@#k3*`PLJ9>0L~XLy!AlzY36k{?g! z`4$XF5WX8Ns5orCRlS&(?XmkU{Il5Y>tD{E2)c1)vm@- z{!1SNC4Wp}I)1&yg>m5vrWs&%Op8!e))qNWQex!#FAgyzYU=h}bZ8jl_p)3y z)o^a>jEBsOl^4#f-+(e{qeGxCCeF*;mYELO z+~Q%xqXR}&nXm3$5(F5Fa!jW^@y}-xHw_}sBPs+7&RL$O6qZ8fiMo%l_rzp9rDGab zyzqvtiKJW1zzz18c4ZEsz{YqyU#1q*E==cq1UHtU*~4vDiO>NG4^ zd|%SpGz*egWD%DF@kB-c$N^PTOLN~1&d*GpBKxA7S6OXp?)+sfMay+{F2fO%nThh# zAE|;7IkrE3x$S&O4bTlD4L=4mln>_k+aJt-emQsjZb9aatefq1bhdEjKfR}o_6@Wf z+#Qd>5Hck1rc2q=eIycby^kjXRUJUnnxc%3;s@XRNBfh$uhpW&V%xB1U4$NR;^2}G zO_0uQM=X9fXnd~4WZLT=AiK=yaa(E)-R4$|f>55wK4nRr6Yx7WyaHh3utd60z4jrF zj~wI@oh(Dexz_z_xQ)6g(z|QWwKbq+06M6h&I~UKaj@sBO)lYa5^x_lgTHduXv?@Nyk!7A#!i&d{{g_Z7PZwNmR8-Y?I8 zyHSbxWS|6ZU2%QkS`i-`fLBeF9;15&0=mqC!~I`!wrY18{Cay6T!8OKdi7r6Orx54 zU$l%R-6 zG$}w!7D+2Rd`f-fkVJ6B2wDkApNOj}VE}$9!%yM{0P9Y4_19K}PJ(KL2<_tdwTeQ) zyQ#%H?{WYp4b{=cF2r{wE7{YBX_#DDw#VhHxB8A<AgO$ zH|5Zmvho#G5|)_d=dic3Gza9KnXj2LoRhNU=5{v9Q(vHT?i#PYi3f54KJ8Q~)!w&8 zW_wp&dj;H42X`DcN^=kED~Bo?w^4Y!JA&aQ2+QxcoDe{Ubac;U!=un?3=^^|V z45i(p+&FjDcO6Bd5YkWCyD0V6uEzC47K}^eTK^3%LnY?q^q5a9q`gK(R(UvEBoAb=bwSDgtJRfR|~0s@TALCV~@ z9LAD=eY(Lsgf9y!fanV{w@1UxmJeIss$oS&zj8Wz?g8b_=^A1KkHCRxOL_Banm3Ac zBo`xNpSMh$Ytehijm1f9el7HTtfi5dSjLaa6R{LE*1VD`R>7en&KpMP?F zn&lj?)?ROYUlOO+qbTJ<{Fe72ZbPnjL?;Pwg9Qw|_+=z<-ea`36&)tY-paC4KNh=p 
zyL8huMQOk9B~K^`g;-5g`PvGN*^tvJ%RI)-b#khnk!wczrtfWaE=`*tzj>5dvs96t z0uw3cCB;{)1YQ{F;v{?rk`DqWxQ^7wh9J;j02p$bFWiaBx2>_A zAQjn)IAQn79B!-nF|0~_A7^ytfy&EQq3-k#B_m;vfjF#5-+``2lc8m-Kg7G*w}XHk zvE_-(qGI2PiTgZ}R9%i(i)~wukiL-ZV-d0jCor|YGxj?vcStav?d9W+Ld;z~$^+SG z4Ga783g+Hcs|R=|tB##GyK}Bgc`SdWN}Ec~>_FS>_gSb`@)~Z2rK`pJm;2gkov3dP zK0bSr|42O6V4H!f`|hBr9Mo(1K~oAvSp1St5CszTXhA>T0B4kISstf4>J{~Uy=+x( zDbKme56!}!9BFGbTb}3AuV_>MQ{pS$`EMj%a1*0l^WoXB){q8;GHU;Jr+Ah(IO+Li z#JSp*Pvx+j=ia^&oEf|L2SkEOp0umRTob4HgXxcA z4I3xs?){sYf^`RtS3XIKlE0Jg!3|p~E;$;hFQjRA^MuouVazL*y~1wwnd(XLdg&Wh zIko>*ErwkuSQ_(&=dn4whV%#$=n=O8Ugk-D;c+Sc^85PgYyn$!JLWW0~U z%=pl6Zt>RPtud*4y|)&c3DIxQzLuY`GMH%n^04XE+gv5v7cwO$T|IewsX00*9D2)O zY%4z|cZJDYggKr(ivmKA)_P1i;{xBP0JLHH*tI$%(kRdDaAaz4=s;1;W!T+N4@5p8 zBtyovSP5{u=ZclJG0%JgyjW*S!}Xza2N;e1rvqD=8!^0j#TQ<66zA(uco}WhS8#L2 zK-SH*Cs2aOl@W;j;4VZkL9rG2|FHL`;ZVQv|2N)ZuPj;1t|BC)>_a0`5+P$5yF?gk zb~9>(BFU$a>` zB4X%aQ>+nK==H&7M$fH~)+GS?U%toYS~S4ca_mF5(3u3c%*``%w?%)5kWWg| z^7+bH7VLIbPbMX47!Pc|fj8I1+*8461a0bR7=J}8uoq?Xr4kLUbU?$LxAXKxkPr3YrowOi?A^Y^HyuF%{C zKUhl$YRE1w9h8JTLZvHx*TCt$+p*TqtdM%g#+3Bo+mUvNID_sTaPAS7sLlO4clylN z(9-*%-ak%It!v86l_Sd;{135` zz0FoB6Rzqa^OO~*AKT+{^k6*dKHs^2UyrTC->deIHkc(BcsLahEw_=Xff-f9L{wh5 z!9YTr>z%`kU=(IiHk~F+-OGoE0K>X|^5KuzDRM5(_Y!Cr(_r?GRcn)Zeeei%KARt` z+xpnDhZV6S2HvdX8U`fvJP35mk@m*T(U$eQnaZ1GU<=Cp)Ow{mlj35h&|-79nuO`s zJ1dny zYn6a{>zPBON%2`GnDO(-7s3}u01e^`3Y8K)we7rFsVA|C;B}78#_ZmWcz|PQDb4pD zSqm1-@>GoIQaf{wlzd9^7TLe)=uubpt3jK?Ae7alHtdSeJ*1qt1mTk&lR*5g2l+fz z`D2Z|m87o$H7G~;gxVt|ANFGs)Y#3^U40s>od$oDZF%L=;ps_IV&7GHwzoq&Bm6{2 zLAI+a17t^jUtQ}h%96MQ)-QdKWdUm_ZkqKsHJluQjl|%H%*{8S=p0^4PVS4f;Q-I5 zz>(?A^2NNV^m-33;){@VMbgu>Cgt6OgUNp|QLGmB zva0dC<*%dLTN&F2&f+s+192pOBkM#*bbv_uI3H!n=jG6C#^@~$rV`unPg;W`g6(&F zxc&S4C|r72wJaG)30lF_h=%pc3*A_W>X&LqO)1WP5-eVCt~=_eJ@8~6Kd;FwLzglze+AGHX2K>hF?dhW)NzO?fotmuqX==YU@$)kK41!2$inqj_mnr!0)gS_$q`^O5Piau}_g(*oT!51lE=e2fVv8 z@puk>IB+?z8Vd0EA{~`Yw$hXdY$nHV^`-3-!0FQGPipGo&-wAtyln|fvo#Jgz8Vf- 
z;*1*^g7{yDA+?L8{0Aq}J{d9m8fw(?KFRs9^6}2a3$XFf0MHei@-+StVpy}pH0{KA zy!GpMXv@-q=;DIYmEqSgL^{l+>xHjl_#=J<&gcg1F0wYathH7N`=I(v1f#}Bfc<-$ zZ4q&W#FHDIm_2Giz8&~h*s-0*Uqd^^c9?y^;sl(kyZd&(h^=yWn$7J@L{jR*$;wAm z%?PLI$B_JKV2%Wu;mpX%RpvUS0pH8yJtFku5TL(7gF zM+`S8_)hRom}eB(_wU?43!4o;gJhSA{5jn$8_;*&J2yk8@lh>o!2jjM{mb~}L5)u} zI>S!65^g+iuH?8ml^_hxZ1H88Q6!QlN;;UB$39LKgbMnT0!=oo>L{P@zF78MnPUi? zaN)LWh1vl%fQD&1zNyuhJMr2X&aP4c>yA){#%)Ov_rGg}8=r3t6w5jnhzj(1J1t?7Mn?r0T#*BW;`Jo#v zWFyVTm}q5${>&OMRC2uUx7==C5oH?&G6mwZn$0!|=vt=nUa?>VYd2WRIMx(^LxlX4 z&RFxMEs*n+ch75twF5iCJ*ii7A@VLtK{p5LPdycNGz=qn=$ImF$hb#XgQDr`ypVe| z4T{Y2CGGazEuV=O%sx9?7uwM4|4GkAC?8p{LII+DO@(hi-!?k4FL_v^1t=Vtem(JFm$EI}Jx-KTC6Q^pH6EXt7__tDegd1XlEw2_Cu$Y7H! zezC8R)ED69LF?1>?)UbVd-lcC!oDxQZ19?eV;IPTgDuD=k6>v;_YyJm*tFUoGwPQo zA&hG@^Pc3-sL?vukH7`v?sErxIoj_zUUHMinML6*g}VV0VxvREkhiU#A6Nw<;+qSz zBdME^GK<;0vC%`Cza=n7oZVqvX|Lr6h@`+5gz*&Rb07J)Oqa&9DnA@cFxd4!W?ju3 z@w4Jmo4NMYwReOtsJDn`q|q_9>68~ro&7`FV2WDz{Z3;{B)BI^L)Rt|OL0U9>)Xm4 zInU>z^Y#|AqMM1NnkcsJ0p^1B%#f!>=`#DVEL8URvR{u?|J)PuXR`W|%~~U=9*u_1 zJ|T^%r@PhQu*N}z3CW5Ap8ya{%G>h~Z4XX*wwDuOyy2H_q@9P$x6TFj^{f;mPP z72Gx@O}PcnjS4<~6o8odN@QfV-fH#}TFBw>ld;i9#?bCkB{g0#%wdGXLZKW13)X5k zeJ<^7vAc`!j#*s+kl*HwG7hFaQg zHvasKUm`I!;?%U#R?~;{T6d*>xoh$;Sjt{};hHK-%8AW>U15iA;++aNIa=-Hx+F@% z*SuRB<7i6I`jjPa@Hq(4HEiAKTbz^yIeBa}!HiQp(=Qo_ogU9|fe@3n~Xpyjit^6cxAo7}kL!WxrJg60q| z@bHf^6QHjSGaf-uG9DOhsQOecox(3GzUb(5Mf%gEjHLg~&ord2@zn)}5v`YvFG^j4 zxVO^nnGy$Q1%@|H>b{ZWa;v}n*(WPwSRG$1P(A+~_iuY)Xqp}z#a#=qMA5Xp3yU5< zpQ!WX5Z_)G=3<0#O?+~F5yGC-`n@&P`nJkZme4Ksv)yJ>)>hV6kG3)p)NT4LA|&1q zYqTqyPuAuSE!he9U#vOTJ|m>2lzx)t@Lltr+6wkhnz??|H@*5Aw1}<^E&Q!Vm2nvD zq-bQ{SM{=Q)o9D@Q4C|VDxk?gTUq}!Gh@xQv45e}>iILb@17RyWLVpNq3mPoWcfJI!WcRUzIgc1zBYp~aKMYnDwF!qAviMtKe4fa( z#u|}oDfl#y0ByfGS7cYPsCRbOvuvQL?s0Fp{#MxCf%;e0kM4($YC;JgL?%(Q4dmX! 
z7TNmg+=DHpxtLQTh?Qhq4YJIkxaQQxxiN`gpbCqcCR9dlCpGF+SFlDUePFv%jYf%( z@&WDx|6=s?kKSMJhfV{-@zkcbO^TNFR;%FNUloJur3eOz){o_~|{L95g;5@guAW)NM!2{d0#*O~FY%C~nZgKBz*jyd&){4!a^gdfT@MzLv zhAyi2Ig^?yA0=~bEj@kxEAFoNJsAPdgYxTx)YBLK#y@I)qHtk342tW1xKt4PWyxYu z#X2JVD(>n%?^nHEfAz?Ku9>}{tgENZ6I?o4YX7YtZ1418f#0r_=nn`AQtfwjCq|DY zcpgEtj|>ZST|Oh#i1h12B+CMmL@>PeElHl#5x%N%2~}8t%A9ng@q<0ZP$4QWVzHjw zVmP$lii0sHeY+NwEoJ`!i9Na4HH}*~TOIzeTXlsmCJA%MR`FGergHoC zf7`Yiy*N{4C_V(Q=HBRF4YJd1xc&4-J4?+wPF2*&bLkrGx(^K1ge0OR;z?~pYi1*Y zgP0n7>4xI0pXm^y{+FY9tk1b^YBqywa%3ONdsS8D73Alo)}rDhmtWtW<;c6;AP;?L zBo~HA+YPL-d=v?owN`!8)y_kT;g(M72{Fu@L?}y|%8taPuY@edO%&!1)q|R+ne88( zM|`Oswg}u(Nw22I&tGTQW|L0u@Kd|L{PLU4y@#a|Qv<*4iT#dT4V%L0?`vk(IlDYx z`96AC@zhN3&?O~*+Jo^3eDIZqQz?g?!LX9#*m3ECDD^;Fp@;n^;o0_-EZ4{};q7jO zHScdzj%B;q2x8j-w|SSl{4}FkLoIC27;3rWDaiSVv2o4s6%C&O6tJJ8O}YMm(Ff_+-z<3E;~Ee9ky1NAPq#r`w9^i_>~PZ`;z36#MljLa$b zFiqD_sxr6!VbdLw|LN1_a<)P~{7Ctl7G?$1rW(J@ofuLiGoLs@GBAk&m?tj9o1S0G zxj3a?o^k#F3O{Bn=UF@Mk0g8hSG#fG2R3Dil%=}I2gKXU7Z zdmQc^k9!RgQeAAf!Z!18rs3n^WrKG2<~PuQG-#Ud&$jH;XR3k6ID2Q_=>}*d@?m`q z_dJy-BB_sSa@okIK}czZ|Sw~N!GjmEICt{O#ZGS z?3=SwcCK0D)m&o}?q;eemuDq_u&+@l!gw}6Jp_>-k@lh5Vv5ZvtTdTb3s!!AZ&51h zI{){?rh)}HvZ67sQ$pXRv&IL@*ppuH+25W02vlAEoIBgAz^Q}bwu953b&)mZ+O^k3 zi<$48CX>1>y(q)AaOTO~+M{Oz98jmfy7LRG;>+rzUXyqAJ_$nGv4ls^o zT90UMacbD8+KuW;K6rF;qn<%EiS0S>@(WZkZnRYAm4^IvrL}Ms>%(P1jV~-fnf0Sz zctDDu^)Xgw#`yJ+>N5Y}!YuY(pwT-WNGfIoo-7=papV6nHj4kBDvXlq?87DdLv?zl z<7EUu!8da>e_qTT*GTK7W8#HCE~0yFQ4QevW9U+bDvQDW)u3jo;_Qq%F zIk)BEg}DZ8(EZ1eVi)?yvvGcZqI$>bJoo0GQhioEa!~opMmwYP z{so!p+r50)JrINWuc@5dCU_PX9#6Ok5MVPgb_T0-^E1XuSZKta>NLZ+F(7h4u^duyr zQ>&xZPNi|zXQ(Q(aVpGUDOe5XVIPz*+f6hyZW^7MV*=)nEUmB_NJkHXQcwHR<6%#s zju+JG+?rZD${{3j_)MxFd$RowX#1O`Mk_Hi*{V zpXI2M+uZ$KpaltOii^cn@9*LwM4H#YMQHWxb@_C|+0x+w7c*h$_3Vz^v4+mN!`fmS z#Lm2Ygc|-zmi_)O{&u?6V3t|CZdnMBwe6f8=2q5JA1;qJK5yeP4*g44bX42!(v=&U z^k_dmTb_$gmoJ0e@n8*s3ZjAMJ_uKu24gKSqv2&n!i) z4) zCel4z?h=bc=Fe=s>-dK^k1-gSnU0wZ0z?RQkPIraNLUNGD>V80VXeTSg~27OM##Z& zQK41T03;?x`2f|RM2f>e)m{Y 
z=e2)<{3lHMx|~s}CPcg1;_TpcM~+mUGb#MKX80~3A1IEdn;YcA2CeIJ2IiDWzx_W+d@SM1zLA1`cDB(6Qjs6z=$wbH8N=pA zHrK~;bR|h3=T|eIlC{8uNbHga5CI_biHQlJTa(#eL1dStmN@7-lOFslru&O!G=J0L z1!rwfaR>6LhJ9X)5MBEc#AyoCb0IDgnsC80*>{Egt6u-voFqabw%w7c z7DTgunu&7aT`y%J5Q)0PJ2Y_-Ff-sWl%_Qr2;FUT$|E10JiV>CTWE}5C$|?>Zn&~@ zGZcTR>nCpufYhu~bXC8)2bQSt>iDLuIAAg5QHR zU)A2=vccshZPT#|?exIY8)ttzH83#ywHgB^D-j5;j{3fpZMDa)ejsLcK;Y{3+) zDgHVc4yKq2+YE0oykzA;)Sl?sI1?rR9w?5Z{jFW~so*sC-DYZvWIPQqJW-eAW&eOdYb!)U$?PNNe;YTX9OQlIIEJzj{ z3T7Q;epZ*RW4k5089cQ_P$-*#F0{C6Q$BAThSZG!nXG3fxKFXX$I#hQB5JmDoXdx~ zg@{gf(-Ti;1sVsMbIVEMJ5?gjzY&{fqVN+bo03n=&@(rr(iPNuH#@FE=aEk{3jJaL zo8@CRmF^Hye>`oUznFDQsgpkRHtLY#?qxwVL8B%LP=Ek!ODWp!Nzr_#e8``7SEm2c z%`7Li#%dWFdA*Ufyqx4OupYH4EcfV2?8YuJOS=iTA=wmuGfnym&pJ@wXl=_D_aelH ztV>jTUaNiRZK!xZtv*Xa$dK(Kum;ncbc=~$m>df#l>x$ljEMUu+qpdpNw8=l4Ld88 z#~>9YN1Z;e5TRwz?VP)R%=nE4l z2;T)`Dz=b~8N~}Z#wV&j+qHT>6;8D<{mjTTo`NNSAB{N^X~`(~yXu$$q`p2|1w$I` zc-*R@y6#005TWcyc}eBNSspl-bt1GycV=QhOf`Y}5YH#>{0n{J@)&IBsl$W%(qmlu zkWDM+z{HuOd*{oA=nt-M27qv2Ar!AC`vFv)xZZE&8|2ji`fibe>*u?3+N*VQ-R4({ zH%%$lN4@{fKk1uu7Cr3nnVI5v-}vRF3uEK3uFlK$$zS1R02LvcQ7!VT;n?Y31%R|< z<7_oo9bssEtewgRlPmUlz1M;^=dxllD<)VlbPP3I?Q#*D2D7`zDX<<3|$=60CYUBpT)^uIu=aZC%SN~M$d<1YvlW4I}>{;B@Cz*pETS>3vm z@ik&BH8IMWZKVpEx@s_!fwUALQL=a$_`Y$K>-hzeH%Jm1PAr|?qTYe4OX|-Q;&MJikr^{O;`2E2iU)!qfrtxPg4I6i>EK*E>BW4Tfk>moiMPfFobfJz(Fw6N#a!dW%Yw`P@)rWn~ z=Ri28X|!=oDYk|Fh<1)Xo@UD_GtK{QfN42<=DWtL5Tcv5O)({jrX#pCSyDJDwmeAypwGejYL?Sx>Q>z*FtWO&C&W|!e{eXhov6`uoETy z6jJj>y=&|7E;Xv!T+mo~IGVrriv>OkG(^k4%XY13I_8h(5I(~?tSSg#S zs_48!TJzk$0^15BpOY;V?!B;R3)fI4CSg%EIa<}zrf3_7d-$r`h>E^_iRYI;+@31p zKa@=x=Aqx|-E@?Klu=4jwam|65=y67-~wk!CPj7zbQ;7sHz#NFpBGzDwJmKC^K-}N zCS7CC+_JPpisu$v>djmo@ffw-0tL^fYdjf2xC=2K1EQ@#ZCdR&gEmOXor$~H9P@>@ zF}wD3Psr*CC{S7sm}BA?XmnH8hSq#<8a?4=B2e9VV^5Vt+mMYsC#~l0BA8A@! 
z`pmDA%=Kqk4urJ-N^D@1>rrM|vKn=yAGPjKFG%%|uW@v;%US2~iirl}2 zAb$PQ8O03_`0Blo%`*>b{}2JFS@-4DjY6q8?gHvr7I`P@2w_GVBmYa;^{#h3Hnp*J z@lISF(VKIIOG<3r_)^*QWj+ZVpARvgikV$bb9JR_%}3ClS#Cunoqo4XthOkOIaR5L z4(`wTbWZal*OHX(5Rc$W#HQGP!-eXraR&wc)`}N6zZtQj+13R#`Yp0^%2G-xYKW!K z((6^^EY;n&751#zZee+q7q?CwS~w;MMaIj9LNRvZ4#*ERHJ~&?HYc77K6gv{`D40w zB(}KcKOdp2H%_)h&0!Vmd*L~5=D=qmB#x~kxO2~G_tamEH`E z)wDv?jQ>Q%YgVEv62bx-TXoD}tj`xNaX;~_&InrPTjp`)d1!U1n$g1yLjj~Uyic|a zH^%e!(Mod2J(y9v6~X?fQnR(QomY1WE%brKkpEr2qyuP^XW^Q&RG;(}Mwc^V54B_- zJSo3G$G?C8Aj}8-P9EwV)xc2q*rpgG2O59w=BMA}Zo*kN&Xm+dIc#K_pC_y-$q8w% zadS%^iWvO>oM|~Pr5Ocvik|Vx{8tLP>B?EL#*oBp3XQhH8XMN5CeVbJDSaEtF3(&< z3!gpe2fHrs{apB0q1?oauji)Kp40YdyWDpsFubpSs;QMkwZv-uWRoE)ng{j1J{b_-<3QX!=cEC~AN>`z2FBsri_IQ+T%{@nZ0 zPsC8qn}=!4i6xi$l1^_uQLVZPqQ<`*Cu~KGWQ3z9AFwRNZ1FduTUg z$3~FFc3ZPR%wc5^OX~-Hr%SpyO_G#HxQ$i=8=V8=6r;_{mM+w<|%Wuw@;S!>aEFAX+$%H(ie~XbhM7CkM+k!{}F1I_c$mF zN>>}`R9i-g+u@mS*F?3u{7s_XygJ(cgTy-iv%FOKtC-k-aDeuHA>ALlB<_$8W6Aac z%~G}OKJXI1l zXnGyrT&Wza3XkL7(uF}748xM1i+446i3Yngm`lFgmIy6H-(?LY8mWGdY?--QEL-fN zTjp^=*=2!q1S#Ht-@ammyvf;4WE5jG-IDGkTd|gm_V7$B06=1>N96D?FbuZ_MwVat zPrf`*;gCqR6z`%+#I-9L3|NSD99ORi<6XJLX?aHALb^-W7HD;uGSa=it(~Bnxx}Y& zx$fg9eXFZ5mH3R)?|L((hP3h0brq0@Hbm{ z7Orv(lx#@fxf3?(;o^QUthLh4$oP`FM;?y^@)1QGNq;{BHqguPL%VvaVL2a=lq^JG zI-pTjwbk5^t_!Gw&8lL}jKeZdZ%M2o6czj7IYGFcp(3iL;WCT?s$js)J1+G;?~gTa zvhrl59TYdN{G&bwW}uMRxtnTjj<e=8VdN>$X-kzi z$vdz)!E%dMRmltH(L(`y(tztn$;f-}{l2q~+5z4Y(4fngxVhhOItq}4c>SZJSBwYl zOqmA>tw;|k5vMGd7-^bY($Geoy`7iJF8U>3LmEFB3fnhw23)zal?P%$u&bD*?qNgM;zJOUVkH_ZaAx(d=X?6q z5&}fSS_wz=(j6M=s%tx&n0^Rp`U`9To;TxQm~{FJA@4I=$S{#4e~4YX-5YeLzV@$MRJ8Ng z)cLKuc|m?gNCI#C@Vr4B!3v6F_39j!U}@jpWluL`Pj#-%<2OqaUo05A)zi2%fBm#^ z|F_`HCz=EcYjHKTGXw1|+aHk1)LaQr0Q`0i8ASi=W6)3oc&YjM7E zEhB8~nBlunx7wA)OkQdz>{cAr6I?IliNgiJG_yw|3^=i%Yeu&IgMMJL?=m)1nlGc+ zL1<5Ay?QysAiy%~dNV@{ssnQ*t+HU*@ zc+gblzkt=9o9aRrBL?prv{MIc19lE{}s&_LL^XqkQ(xVU~=xj(5T zh$9zUF5fD_cu)9i=Hg%gs~9^xi!Cf%#=YTNO_ zYU8CDH9ix()|2=*f8HVgtTlrhm>M~Jq4O{$Cfh3ZkVrXn*o_9}*cJBTr 
znaRTa#=Bm9Zo1!35(Jy%t-ozfm+@^0Wo?V}h)APXWUholoB6;4r-bvOn9#>ba8?43 zcwd#?Po%%>L7%bhMdtfjZkPrk9Ehq=*@6e;A*y=|gfM10Br^sb~ucGMs^lEudgcAP#i;Q#1ZV&|5fT!alRw1=`rwuUXhyA!>S_ zgTB@2H>|(Ul3F8l-)#TQSbge!hjBh#*4I!~I`gyGkvw@;)0#kJ>Si^mU(&#p|Kr=g zHrgvA`I9F?)l>+7(nn8QP;;K#CkG%#=zTk;__8R6V%K!eXkGC%iK-Ym{v+Gd5>R6S zYDvYd$Mq{kXe_Wqt@jEreEK|K9CrHCa^)9~$TJIkZFm%Hdsuic=haxIi`82{`>!9} zypMAZNxXA8uZnTGqs^W-+iQq66do;ES*Gw}1(@z-ebSm zfAmte(}wzk5uFv z4CnYGYEQ6)?WBhdyy!^=-7E4}-Bj8Ld&^POtuMS=6GjR}3?71usco7LCFb@jls+(} z6@_>u&UpWDWvJJDTv=A!KUHeI;tSf?Bt|jEhKTNm^q!gAiF=OnEarz4^mNt5bo-U1 z0DM6*w4QwCsNAto(r232W?svyrh$Yzog39=&5bDirp~$``G!!}Jt4Cv&ocrM9n^-b zQqTfP^WNWE4V)*`Jv=w@aba77`S~5wC#&`&Rh+)6eapK<~`d6QWBZB!{=WK6Z2vJrOPY! zr7m2$WpTc4*}2A;#(mFJ8}cCj!)yfk-m5E8?*>O>Uz6Kd|9*dB?F;AA$>1(BHY|Oh zR`x|==K0t5Y-CHo50DTs{NE3Sum0CW}H&&atCt0l7Ys92;Q0$us2# zWEH6p4GQ*F1aQTq`N>wuEA@5VjM77BFBg6VWsIXuJjE}6b@ z+*r@nlYAfe)0n?cB~GE>?3>Mn5j{)C)vA_}xD9ZcX80ueR0{bqsMVC>58BgR3}5*V z@d*<6$Wmj&Ob$rqjVb0(>k;a>`k6CbZ+1RA5xbMlOd3F1H!4-b@e(+pGA6u}4 z{%6+6NF4Gfg6}?XOS$`CB zE?cv1W+E3HoywA=h>b9--rF{OvWF|W+KkM7kg&{;vb{OwzZ3#wkF-9RPq^V^9)Eo9 zVA|MI8YcRdntHx4$^%mOh)YOaq6--(2-UW#&LwGPL}XBMl~-pc*?8`6*pjo`bRypS z>ArYY!CpIrX?bCOSHgnomB-y~5FB=*gQp{Z+pJn2nbmBD^hh=B{!~3#<1^i4PyIVY z-sZggEI8rP8M3Kcy&hU19Vz>K!a>I8d;qM)U2d{6S2%R2yJ5qv-abU3X$m)aqI{eG zLcW6X($OrlMZs&yYyoY_s`hIh{?It=(C(_|;fWN{i4eJZ3@ zwf-mWp;~@PL7));?5<&;tVQ+ zuiXgOov-@Z%KvJhCwyy9G7#xx!T0lk_)c8r#`~`~vY%mm+)MY!&;EZ<8?Vw?u(Nb7 zU0sAYC%qcfbR%TG*bai9yP~!C8HX4@{Jqk1TXc8P2j~kF8$eZNd=}Cg`3nLh1U_l_ z6_s#eJwez$f==)^Qr+m7wOvEufCjiRB$!T?)?D2%7V@emti&wYtx*9767qNlAUPqc zaf3!0$lz|p?&W(ZaFoLv`vB9189jLs_J%#EGv|k%e{{)x{stdxH*DyT?)P@2!w-X3mh7}a*Fm!Y z!0CbRh@FYiCM~vb+Dx21_2)9ThTo7<)-^g1uXJPm-#@=i@zH&4cBF*hDeh( zr?mn2GhXMX!M@z~wCkmAk+8{~OkOfwA`yf14oF*JYdzUE7;wT(6Ohvo+%MO{4lCyN zH zfG*TedO#~;%Rftn?aM!DUWjjsBrrI0%I`DMm zN$?tZv>7$wxuo#VNpWYkX$?NL5IBbt`8i^SMwMQF>Ou3$!2!|d-2gk?TmT8(p+;@kq>(JNA+HiN0IDCcdN|9Ld5V{S6g)U7btQk4$aVXYrm%=J zPnKZFNLHRl$pi>L 
zJGd{?Ok)XkA_RP~&5`rs5JT^+09VCYH<4$jtnk<5i8Cx@q2o`V(BXPJ5piO1I5n?| z?`*W&QpgAJ5WuECBRWZ(4YccZ^Vy5sBSN^dx0ft_xjb9s&a@XBtN2vSarxx`JVHv? z4=cf%y@J;PP0FNDvu5?-cHT^2FA9TC#dt*&cdZzZB^Dryo(2Q_D`pdMA(XHDo6}N@ zdG!{+vs(hssr_u!dWt>;c4c?ne-sr|x}5SvGJ0v&A8EDi=(Q5W)%BBtN6^*=JaBaB&ql=2WMz%( zuxHo%=y2+d8Vc=_ZQSxP#HxA;T6E*1R6=gJ9CU5W%kec9vzig}BpQd$H)1H(WWjB+ z7TL4DEeXn-YG=ZSvj6X8u$ft{V9GuPq@V^M`ZH46yWe)gh_OeWB>fFt2^u|7xc&3nEGy-h_^LhKhSO|o zlC{bm#1Zn3fNq#t`#!!D5x9NPZtvGP6Ww%8Dm5idom*>-Oj4HbqqW9B9_)*vdRF`Q zL=_IaMVr~O4<^D6U6kt%k#bPN-#qzZ zGQ%+p4~Fb*{}+JoShJG?K1XVE?rM(IgRtk49H#Yn2s$ zUs8m0v03qHl&YYzDFj#4rdUzI80dzaFal?4x=m|8y?zo)V@KVa=bGl;^}?1R7A?n* zL+@!=1gMi0n!)LsYG&x^As?fF-mVQm84nLd!3)ZZ?3-!Yn4L?SQZ1xIO$LXz|M#u+ z4cuA;b5QVs_}u+cOR(0albGMSQ9Sq32|fh+HAD5s16?c#$am7}U_T%@@*vc}!~y z0sj+g@Smkj^jZ@c;^p&U!)}{dYq5MI2PL9)>sFY^b*NaaHr}FE0-Qvf?m)g{VtHxO z&_g{4DHJDS|+6}qXoDmF%QWcajsCd7l2`87x1)f&qzs;r`QRet!_PiSi zEJ#~Jdn=HwXO%JK<{SR*nDF6ZfLG95CSsp$7t%y5U6azkVyHX{uUW4zQaGt}=|*iB z&K2}o)c^CIY49L`^pSgxzGBq>v21lzyMDdRx8C0;?uiH4-&d}ZoAhTB_!S$E=nR+s zB7+39l*oU~vSj*V^FvEYv_0;^*u`VVNdt;u)lcuE{@2$@|7iSR3Cv0Hg)qWne`&#wwgT`9;cpJofyhFds= z+ll`wU4YxhBzeouy;_4Y%5JVo1>qWpk=ir7V6=EPL(`nN-wh(MX;i_gVaXY>eDYK z_E&`VOY9gg0sYDdvmO(@IJoWp~8||?{SK*kcITB zPZyn9p1t$uQ*vrKDHH2({*1}}>n1N9bQDLj3Ia#-1J~>v*G~2PLS`y1SQgKb%2tPM zo8MPGeDVD2uNV+Yy>+AJ+VL|Z#k!JW?{6>PG;NT1`rNh7-E&h&;>fL^%A`N8GL65V zSn%ic9IX4C^IST!H0TY9S{!s0mRCD#bMOu^V$zkI%v|#RGaz#CiUJ za9#WNRHe8J!vm0~(J^sM9EH+UJzennjnRY3OZY34%F$fHh1{ta7ydmR_Pr*__Ve%MF7bN~yQ3C{{A6|Mub9IyX-XVOri3{i~QmQB*m7;JS&{~h$j^?ThX1x;!#>6x0lYX%`N0nRVlzs-w^(!9pLif}P_cY?6Pl{z)N#(SxU^O- z|I1Aa2IQKSN6b{>A`{zB2-$^JW;g<2T*S({QmIS%IIy4Z3#;iD6F zW4{hc-L%9Wz_^_MelWx>HgkRJpBBQ|$s86Ha&;LTs6QDU)y(~jX`^npY~>TLe^2f( z^Vz{Wf>W~q>Tc-|9;rrY1^;*3>zBF{x!JLmrF=IqV^k*Pp95Q8lN2U*GPg{VbL(>} zte@4bS9B!NNR;Bx@ZfsEEM}gyj0ewt^*N`te#}UZyo^9rh1dIF_0QYTSs9QfaqT^q z`N2sVuwf1rNM`K(HTT-dpa(+Pc#_6EQ;F?~zdUN$3ib&9fTG{%dF`YnPd-QOq-fN$ zdZ)TkSKJ2k*~#Xuo~ok!g;`ztxap9x>wLM%KD~C)(Y(G>%4K7{8pkVz_7eYA3sx4~ 
z!^M*F=2#tSsNWTh+-oJKC^tQ&$%ilYu65-cpW;TKb{Idbf%d^6GnCDT9q2{$(eKVG z-IVb0=XpsTjG5Ng^H2TZ9U8=CO1P#3e&M8k6JZ+7;rh7b~}o{0&l4qCx)#)e&BYr>4~fC>Do*) zKy;rUDrZw?#K8|<-mwhsN?%ak;eGX0SwsxhaNj6L{xYAUAi+rZO*Sh|o@eQR0e@=o zpb*xWp$z>hReS#x{{xRN4<&8?zO9w42?qA+326{3JafAOsk~KcZ~TnGgWIFQZ#@A2 z({&-PUeFqbryXX(olHBYex3Ihagj(R4H7P4!o@6-@>zGv+t}%h4ooS!S+e}5B*ia3 za5Fc$l6S8c-%_Iu_hrLHeCGH2F0&0&HdY%l5_RCMSqc!hh}ha!U1y0rpw~QQXj*&@ z>wQ+wdN1}DW&iGF*BZidwug8^(8aC5)Uk6hB9!O(eVQr`e z$ZDtU324* zSruaVo~cuw1!M818<9qO^%bdl-^fb%LxsjoNe_KYs!?La!H!Iu`oT0ixUL}2{^=a$ zF^jh?YXcnwAF#=N&*)0%9td${?!Kkga(_;*MVg5XIr&p713lkj?iev4x}x)HhXcb< z(H6zswW%y$Rl$35TYe!w1UKCCGhu^UWJ9=~7xcv)=k+r!f}i(=w|o7_L)+k^i_^?K zJg`8w_4iI9DlC8BV$|U!D-MS$Dwjb#tyMo`*2vOql3q#7GmsXBGd_X)bDKyETV2tn z@ypoP`=qh+^8NrLF5*jZazIt-LcS>J=}S>m?A)Pp3?ZCO<89V`B?K~fVUy*EJS7jS zxTPohp!(o`I)V>UuSq~qOALIm=~d_R=h9lSsldq&bVc|YIar@`ek>{HjNT(j5n%GQ zjnXGeJc$ubN<4`K>jhDIDDQ^3O@dY*X2eZWc*|6*k50NL%{S2`aD_|p-5oo;omgJd zG53<#aC+z4m?TzGv!LD2VXZ!2(7`9Q4}MC$?%O@uI~KQv56@;jN?c!6&qwB!7l+_6g^^STsAlMMvcp-Dgb=rec~|A{3TT>U76)5H7{1X6HF=^j(<9GMii zP}-6?qWL3z(0mvLe{6w`i+Nl0Us{t}~A^x`bWYNihe%?C+A9pCu=}x>0 z-Y{=@{lJf(MisVqZLE7!PmJLh`G^OuhNDzl61L?L0lbTw z(w~dH&W7eo4+fEG>*Z)!KlXHPd;uDhe<>135OE)c86N>!e7lHyP1xYGmrfD z*Dd$q)%kzk++M{^a!~&9)`t4saliZ)sj(x%XJoe8<**xi18+1CP5>KUd zv=l^m>6S+TTOPGLe@m*%#|RtRTRchLLh+n8F9-WIo4t4rz?8&#vfow>(ay>Fr{Dbk zt~o>3%1$c(<3WOX&as_ z*V)z_edqd-Uh?AX5A4($=i~*y3tMMn_TBNE!+)39_@5jYHAyt3fwH?S5n}n@* zgPai`9_{JA*WFYuy0Tq2qiK6+2l-x=hZZ!@QZ}(+{mQ5Li*wwGP8Ty=oa-Yb^avn~ z&rYwUI*tT_DaWAl5-W^b5#_05sNl7xdk=*79W}S|bm!|fMELa#Lk@nyyn?g@upRsF z${?Rf)df#Qc5g))0P12J@N2KqmE<9XP0-$Fkilocw0?8?^I}2iKs`*1w(J?J{5R=A zzHZpR4dwI>2F9p10(b5UwxK5F)+x()_xS0I_0?G z^Jqc7HA?@+k-ZGjH_E}J*KSPjl;TB7J8mWRLLCi-+OfzG4Jy1W1n-z>TiGF7!wE3Q z+_ork{V{0lBUe}YI9fK2x%GyP%0i(|Ut@Qb$x~Ln%4pa#b)W0>YoJVTVMaoe``H{C zK&Iw%GFQmfcT~_&p4b*;M{#)&G|w(n_A$z_jyz&#_jSl$!tkn#nkyW!V4 zGluuh3nR4tE*dsX7Tgs*u8*HMdn;@%%AOxKvR6!*H>cE7>o9l7#xk7ON~tk{YMQQI 
zET2aPY{uQAKmJtw_&Lt1=aw^L9{_j|u9rOPmS=)n{Oxxg#KT%!W9*vySK?hCtPtOY*BvRQ1 z;a|x7P)pJ{wme)ZOp5(bCilORv&n#4j15NStN4=C&-5%^bN31aH{P6Yn}99c=H}|L z?Ai!eXC-?+l%wp^K9h0(Qs8zMdsj}PK#?ujxi=??48g>J-?W~_VY1T)t;w&S$TQK# z7qq@zG2h?Ro^J8*m&?Qo7Aa9fGq(}2L7VMX;|D*C_R<=H_-J%Bgx#o_8&snrP>@<2iSA;7jQi9SMrRruv><`yQws;5!~LgKr=1;o%O> zQdJzvmiDqAcR)8e2y3gCwaj(Hk$>5I)#HLZ+s;`*e0j^2A6Jrm=O=~>b<2N@LN=Qe zuNl4Gakq5*@gsYu(KuT}{Y*h8ZANOc^Qb*}$((Pq-Y0~HV7VVsm^a8NGykgM$LJ)R zX|5fZfBsqgV1ERv$r?bJ4?}(KaQ{LOtuqRKGvvt#T8(>%_qmN|_EDc`7uOyc{%h?Nm+@MNe%<1? z0sA9klt!T?Q+s5%R3a_6#UT4hpYN1Qre|9uJFDwI+RD@3YtQlz?yp6-mNi`sJUhZ} z`#10HPgZV|grS3^H@ev4uBgaul~#9@ec7~{2)$EtpH}|gPvo#5Z&L{VY9@q$-`YiM zvip2&yA?3`N4HS=&&#}!xyZ_4i3BG_8Q-Zor6k8aLqzkv82?X8BttwLks`>$J%w@U zpA~ioJ&w`5!H+HN-h~jU-nTY}LC39S(|ddI(#PUriv0}BL%G~!aOKCmjA~E6k93(i zF|5~HsE29kGr*#qki5LYB3m1##(_OZez_t`hx6cRf+S}attd#dAo9H3lZ8Hg_Oc#qH7Mf1WE*RE9|pGPwM#ltP+0M#G; zE-5d-w42%_g*>Z%X!)CeK;LBW*bxd7-nvJxvUw`D!WS~%XTNa)jIYhr5k}A)7;Wig zk+z{`=TI?j{DPGY8xG#MZOH7O$e5Bi_IfY#VQ|;Vwtnj+)sq};yBR#aSdGoO$`&sjv@3|Q&gN)!AeeKq4dGjl5rNpUlmlSpk`|D|F)ii}hP<#pbc zpjj@!^b`{KDEo8I)|f?a$M)Rr)f|agenO^BYv9)se0)@1qrXXKLbLV#)K!~BhZWGV zU#G6uKK_sF#pNDGG^O~n1cGn71}GXm&12IXX)vfGDiTkpRFQT6++@YGE5;kJn-8_2mM59+ZN9Pu7oy|q_7i8w>M^0rtuzNS2o2Q1VdUE2?d{p#p+~NXS z(T_IN5V5DY&fU@hs5&p1W=n_HZl64U8oJzM>+4+idc%l=*yaR2L(>uMBOFSkyl_ zr|0Q+Ryz~Sz00!oz+68o2(bY5qj83eKk7)BKj7iA8c)Pudb5p@?S_d{^IKj;N-eSZmagTY&)CB)*`xt`R{h>3J0$vDuaq&-3Z0{qVW43wg1@4reb$p z_slmE^UANQYujFjlB&=Bx;)2g<4dZZyqfD0m3)f^5NajdmT>HcnyVxcd9beWOJN+? 
zx|C3Tn&L$tM^}Z{+jQRGxhBwv$}%rz}eer0OR{)FgH%Y3e=N;pCHYNwWEsv3hWR<7~E&aM!Lofe-io z8Och?to}<{{$aaSnrHAn?UO14;wyZS|lFx+dR6(N{Z4<|2(55XLD=YU^J))|TrpY()uYG47<<{!jcX$4OmcGu#ea z*1Y`lFc(6mGs7HDk`%2GCYnncL>aM{LHKzoaBD8wYetU}g9Y#($Px<|#^US_I58*|nNK?b@ z7PFJXaod4{m2U$cQ zck-t08mjl`&$qECK(kKTqItMlNOUKjRsY)GR$Y@8Kev>5fETR_JKIbk4H}A*YGOgl zNP%IiTB01p_a~=FK6T2@3`tr|5`jk7lQ7=-qp4d{CK^ye{(?tPsJidOt8o^f?|44`j zwMUu$+1agCBT`pR1 zWo2YYnUS#q;Dw*ngPC)I3@!3SB5zo65P17o9|GF4XH)Wt@uZ3$juG^-hH(iqd?O0- z*8B{;M-@RIBy_H5Lm`@drzvy8q3)`S8NOndt`3js>z%Ov2FI3=W zxa$M51bLe3=WDWY9uO=k<=R$?@Q$f+O3YS0 z7L}yaLZd^JGS@5OpG8f3Il+O}q&fkb`9IYDvfbrh&d5#VQzd3ctg7b&6T=?+=SXyP zEivRKSD~@rnQ-y~Z>zep%0R{cBz%K!_Jby9ye6~EQ8TGgg8$ES%v`sx?ICQ2*;u9C zmS#cUDC2VU)WSP{)v`iGV3N@5dngC>FuwMS+Ox)tU6?akAQyVwyz$}I1^noC841!u zZDBp6_@QqC((gER+dbZ)Med0vCH895`p*7aPmvYX`kBky0T=8NqBZ$n3vkZ2 zl;;+xBE?8ON~ zb^9+KqcrlCE8PFz^R(GjCnPOkF!955z}S(@Fw;FC<>I@_rS`UH-WFpN%4WpLwK3f{Yj55GxQ_Pz5Tt8srs(k>uCOvQ;-f-TeLB1) zN?DpucLwZ@5Aud*Pc~cVa6O}eBI5M0pmX*{xZcfIqZ|Ajr||( znrIc`+k>$WVbmx5o_CafuQ=!sS)~DgfOXwb#sdcjlIa6wpJkQ@7OAA_w4`tx_k<0w zA;v8J<6TQ?{1eE3EJuN4?Ye|qe8O?<_iDuv+k?-OMcE#VwS=B3!la#sg5S;>c)X~^ z;^Y$&IyI%!XCEBf(*_HnG3q+Go&lpv0ac3^|4M-#b6)(O%dN(~e_o9B?0Ki;p#b%k zDHe{{sBV<7A&9KNfXnDP@TvCNKVeR}y#K>vNbV!fe!*s_9eNT=TyBV<^dr%u9_+)~PSS@_3&Wh<{2F6x?6?-e++aDLzhB|alU}>H@NAr7`oyh!!gG7(P zUTOuZ_Te^W|GPWC%0J@{5TY#e<=`l7d)?WuU-gsmsKjcW3{TWns!I;O8){XK=|CTKy5f6PWd0<&QXPi{?v2t+!AmrY;ecb{VJhH!#o~LO>36Pa3z~ zv1G{lwy4S#38#63m{cSCA5Pb@ka2WkG*xqFhF;tx>4 z$CuqRsgD7OY7?eExM;)HK*}(R{&r zIR-@mjT`0yKy-hc2<^TDcLRV5k3Dh!=SB`}?OAP)-`3neE?(1Dzx=TQg^XD0E=`7u ztQ<^u5R_=)cp@vRbKXj}H-=a|47zPy{wgnIbt%&!gYdZamxlb1ll4iUMg|w%kokyFWt5BM8AX_l6Yw>e8uj{aIsqc$5~)J zT61yHhVsK3d*F=&eLvqY;=lli6^{g;j4{7g^Gx8tC$BFeE1S*OX{Fnw=jM?WCq6^G zESXEsuVY4>wMg0?c(m{qi$a6d%M6@D=oXsgLV%6mvs#shl5;X%hoaT0_Cx_b|0VMu z+|xU+eTy2P#Ky}*ldhXV4g{Q&K~kbUr?J}VA_gd~3tP_|Q}Xyhm_clcvcA|Zbd!eA zt#LHeJUUb1!LnWLgnl_W-zO@9uSRRcjZl)RzdQ^+oQw878B=&>^QS6LY8QxEU3-b@ 
zXA$DriAg;5F|qd5&^L)(ANQd1O9nh)Lp67B7P>w^qk0KbwXL(K|Z}Fr8Pbk!n5^ zLT(bZ<=T81gx5ObkrC+1g-IH0A-fz_joTzC~)sls^vb!x5Q{4h&SIIKi*Uw|DKDvWbXtk)3a3C(K6N5rIFNW&> zOzbLeP%?{lFNuth&I|Ko}gcJ)10=qO10*n8JkJ-U9_3_y(?7?yczn<=&|vh6x??U zw?IPMe-6A2-or!r?+kv}&<*K)We^Q?xl+>>AH8eCWD=jgl|0Omdp!MD2WPYwQd^dL z%(KfGf^v!9-sr1?t^{%dCV+p={ODnKG8F0WO`iN9FXNreCTCLfQ$KeQQ2g84PCtY0 zihSz}u_RTpTc$CM&J3Pz^y}jKtlc@%lJjiU0j2eJvvJM6%M4Peug=?@$jgrO^L=g< zA8mOy+d=Q3djaJL!8=*|f}^Ty>3O*}h^pBS4%xmDxR&1e7R4un)LkhG4BN$fnuS?Z zUc6j<)VN5KWFXuHitd9!%DHa?SCfv&*`hB0Cu(-{N;vc&+WZK3r%Q<&pF4Wlpb-SL0r>0fa4V8|K|3_X2gmby8k;%>a+9gDSr^IatlBO7N7sxmn=Bi z!@V)v4yY#nds{GY7VKRN_El$FJrC3m*h~daK}>26`_bgH7Kh4zgdMdtT}PdAYkuw6PDT*3fgr)>QA><}S%-8a@GgJN zl{#f6LM4_rM5Vp#S##IDxj9DR*iS$IChLy5_n+0i7@ZX0>$$&HI%XF}RYGUI`mS0sDg?QT42| z&id~Pcx+OT=h2#^dB)L<-=Fjim~D%0*QVfSDl+m@v(fuvfQbkXzK~C`n2vB@aoi&Irp9uIQ4NNIjc~Ubg zWpL)WyiR5igZ4Rgx@IL~W~z``?dVQ@$mj>r6+4hw%pZH|D$gpeIXf6ebh`AQr~ce- z9M8rpP8O;<=rmE6xe zwpq{howydN)i4te0Su%h0dsQFOdw(Rw-z>q!+W5!IivC7y{u^mfMlap7yzEvnr|+daa1TWH zPK{CiTraYM0WP;!X857yfWECb!&Lh(HnmC*Ks#T88ub%fL4$cnx4GM46f?GlxyeaeIEX9%6ug$4#;@i3LP zhEiC+3qDTybN;J{Ka4*Bkh#2kFFdaWeBW^&7!oz--iob&d~5}g{>0W(=MhxL1dDHC zJSvx37PcMIxbeG3Lr2Se$Eq8R5=1LcrnoIu;vg1y#!i)TNw`xOqhF%BHMVtyZZ{G(oyO(}C+ne`9yFLCwXui!||(w+=|!XJp?Gj5vXBT(`WqBgFY0(&Qa z3?2p0`&Th<&BLvZl}h~c(Ng&HDUUby^9}Nt&0F^&KLLw3mt2JufW!@n zAkmk+ehBd4ygU|F2jK8rC<*=5BCd|+7*Y;ZcWWM%Bn>=d{=UEpMJ_Bjpcg-VvcX-a0`x*Yt)KmiiQ*9d)KdB|sJ-s-_-<~{-;`I3{~rRXprKGj_CAj*>|bKiaD|$!CpzMR-fmPQjEgB z(SygULzR;sgdW9Aiz~qz=-vinwIFIeHfu1(agZrvj#mkN55>ZP8WJg-sHPGOO(P59 z98!&(%KX6Uoy3lcV$xU*ZVl45*J_r#8TuE=^%gwo+rw;{>|oHS4N4K6j>!Z?l>iU#+8hF;j^M#0`!&GL+9spjrw)|!rJt>E(0Ty3t!ae z0zqI(UlX7T_^%YNY5D%QtAOXpl~L!gC$K&KT&x`p4Nl6Ta#8b_Ui^1Z;>-`ul(I(_tV zTrf;`V?aR5MePvy{Ud!4-g0nIum8y(j=RXHO<|MSs33xr|L4>^(Fi_h(A6lP?xsU~ zqyEg4)ul!t&&#;*=5ILynrBU2MvHMyfyJFmIb#%T+p`Nvlj%Xl?_RnoaJ#tseBJWT zwF1$z_k~F>6J!HrZe97JR^K?54PLC`3Q?na! 
zUgEYkG)qH>qv$q@O=0@^Zq^6BlspRAY34SqsxLe#RNn;dcm(>Yt>MzKO5O~!@)29A z9Mh*Dr)TGOk|vha8JR|j+zY2ymbZ!iD08U1dEY)e=H>TyLiF{bs+v7H@e2j7F5NCB zC!Y-#lo9+7`+XG|hG?b}dD!7&H2y+Pnl8Y+H;9$SD@yuOY-sm+rg1^*zH_LmKCokA z9!5M>U;E|sCkoMAB6t9S_s@cnZ%C?W3!m=T4Th{VN!v3Zm;

I$GpG&iA$$^+lHvf$cS(cKcKIg5dOK%CC6fYbpul0;R3DQR^ofjV)d9@RLI9~LHdp@75L=>2cHSC1a(5PWnoBYy-?1{RP+O>}QkUUA^q z@p_GRmI}@67Y} zDED7SM~z73Hcur8&@|uU_Z4{c%XByLDINaw#*^r=`j7M{Ks&r%LiURMF`~SSIHTn8 zRrj$TqW64vKsiMMKjc}%6|>0iSO(fxLm!ZgA6l>V55#$G9?{CKWeRgToApM28JG*K zGaz=FKo!?}G8TW4%3G)ZO=p=>8$q(w4xsriv)$9vUGyBtU`P}_-JK3F?Kq@lj{R^v zn0w*?CqhoZKFkJPkUN#9$7|(w`|CfkKL#tYgKze%;P^eo*bfR^>5~BDwVMJ2`x+I< z1Xh5};KS!&C`i$61*6(7)pB|l;IJM=yZKK0kFIyR)$>h`)Ioe`4s?0RBDSm6gpUEU ze_+#;paT;tBv9nxysp}ur_wNY8S}4GM*~Ovfdu?P7tbqA6tVs_o@@kkeZK(WaIDS8 z$7}W<6{MPRcbBRiiA2V=JVC06jaL|}S<*~2AU-;6&4lcHDE>)k0eVcMWfi>rOZ+Z@ zI4h-0Jv*@jLU3$gKnuhiI)FK02_^>owF-l9xU>Ayfz3NauQWS&XvSwVXq%^j>O~wF zToNl%*#Xz9L;!i|DgdmHy>zr^NMm-c`5#ac%a1WMSLNDz-N@B+Up%d1_X{SFuV>ny zhP6ZqEfJ@B4ETNAWX$=+C5Tif&;^#hkf{0avp%g7iyi1g;VRof17!jvobk=1}!>UV-k!Z7@ti#G#cYk4$tf zz8Lvh3!zWqFd1|i1mS1~Bz)(Cy?6wGM4CLlhp*Co!OvM-Be%du@pnjwFW~egP_~HM z4(U44KNhw#2{O}>ASP9{eCj;EvfC7ek0<8*k17MgQa#=eT^$v0VWD~1 zsy9UC>j3w_p=Qq1W*S|0$SMHT5aQU4aSdn&778_={<&FxbTIP;#1amK3AN(fV0H?q zCDDGPyVe7oVBT%}sG~Z10iZn%J7WClTO>VtLf;pFT^3i^G8k2hMKd99^bwP3md3CbW2`ugibC!`8U)dpPxkf?*@uqtaaFM>jD{m;q4kHgCkNvA!^z zvuq*+|LlY3?v>goBKaQcFF@&YPm7w!~C!TKXu8c;VRdz}IYk zE?NE(XoLhhRLto{EB;ISO+vh*MR@_iscmo0a)Vo;G^^TKX}-5`LGA+;SFaLze~{fa z5bC!HleygriRlYBYSwoo^eevu9?tegw4RC-=ZCeuV|0yTY?Das55b%RSo9z#&GgsD z`lt}&8R3m^QaMnyFLN6#)|P17ecdaFymrcWfl^Aw1`FswZr$R^POaX)xTHc?wo&p#1h0f?kcvRQDb=a(w6u7DeHDtsB@mi%*a zKqlEH%dl{F<&=5?cj`|QV@WlszwaKIZtp0CLUB)~r$h@rmUx!NT;{UgXk6RiX!9QQ>gmA?-eqIe z`@A*i%7%2AGKhJ`B_PA)6Gpg3YXF&^a!xb!oEfnUW0-&aQcDLTY+N$SiKO+ zurbI%=4Seo`E?J%*B*Vp355vw}wSAB<>Pz3A96E>JIrCdyawmp$b~LMmO}* zLW9GR+UR9PH_)=qpV<;)XwCW!_`EvS zSC@cM&-od@7G3yRyb!IAXl9e&DKgYxWVDh80)-Y?+)cWI94anW1r6rG;=8f6nfv2e}g*l;22UlrWp%E`8NHPO664HgjU8#YddE6O*yR+w5{&cH~!hr z_j;pGyCFK1+W%Dc%+}0z*YB&p`v)$>I8UVN65HK!L<_ zbBu7AOT~g!EaRUDSf>7E>f)Oif)K%UDALtfSQ_6h(@t=iH5xHb=%FTQ?{-=I z%8g!4>DqnA_MCE|PFY{*Jv>ABY+Hw!Opk?SM&=B4s@YG=mu5AMp|WO{jDFFNXf8gW z$7~I56GQSbV?18Q3na9p*0$4ajhb6iq6|mtagYmKA@Cw^{Kt5G(lXbkDP_H|E#snm 
z)qF5YZxv{Q84K%Io;HY>YC52e<9nqR!R%EHRI;v2QM(>U+!IpHPrvKgncuAHhRj_L zFFXKBF;Jj;RGR{siIdqleV(9oFJ5s2M@#+dVR#HZ%X__wE9rtDA1L}JOyi{QDPAK; z_Cu+pI0J5YM4n*%n-iICjS<~a5k=G6MaCTDh}<2xk>+fe|1CFuN9g(MI}_DI%C)A2 zv;`DsdgP_Di>#Q&QwF@5j;V;QELUpX!H%DqJ3E=RL)kTOdy4R^vPeXFzjJ+?#1d~F zVxZoH@?DP4ipR~~V2dLVoGHZQCwaztieW4A23;ZqE7`{wpuDsK6ug0k#pFslT8cnc zTwyFMm(DLZt?kEPGYq-UPTQQ@`gCKndZZjqiLgGX3Lk}K}cSb|1J1FMoCA+Q_3r(5}KI)LV5$%*zVHc_Mye| zlfyf1`X%dNU~6?FWgVzj(qFJzGPhAv~ssp7Zo*``toMEqJtUJ2z`@=OCD zM_p570#pY;&p*tS)eRN=tt^$8>j&{)-bG#IPI&03jmE#UP@pg|;D80&3LN(zsBD!k zt6cQZ_!T0S?Vpj!EfD2LfgyVtw3N`;Na#nvOj`FAYIkF*h|Vq5%^~iR*t82s^z*p_ zO(pw=-&RSLgi|ZB7&qi+fXHut@k*!&q)x9JTLW>BE)sp+PT6`S-%ik zzn!(grN->1kn1_$R~cuTzHMXA4>XTWwSH9~x1L1>sp4(9|1LG0*vgt#mfQvQu!xv^ zCatcNOd%-`o7l#GD8q}qjh9~Dl;Nh zjQwUx^&X5kGpcPN`xu)}i#NtE8@RMZD3;!E4MXa&Fz=Ms<&f5p#GB$m)KZs(SPpOA&K30{Ss;;o4(Wp}R`_$N+hPl-ykM z%Nbs!@_COSzNG9+$?->OqKxx5Ji*Y0<4|?7tR#W`jk0n7Ybj-Uv?>o5XTHb42_fv80_4Zo}W{Tm^^ayGC#G<4(e{Vi*C$`-PSVj`f!MmZsf} zK6jlJ6AY%fm(IrIFD>0rVLrWjGHVTI$4IAYve!9@O$PFzU(3n|dv2_+5xqxjD5JV= zebFk;+Irm6`c#2-i88|ODbLay%$QpA36+O}38j3af*$yGg0kVnPPTcnThi{r z^upPqIz12d1bE?-?ogBfPVJTra2^M7i3LfR>Iivx5%cAk@x7gvbMmNlzl zXoXvTuoP&JA%)#X9W^GWjK@9ex1=9O7_|{VNSb9Cd}2y zUi|ppJ>p)4 zumUL)&B%+|Xc@1EaQ#J*^Qea4w=Sa+*r(25q!G&@^ak@sPvzba^@3`5$mLC;;dQTj zZ9!csr?pJKK!#$?lwjq#d^iLo<^q*$8oz55os980&JpBq!9 zb?6CDd7&<>5(RrAM-+6})WNU<)Lw&g0Q z`ZuM&cteDp(8`wnC2(}{Ilf~N9tH|H)5qi6OjcULBvGE%4d1!MpCS87%;H2=TqL#O zFDx*^@(^W%bGx}SPJTP2l5Xz>ghk#9*qKEsZ^-9z&kItVH&`OyvJ}4e-pE9|@LQZo z{leku*qi00h1EPJxA{7HcKkG9F(cC|jE)69~dvEwO zI>EDdH_k=4TSUGTeTwHJ*e(|nRNE0%?ZRKB&HKJJeScpy*rapes`!NCt^E2lQ4z0R zkL8xGkaSrin=X-@frlh{oZ3)X-F<_%`o?Ob8m_f*mPk{zkUHB#+e=WN>K@9|;+PyQ z(z$mc4Tu-sQh_j1^@B?VWD6}>TwX5lM=F~L?amtw<;-d)JFaK~TYh3i@#KY*i{2!F zq)?)7&$fK6oKG*#W;tMY49hd9^QO;8`18`nFnK%Y0CF-HavfY($FW;ao(w`;^)6{; zhyYFX7Nw{bA+U^vU#ZC7e=8D1iPTkV8MyJMAAyuD;=b*F$!EQhpBX6j=u$OpQIiXn z8c8ock_N#x`MI{>XTTqzs>kBL2DNVcB+0dW%qM}LnP@a(KgMI}mdx02@-{x3o^&z( 
zLM=N4W^~5)Xuh8FYiZFApLD>XL{%$R_GIc(rByE|Y!1z4DX+X0>ZH!{gM7JvZ!blH z8gQ6woBlo0Ai9$q_(Ie7u<6!h>OS%Ds8}f>6$&X4ojj^W60=q!d^Swefsv`-X>0e% z&YOa~d&mOhR7M}jXK#@U`NfX(>WCyyPqjpN;0^;u?^E)Lo)v~<{v}oLPL4^Kf#m2@ zGb?LSbyn!kxwHiD1&$*m(``YqZAqchyItu7M#Lp>1i{b!p8+$zh3!A%^F*IU{Gp61 zw$GJ}3@G;gbg?3g%6I4SFeKU_VTHYuN#|9+5+691UE*X}(bZ(_AjN=hm`bFm!2Tg! z^zG4J=0q*m8hZ5wPr2LgJ%4x%+j9B~fpL6qekO05qR~q4ggWfV3h7rqr&#zra`E*I zL#2ZAVck9#hk3P8idT2MDII5(bqfruj(q5cXIo{4;e9@@x+#&n zF)l)Zbs9G?re!h84S4VsT`@H5**w3-Xq>dQkTb!L$Z~k6EqE;ulr*_GC)m;JF1aWt znc+*VGN%&=B#gN|^?+=KG4V()%+8l^Jh}@R++OCH%TrWkR+Uw-?FfxBlsb6Ho5h=o(wMgtm2hm^*K=&?~dz9!nOdq!oQIOaH)xQ~nC^xA|_? z&hQpw>$uwn()$fL#o5d*eVipr!Wbd=lDHhG`#qSad#l4Gu^EKURDy;%tn84Lvm$rcW?6I~&kB|C?7rupc5(kYn3}sKK6Gz{0=z_u2mQns1Q$em6HIftl+B96uL=En; zy709rnKYegSa!H<>NZEqT#h+ALTnhI zs^4*Iue)dY<|-`7l4BPCu1!fBCKVjlEv+ZdwEGTIAy3-a4%ugGPNX!5Nxp~=D z#wJoNpAl8G`BsTNVr*zK)SOV zjTqF>IGSoC%7RHPh?Wq2U37`zfrGj>DiP&$hx6J!pg;qU~s`dK3Uc7>B{;?h7{dF(-#( zv&uBA9j3L-flyQ+B*q?3xDV`P_BT!e>BC!L=2tpWD82w<4StGr(Tg1peW_(&$^5MK zbAK&Xy(1(#>f-29?vQ%BdRn|}-gZt@=zRj{-CkfNsj(MnmhAo#OJEo9`br1^G9I$SPJL?0lrBKk^HopEbSA{be{g@|FaaI5rfuF-bW8v7W07ui0Ci( zY74b0Gma7bWqk&$U_R_3``&N2Q_z#36VV0bx+GgAL~>&;#3Wg<-?-3=fBrpgqQHgu zLlmsIqD(dWqf4&6I5c+VDT|3ew`wERFVt=M-s~`S055HMB#ZCNijxxdU2~oobUVCr zm|%f7CF`qSU}4ooOlTMeh87)x*Pz{ypG2frkv->hm&qhmxkbkbbwZ_N|GMvWSb>3_ zhW|~185t$icJZLQ20EdOFO?%;z61PTJP8Ay(Hl z6~TDCdra@OTL899dSH5b-U6@R#wtdS(W7fXX#q( z*!0d%JH2ALKE*ADmwqWWdzW{Ba&0b+b0be1lt0_k;&Lb75s9k6!BDTmqNmOsx64=HD6D;wgk9Jm{HJLb7B?QFWePY=!&`B1VK}EWMf_ zp73*}EZ}_ZV{PS31vDmuPh2)QgwNW%3~rIeNR~MV^@X?UJP{A)y56)rG&RELzWnW> zKf_n^ForogdFgXAHMecu@|evFg|6=k!F{!gup=cnDVH6hN~L1va`g+^hF>_0_DMnz z2{Ng;PF|M}Z<>nrJm5;Ha%sbgjv%g_StBO~GwvObiGxo>fdA|CC}%1)@dcylEanv4 zI_k13mFS22mK(0nJ5kwvE7PzF&%bf8uHhcHWJ8(OQVOC^mFo#r*UZZASceqIgx*${QVIWuL_u;L$tvl} zVllcZWE}S`&r{6fv|s<@TJxgZiOJo#_l3iD=CISKOC{i5sFKsiO*C=Ebk?BIb6zi| zMw`4S&!pRl^Tu7X&B-|v8qr?U+8^p$w!nTN>71iOGTG^92>oKr&0NVDrO#rG*)umY 
zSune9$!ZuY;`t8D-}g%Mk&`(}q+*I`ziQT~C;BbyhUddA%%D6)n(()j)in7bt8?f6 zDh$JRXMoc7!aryqsc|b1q%svA`)<0+B1GFd!!udJF$A8B*dPH6ITA@OP`u;Dl!Cq_ zGZSJVaLXnlsCVe&QnJ=+2KTDR&dukpE}S7d>0b^Be~8Sl8SP?4=JC9cHmH=Pp32M$Nw zQ6rit7D@5ioh$6M7D+FS4Y%?xU^e`C=Z1ezq&T~=zD#41%y`)O{mh%i{kl&QvN4&S za`sau$HFlS2Oo35!l&?6xadhs!nyL~*iPy3s7}9&*!NbshDH`n))(eQr!$Xs-i>3` z-}jBF@BNByOV;My5);-;31Ofbm?Bjw6RhFh)}LjiD^H16Qe{hP)I=?UpJ1b|eNvi6 z?6*$#mNZ87D1*FuK}~b+r)Ms>nktAJU>^2j_ku`8Cv$ex27Ybgr-pjgr#9K z#sT_?rXs70TqJH;5ggLk!KrJn%FA3`L>%W z_iJ2$!pXqgKvIRqBE{l82x5FGEvzD z)ng(2qklz_b3@KH`~euy>}Le(vj;fp4=@=jiVvjjFsse+F&FpEU>IHi{Wh4V z)#$l=Q!&@>n`Or-vY8}vbe@5$@B(~Ll``!rv_}0FGJ5i8k30%DdR&#s#_#7?g63J_ zh2$|in`b{$!k18Tp#*Q8GU-%2;ikkmk@$-TZHf%|G$t~4s%gIaiJ#<$rPzrZlTjs> zvDH`zujxKy1!_a@GYb8=Uth6 z<>8(wBE^18wZM;g!v6GCGt*wt*6jp*L2gW_xOjgYn5bsig7{A)&(^oany|QNDX?nX&1aRRLVYd`Y8HdE`VR%jg_r z&4mJXkmZ~_igM$F*XnIpx5oq52#3gPn!Pitl&e}_oej7uMcN3^hVH2A#T zPKKCi`L7jm)KL&cYmODyc`g!TZYgTTKe|8{sO-ex@>5fzei%axnk=9m)-fN~QeG7Nk_LkV*J@J~ZPc_Knr z3~oX!&*t!DI|T--@5H3vdwRf^1T?sY8_qwj3x337H0Sd0Zo!lC6^q46qQR9uknUgHrKxN}pff!%pM=W%|D%OOB z#Hg)a|DFZ(O$2kBt|8V45Q%|e7KRgbdtQ-81`kGnb6H+xtj=$@Z{;!&%yK|P_R=1* z_*e@B0z~5wIzu~bBK(lHdNV*-pKDz19Bq3tv@ZNJWbD$Ljs$r?3SMBQ#3fVRN8dPSr{p!wrX0U-$xvaT`PbEM&u8KwZ1go08ufI2^O+ zQ@4Kb56$wl@QImdcqz?izv{6(sO(^w|z02XS z%o8(W?`inA3JKYOhk(o&qy&nl;gh!MeJzZv-@a8frSY zJaR(&ZxHZ8)i^wtyluMe_Puk9jnK~i+%tcfDh=c&BLbi_@`EYK(xy?b=&bOQh>h7& z5EZgDCud10pXX=i)|AzqtTGAM>-zooWMUG@ zDW-oq=6^{%DAyMt(fkw}gg$HTM!)#=>so8X56JmV02zxSf?Fe-2*I}&005R|Gz61P z^fDOn9N)719EvAgH?W3!e-(xGY`Fi!RbQ zS9<+#9-G*2%|F%GAfZ^X;L$pXnW!+%>kiXE#n3`$eN*6!ZUaw%-Go(f34UM44jFu% z&Er`p@>27!J~#xB&O6(@TEy)D1oGXVh5Y#sTeGBSrq+kHC3aZqqd$llN$nq3-u)>) zS&p>6Jp}fUHzd;$M?b{m=*l3F+{7!ol&I=D&@x3{A2}1=nr$JZ_d`6t)nN1vz~#Jf?QN@IGmo+kbL~Xxl}WII&G2yC2FN0t=l1ea($XeG zOG~HN+|zpwV8s5IeN+CMU(kbb3hq3?oz#xemw<);QTm)@L1@790#ghm^5dX~5kG!^ zMQdS=vTq*hThnnhEZW~LhP}hz{0-2FPy3BB2>y*$_n!YdtWp1|vs^lWJEg06NY|_Q z-08WKybup9Q?nF&{l~zcLky99L>a@3pRxw%sqeqj#X>ZCs^dvY^jSmaxXDnPCiZ>m 
zOX)ZOqGC<%zsh)#M8!p?t>i=imSIBm|LU);2G;7~0UuJ|zVmwL1ui-UXa4)bg2%>C%pqaj{Zg#o5B4-%+*(nUuNH4} z^WDoGw3jz0?yEpYb$J-@u+ zkIwP|y(9e~GP+-b6TS?pb@7n5I!ik305n^n1jh7T{18^XG4Mu|+rdx)M)}+QtlFOY z+}`u3KUeCTUZY`$o_1ti>2Dwe>zlAG`3~x9*HhZip!$+%V=3UDFWMabT_}YVF+8{%p$XX(Xd(95= z9?g@k?MY?PK-^K?Us|e^o-qzEB=>&P zEd8Du%A&g2vzZhW3))x`o01(4-u$3+RQIO5#Ox(4#Zx-I&VJ!bafKwpOcYC^BU@*z z0$%|4)9CeC3aB>Cq|3Rfe~`oCB$$uwH83X>Kx$%j$!h(iFpwz9nEiXZz*F9@#rr$t z;CKLx3iQ4!!5zcXxaFD`dh}8dYiMrA zcQt?Nibh3sS`a)yDjwn)tM+s<-Lt}&6WU2rM_S}+)&ttcWvN2~!=fEBge z()j2p^SBI5;c~OI1_-g@Q;tVxbZ<#sZH7}x~6j8eZK5W4J&kV_(N1NPBV;Ki%!x!JLLOj|P*X zirKT^7M6sYs^sY~(#fG_>l@9#`#*UIEETS=x}CzVQVA@2qXU2Nzzv4FHqF=20P?GW zNKKeY9J563A+Gv~&qF?sB>mh^^@vr7_{`0-q2#Au-^hQw&7JK=Uu%|;Q8~L9f*mU3EB$rmqR5{lG!jgQcci>XgCdbRq z=*8n-kOQh09tHR}61LmJc2|CX2k~%!EENE+uI&+-e$H8#W&KNVjoV7DQF}9o*M$W> zOeCK;=4Wit&pY)v!0L$xmNfUa$MA4-Jq5BVhgR!?f?nAz9=pLqMZc3Z8RgyPPITT+ z3u?&Mur?r;B_<)c-e&7HGJvtc+U!)^&&|c^8~~<6(VlxxuQ~-!4gM7yYCn(~-}a5; zEG@5}j3nRox+1?4=71hvx!-U~57$+7XuhIUlJH4qCVa4-IU^jE2CYk4dN-~VS`y=; zs^=k3^clk6YadP~Ku>z5LDp6Hs{E?K=*hj6>*8+Ef7}-T-+N-#ch6r~3bCU+`)W*G zs+b9Dv%NraK5Kt;D?GCCpv~Oc=8jfIB-_&J_V(hIT>r)xV{`LOnSC2|b+vFyGpCWk r$fjLP`^pwcrB>zcDkOZm literal 0 HcmV?d00001 diff --git a/yarn.lock b/yarn.lock new file mode 100644 index 00000000000..fb57ccd13af --- /dev/null +++ b/yarn.lock @@ -0,0 +1,4 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + From 7ac21d31712849847b7b7901d28e6337bafd5d90 Mon Sep 17 00:00:00 2001 From: b_mountain <49973336+MRgenial@users.noreply.github.com> Date: Mon, 19 Aug 2024 00:41:49 +0800 Subject: [PATCH 201/257] [improve] Add GeneralConfigTypeEnum. 
(#2555) Co-authored-by: Calvin --- .../constants/GeneralConfigTypeEnum.java | 54 +++++++++++++++++++ .../impl/MailGeneralConfigServiceImpl.java | 3 +- .../impl/ObjectStoreConfigServiceImpl.java | 3 +- .../impl/SmsGeneralConfigServiceImpl.java | 3 +- .../impl/SystemGeneralConfigServiceImpl.java | 3 +- .../service/impl/SystemSecretServiceImpl.java | 3 +- .../impl/TemplateConfigServiceImpl.java | 3 +- .../manager/service/ConfigServiceTest.java | 16 +++--- .../service/MailGeneralConfigServiceTest.java | 3 +- .../service/ObjectStoreConfigServiceTest.java | 3 +- .../service/SmsGeneralConfigServiceTest.java | 3 +- .../SystemGeneralConfigServiceTest.java | 3 +- .../service/SystemSecretServiceTest.java | 3 +- .../service/TemplateConfigServiceTest.java | 3 +- 14 files changed, 87 insertions(+), 19 deletions(-) create mode 100644 common/src/main/java/org/apache/hertzbeat/common/constants/GeneralConfigTypeEnum.java diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/GeneralConfigTypeEnum.java b/common/src/main/java/org/apache/hertzbeat/common/constants/GeneralConfigTypeEnum.java new file mode 100644 index 00000000000..6d8d0d74ed9 --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/GeneralConfigTypeEnum.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.constants; + +/** + * General Config Type Enum + */ +public enum GeneralConfigTypeEnum { + + /** + * template config + */ + template, + + /** + * system secret config + */ + secret, + + /** + * sms general config + */ + sms, + + /** + * system config + */ + system, + + /** + * mail general config + */ + email, + + /** + * system store config + */ + oss; +} diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java index aac05ad6b88..4397e1729f3 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/MailGeneralConfigServiceImpl.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import java.lang.reflect.Type; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.EmailNoticeSender; import org.springframework.stereotype.Service; @@ -46,7 +47,7 @@ public MailGeneralConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMap @Override public String type() { - return "email"; + return GeneralConfigTypeEnum.email.name(); } /** diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java index 5a0dd1424b9..9682935d8ad 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java +++ 
b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ObjectStoreConfigServiceImpl.java @@ -23,6 +23,7 @@ import java.lang.reflect.Type; import javax.annotation.Resource; import lombok.extern.slf4j.Slf4j; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreConfigChangeEvent; import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreDTO; @@ -63,7 +64,7 @@ public ObjectStoreConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMap @Override public String type() { - return "oss"; + return GeneralConfigTypeEnum.oss.name(); } @Override diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java index ebd8da36578..c7cd3647575 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SmsGeneralConfigServiceImpl.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import java.lang.reflect.Type; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.SmsNoticeSender; import org.springframework.stereotype.Service; @@ -46,7 +47,7 @@ public SmsGeneralConfigServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapp @Override public String type() { - return "sms"; + return GeneralConfigTypeEnum.sms.name(); } /** diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java index 5d2b629a92f..396721afee5 100644 --- 
a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemGeneralConfigServiceImpl.java @@ -22,6 +22,7 @@ import jakarta.annotation.Resource; import java.lang.reflect.Type; import java.util.Objects; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.common.support.event.SystemConfigChangeEvent; import org.apache.hertzbeat.common.util.TimeZoneUtil; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; @@ -60,7 +61,7 @@ public void handler(SystemConfig systemConfig) { @Override public String type() { - return "system"; + return GeneralConfigTypeEnum.system.name(); } @Override diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java index 4c25b12adc3..2915298dd4d 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SystemSecretServiceImpl.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import java.lang.reflect.Type; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.SystemSecret; import org.springframework.stereotype.Service; @@ -43,7 +44,7 @@ public SystemSecretServiceImpl(GeneralConfigDao generalConfigDao, ObjectMapper o @Override public String type() { - return "secret"; + return GeneralConfigTypeEnum.secret.name(); } @Override diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java index 
099e794d380..492918ca244 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/TemplateConfigServiceImpl.java @@ -21,6 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import jakarta.annotation.Resource; import java.lang.reflect.Type; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; import org.apache.hertzbeat.manager.service.AppService; @@ -55,7 +56,7 @@ public void handler(TemplateConfig templateConfig) { @Override public String type() { - return "template"; + return GeneralConfigTypeEnum.template.name(); } @Override diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java index ed7f1ac4e76..af7e1613601 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ConfigServiceTest.java @@ -25,6 +25,8 @@ import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.List; + +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.pojo.dto.EmailNoticeSender; import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreDTO; import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; @@ -57,9 +59,9 @@ public class ConfigServiceTest { @BeforeEach public void setUp() { List generalConfigServices = new ArrayList<>(); - when(objectStoreConfigService.type()).thenReturn("oss"); - when(templateConfigService.type()).thenReturn("template"); - when(mailGeneralConfigService.type()).thenReturn("mail"); + when(objectStoreConfigService.type()).thenReturn(GeneralConfigTypeEnum.oss.name()); + 
when(templateConfigService.type()).thenReturn(GeneralConfigTypeEnum.template.name()); + when(mailGeneralConfigService.type()).thenReturn(GeneralConfigTypeEnum.email.name()); generalConfigServices.add(objectStoreConfigService); generalConfigServices.add(templateConfigService); generalConfigServices.add(mailGeneralConfigService); @@ -68,10 +70,10 @@ public void setUp() { @Test public void testSaveConfig() { - configService.saveConfig("oss", new ObjectStoreDTO<>()); + configService.saveConfig(GeneralConfigTypeEnum.oss.name(), new ObjectStoreDTO<>()); verify(objectStoreConfigService, times(1)).saveConfig(any(ObjectStoreDTO.class)); - configService.saveConfig("mail", new EmailNoticeSender()); + configService.saveConfig(GeneralConfigTypeEnum.email.name(), new EmailNoticeSender()); verify(mailGeneralConfigService, times(1)).saveConfig(any(EmailNoticeSender.class)); } @@ -79,11 +81,11 @@ public void testSaveConfig() { public void testGetConfig() { ObjectStoreDTO ossConfig = new ObjectStoreDTO<>(); when(objectStoreConfigService.getConfig()).thenReturn(ossConfig); - assertNotNull(configService.getConfig("oss")); + assertNotNull(configService.getConfig(GeneralConfigTypeEnum.oss.name())); EmailNoticeSender emailNoticeSender = new EmailNoticeSender(); when(mailGeneralConfigService.getConfig()).thenReturn(emailNoticeSender); - configService.getConfig("mail"); + configService.getConfig(GeneralConfigTypeEnum.email.name()); verify(mailGeneralConfigService, times(1)).getConfig(); } diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java index 5a9fc345458..9a7a662c460 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/MailGeneralConfigServiceTest.java @@ -19,6 +19,7 @@ import com.fasterxml.jackson.core.type.TypeReference; 
import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.EmailNoticeSender; import org.apache.hertzbeat.manager.service.impl.MailGeneralConfigServiceImpl; @@ -54,7 +55,7 @@ void setUp() { @Test void testType() { - assertEquals("email", mailGeneralConfigService.type()); + assertEquals(GeneralConfigTypeEnum.email.name(), mailGeneralConfigService.type()); } @Test diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java index 95f123ffbf8..d9a0087888e 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ObjectStoreConfigServiceTest.java @@ -18,6 +18,7 @@ package org.apache.hertzbeat.manager.service; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreConfigChangeEvent; import org.apache.hertzbeat.manager.pojo.dto.ObjectStoreDTO; import org.apache.hertzbeat.manager.service.impl.ObjectStoreConfigServiceImpl; @@ -66,7 +67,7 @@ void setUp() { void testGetType() { String type = objectStoreConfigService.type(); - assertEquals("oss", type); + assertEquals(GeneralConfigTypeEnum.oss.name(), type); } @Test diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java index ab98888a968..2a1e8be3c58 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SmsGeneralConfigServiceTest.java @@ 
-19,6 +19,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.SmsNoticeSender; import org.apache.hertzbeat.manager.service.impl.SmsGeneralConfigServiceImpl; @@ -60,7 +61,7 @@ void setUp() { @Test void testType() { String result = service.type(); - assertEquals("sms", result); + assertEquals(GeneralConfigTypeEnum.sms.name(), result); } @Test diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java index 942928f3f4b..defb241b06c 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemGeneralConfigServiceTest.java @@ -19,6 +19,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.SystemConfig; import org.apache.hertzbeat.manager.service.impl.SystemGeneralConfigServiceImpl; @@ -58,7 +59,7 @@ void setUp() { void testType() { String result = service.type(); - assertEquals("system", result); + assertEquals(GeneralConfigTypeEnum.system.name(), result); } @Test diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java index 688847e3c2b..21f1986dcf1 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java +++ 
b/manager/src/test/java/org/apache/hertzbeat/manager/service/SystemSecretServiceTest.java @@ -19,6 +19,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.SystemSecret; import org.apache.hertzbeat.manager.service.impl.SystemSecretServiceImpl; @@ -54,7 +55,7 @@ void setUp() { @Test void testType() { - assertEquals("secret", systemSecretService.type()); + assertEquals(GeneralConfigTypeEnum.secret.name(), systemSecretService.type()); } @Test diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java index 81ba4f6dd2b..872167d5bd5 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/TemplateConfigServiceTest.java @@ -18,6 +18,7 @@ package org.apache.hertzbeat.manager.service; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hertzbeat.common.constants.GeneralConfigTypeEnum; import org.apache.hertzbeat.manager.dao.GeneralConfigDao; import org.apache.hertzbeat.manager.pojo.dto.TemplateConfig; import org.apache.hertzbeat.manager.service.impl.TemplateConfigServiceImpl; @@ -88,7 +89,7 @@ void testHandlerNullTemplateConfig() { void testType() { String type = templateConfigServiceImpl.type(); - assertEquals("template", type); + assertEquals(GeneralConfigTypeEnum.template.name(), type); } } From a2bd61947b618ca4d7b665bbc5f0e7744d893225 Mon Sep 17 00:00:00 2001 From: b_mountain <49973336+MRgenial@users.noreply.github.com> Date: Mon, 19 Aug 2024 00:48:34 +0800 Subject: [PATCH 202/257] [imporve] move ai package and improve ai code. 
(#2542) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 刘进山 Co-authored-by: YuLuo Co-authored-by: linDong <56677297@qq.com> Co-authored-by: Calvin --- .../common/constants/AiTypeEnum.java | 13 ++-- .../manager/controller/AiController.java | 6 +- .../manager/service/{ => ai}/AiService.java | 2 +- .../{impl => ai}/AlibabaAiServiceImpl.java | 70 ++++++++----------- .../{impl => ai}/KimiAiServiceImpl.java | 44 +++++------- .../{impl => ai}/SparkDeskAiServiceImpl.java | 41 +++++------ .../{impl => ai}/ZhiPuServiceImpl.java | 42 +++++------ .../factory}/AiServiceFactoryImpl.java | 4 +- .../manager/controller/AiControllerTest.java | 4 +- .../manager/service/AiServiceFactoryTest.java | 3 +- .../manager/service/AlibabaAiServiceTest.java | 2 +- .../manager/service/KimiAiServiceTest.java | 2 +- .../service/SparkDeskAiServiceTest.java | 2 +- .../manager/service/ZhiPuServiceTest.java | 2 +- 14 files changed, 103 insertions(+), 134 deletions(-) rename manager/src/main/java/org/apache/hertzbeat/manager/service/{ => ai}/AiService.java (96%) rename manager/src/main/java/org/apache/hertzbeat/manager/service/{impl => ai}/AlibabaAiServiceImpl.java (64%) rename manager/src/main/java/org/apache/hertzbeat/manager/service/{impl => ai}/KimiAiServiceImpl.java (70%) rename manager/src/main/java/org/apache/hertzbeat/manager/service/{impl => ai}/SparkDeskAiServiceImpl.java (73%) rename manager/src/main/java/org/apache/hertzbeat/manager/service/{impl => ai}/ZhiPuServiceImpl.java (71%) rename manager/src/main/java/org/apache/hertzbeat/manager/service/{impl => ai/factory}/AiServiceFactoryImpl.java (95%) diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/AiTypeEnum.java b/common/src/main/java/org/apache/hertzbeat/common/constants/AiTypeEnum.java index 14e58decfbc..d1594033de1 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/constants/AiTypeEnum.java +++ 
b/common/src/main/java/org/apache/hertzbeat/common/constants/AiTypeEnum.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.common.constants; +import java.util.Arrays; + /** * Ai type Enum */ @@ -48,13 +50,10 @@ public enum AiTypeEnum { * get type */ public static AiTypeEnum getTypeByName(String type) { - for (AiTypeEnum aiTypeEnum : values()) { - if (aiTypeEnum.name().equals(type)) { - return aiTypeEnum; - } - - } - return null; + return Arrays.stream(values()) + .filter(ai -> ai.name().equals(type)) + .findFirst() + .orElse(null); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java index 6c7408826d4..718d9c09410 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/controller/AiController.java @@ -22,8 +22,8 @@ import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; import org.apache.hertzbeat.manager.config.AiProperties; -import org.apache.hertzbeat.manager.service.AiService; -import org.apache.hertzbeat.manager.service.impl.AiServiceFactoryImpl; +import org.apache.hertzbeat.manager.service.ai.AiService; +import org.apache.hertzbeat.manager.service.ai.factory.AiServiceFactoryImpl; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.codec.ServerSentEvent; import org.springframework.util.Assert; @@ -59,10 +59,8 @@ public class AiController { @Operation(summary = "Artificial intelligence questions and Answers", description = "Artificial intelligence questions and Answers") public Flux> requestAi(@Parameter(description = "Request text", example = "Who are you") @RequestParam("text") String text) { - Assert.notNull(aiServiceFactory, "please check that your type value is consistent with the documentation on the website"); AiService aiServiceImplBean = 
aiServiceFactory.getAiServiceImplBean(aiProperties.getType()); - return aiServiceImplBean.requestAi(text); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/AiService.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/AiService.java similarity index 96% rename from manager/src/main/java/org/apache/hertzbeat/manager/service/AiService.java rename to manager/src/main/java/org/apache/hertzbeat/manager/service/ai/AiService.java index 22ccd55d799..de8e683ed91 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/AiService.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/AiService.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.hertzbeat.manager.service; +package org.apache.hertzbeat.manager.service.ai; import org.apache.hertzbeat.common.constants.AiTypeEnum; diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AlibabaAiServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/AlibabaAiServiceImpl.java similarity index 64% rename from manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AlibabaAiServiceImpl.java rename to manager/src/main/java/org/apache/hertzbeat/manager/service/ai/AlibabaAiServiceImpl.java index 2cac8b4ef2b..0f0fc339be3 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AlibabaAiServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/AlibabaAiServiceImpl.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hertzbeat.manager.service.impl; +package org.apache.hertzbeat.manager.service.ai; import java.util.List; import java.util.Objects; @@ -27,7 +27,6 @@ import org.apache.hertzbeat.manager.pojo.dto.AiMessage; import org.apache.hertzbeat.manager.pojo.dto.AliAiRequestParamDTO; import org.apache.hertzbeat.manager.pojo.dto.AliAiResponse; -import org.apache.hertzbeat.manager.service.AiService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.http.HttpHeaders; @@ -77,46 +76,39 @@ public AiTypeEnum getType() { @Override public Flux> requestAi(String text) { checkParam(text, aiProperties.getModel(), aiProperties.getApiKey()); - try { - AliAiRequestParamDTO aliAiRequestParamDTO = AliAiRequestParamDTO.builder() - .model(aiProperties.getModel()) - .input(AliAiRequestParamDTO.Input.builder() - .messages(List.of(new AiMessage(AiConstants.AliAiConstants.REQUEST_ROLE, text))) - .build()) - .parameters(AliAiRequestParamDTO.Parameters.builder() - .maxTokens(AiConstants.AliAiConstants.MAX_TOKENS) - .temperature(AiConstants.AliAiConstants.TEMPERATURE) - .enableSearch(true) - .resultFormat("message") - .incrementalOutput(true) - .build()) - .build(); + AliAiRequestParamDTO aliAiRequestParamDTO = AliAiRequestParamDTO.builder() + .model(aiProperties.getModel()) + .input(AliAiRequestParamDTO.Input.builder() + .messages(List.of(new AiMessage(AiConstants.AliAiConstants.REQUEST_ROLE, text))) + .build()) + .parameters(AliAiRequestParamDTO.Parameters.builder() + .maxTokens(AiConstants.AliAiConstants.MAX_TOKENS) + .temperature(AiConstants.AliAiConstants.TEMPERATURE) + .enableSearch(true) + .resultFormat("message") + .incrementalOutput(true) + .build()) + .build(); - return webClient.post() - .body(BodyInserters.fromValue(aliAiRequestParamDTO)) - .retrieve() - .bodyToFlux(AliAiResponse.class) - .map(aliAiResponse -> { - if (Objects.nonNull(aliAiResponse)) { 
- List choices = aliAiResponse.getOutput().getChoices(); - if (CollectionUtils.isEmpty(choices)) { - return ServerSentEvent.builder().build(); - } - String content = choices.get(0).getMessage().getContent(); - return ServerSentEvent.builder() - .data(content) - .build(); + return webClient.post() + .body(BodyInserters.fromValue(aliAiRequestParamDTO)) + .retrieve() + .bodyToFlux(AliAiResponse.class) + .map(aliAiResponse -> { + if (Objects.nonNull(aliAiResponse)) { + List choices = aliAiResponse.getOutput().getChoices(); + if (CollectionUtils.isEmpty(choices)) { + return ServerSentEvent.builder().build(); } - return ServerSentEvent.builder().build(); - }) - .doOnError(error -> log.info("AiResponse Exception:{}", error.toString())); - - } catch (Exception e) { - log.info("KimiAiServiceImpl.requestAi exception:{}", e.toString()); - throw e; - } - + String content = choices.get(0).getMessage().getContent(); + return ServerSentEvent.builder() + .data(content) + .build(); + } + return ServerSentEvent.builder().build(); + }) + .doOnError(error -> log.info("AlibabaAiServiceImpl.requestAi exception:{}", error.getMessage())); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/KimiAiServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/KimiAiServiceImpl.java similarity index 70% rename from manager/src/main/java/org/apache/hertzbeat/manager/service/impl/KimiAiServiceImpl.java rename to manager/src/main/java/org/apache/hertzbeat/manager/service/ai/KimiAiServiceImpl.java index 29c46fa717c..031a33271b3 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/KimiAiServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/KimiAiServiceImpl.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hertzbeat.manager.service.impl; +package org.apache.hertzbeat.manager.service.ai; import java.util.List; import javax.annotation.PostConstruct; @@ -26,7 +26,6 @@ import org.apache.hertzbeat.manager.pojo.dto.AiMessage; import org.apache.hertzbeat.manager.pojo.dto.OpenAiRequestParamDTO; import org.apache.hertzbeat.manager.pojo.dto.OpenAiResponse; -import org.apache.hertzbeat.manager.service.AiService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.http.HttpHeaders; @@ -71,37 +70,28 @@ public AiTypeEnum getType() { @Override public Flux> requestAi(String text) { - try { - checkParam(text, aiProperties.getModel(), aiProperties.getApiKey()); - OpenAiRequestParamDTO zhiPuRequestParamDTO = OpenAiRequestParamDTO.builder() - .model(aiProperties.getModel()) - .stream(Boolean.TRUE) - .maxTokens(AiConstants.KimiAiConstants.MAX_TOKENS) - .temperature(AiConstants.KimiAiConstants.TEMPERATURE) - .messages(List.of(new AiMessage(AiConstants.KimiAiConstants.REQUEST_ROLE, text))) - .build(); - - - return webClient.post() - .body(BodyInserters.fromValue(zhiPuRequestParamDTO)) - .retrieve() - .bodyToFlux(String.class) - .filter(aiResponse -> !"[DONE]".equals(aiResponse)) - .map(OpenAiResponse::convertToResponse) - .doOnError(error -> log.info("AiResponse Exception:{}", error.toString())); - - - } catch (Exception e) { - log.info("KimiAiServiceImpl.requestAi exception:{}", e.toString()); - throw e; - } + checkParam(text, aiProperties.getModel(), aiProperties.getApiKey()); + OpenAiRequestParamDTO zhiPuRequestParamDTO = OpenAiRequestParamDTO.builder() + .model(aiProperties.getModel()) + .stream(Boolean.TRUE) + .maxTokens(AiConstants.KimiAiConstants.MAX_TOKENS) + .temperature(AiConstants.KimiAiConstants.TEMPERATURE) + .messages(List.of(new AiMessage(AiConstants.KimiAiConstants.REQUEST_ROLE, text))) + .build(); + return webClient.post() + 
.body(BodyInserters.fromValue(zhiPuRequestParamDTO)) + .retrieve() + .bodyToFlux(String.class) + .filter(aiResponse -> !"[DONE]".equals(aiResponse)) + .map(OpenAiResponse::convertToResponse) + .doOnError(error -> log.info("KimiAiServiceImpl.requestAi exception:{}", error.getMessage())); } private void checkParam(String param, String model, String apiKey) { Assert.notNull(param, "text is null"); - Assert.notNull(param, "model is null"); + Assert.notNull(model, "model is null"); Assert.notNull(apiKey, "ai.api-key is null"); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SparkDeskAiServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/SparkDeskAiServiceImpl.java similarity index 73% rename from manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SparkDeskAiServiceImpl.java rename to manager/src/main/java/org/apache/hertzbeat/manager/service/ai/SparkDeskAiServiceImpl.java index 3b1271d5111..4c2520f63a0 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/SparkDeskAiServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/SparkDeskAiServiceImpl.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hertzbeat.manager.service.impl; +package org.apache.hertzbeat.manager.service.ai; import java.util.List; import javax.annotation.PostConstruct; @@ -26,7 +26,6 @@ import org.apache.hertzbeat.manager.pojo.dto.AiMessage; import org.apache.hertzbeat.manager.pojo.dto.OpenAiRequestParamDTO; import org.apache.hertzbeat.manager.pojo.dto.OpenAiResponse; -import org.apache.hertzbeat.manager.service.AiService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.http.HttpHeaders; @@ -78,33 +77,29 @@ public AiTypeEnum getType() { @Override public Flux> requestAi(String text) { + checkParam(text, aiProperties.getApiKey(), aiProperties.getModel()); + OpenAiRequestParamDTO zhiPuRequestParamDTO = OpenAiRequestParamDTO.builder() + .model(aiProperties.getModel()) + //sse + .stream(Boolean.TRUE) + .maxTokens(AiConstants.SparkDeskConstants.MAX_TOKENS) + .temperature(AiConstants.SparkDeskConstants.TEMPERATURE) + .messages(List.of(new AiMessage(AiConstants.SparkDeskConstants.REQUEST_ROLE, text))) + .build(); - try { - checkParam(text, aiProperties.getApiKey(), aiProperties.getModel()); - OpenAiRequestParamDTO zhiPuRequestParamDTO = OpenAiRequestParamDTO.builder() - .model(aiProperties.getModel()) - //sse - .stream(Boolean.TRUE) - .maxTokens(AiConstants.SparkDeskConstants.MAX_TOKENS) - .temperature(AiConstants.SparkDeskConstants.TEMPERATURE) - .messages(List.of(new AiMessage(AiConstants.SparkDeskConstants.REQUEST_ROLE, text))) - .build(); + return webClient.post() + .body(BodyInserters.fromValue(zhiPuRequestParamDTO)) + .retrieve() + .bodyToFlux(String.class) + .filter(aiResponse -> !"[DONE]".equals(aiResponse)) + .map(OpenAiResponse::convertToResponse) + .doOnError(error -> log.info("SparkDeskAiServiceImpl.requestAi exception:{}", error.getMessage())); - return webClient.post() - .body(BodyInserters.fromValue(zhiPuRequestParamDTO)) - .retrieve() - 
.bodyToFlux(String.class) - .filter(aiResponse -> !"[DONE]".equals(aiResponse)) - .map(OpenAiResponse::convertToResponse); - } catch (Exception e) { - log.info("SparkDeskAiServiceImpl.requestAi exception:{}", e.toString()); - throw e; - } } private void checkParam(String param, String apiKey, String model) { Assert.notNull(param, "text is null"); - Assert.notNull(param, "model is null"); + Assert.notNull(model, "model is null"); Assert.notNull(apiKey, "ai.api-key is null"); } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ZhiPuServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/ZhiPuServiceImpl.java similarity index 71% rename from manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ZhiPuServiceImpl.java rename to manager/src/main/java/org/apache/hertzbeat/manager/service/ai/ZhiPuServiceImpl.java index b6111c24720..75a55f1b8cd 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/ZhiPuServiceImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/ZhiPuServiceImpl.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hertzbeat.manager.service.impl; +package org.apache.hertzbeat.manager.service.ai; import java.util.List; @@ -27,7 +27,6 @@ import org.apache.hertzbeat.manager.pojo.dto.AiMessage; import org.apache.hertzbeat.manager.pojo.dto.OpenAiRequestParamDTO; import org.apache.hertzbeat.manager.pojo.dto.OpenAiResponse; -import org.apache.hertzbeat.manager.service.AiService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.http.HttpHeaders; @@ -73,33 +72,28 @@ public AiTypeEnum getType() { @Override public Flux> requestAi(String text) { - try { - checkParam(text, aiProperties.getModel(), aiProperties.getApiKey()); - OpenAiRequestParamDTO zhiPuRequestParamDTO = OpenAiRequestParamDTO.builder() - .model(aiProperties.getModel()) - //sse - .stream(Boolean.TRUE) - .maxTokens(AiConstants.ZhiPuConstants.MAX_TOKENS) - .temperature(AiConstants.ZhiPuConstants.TEMPERATURE) - .messages(List.of(new AiMessage(AiConstants.ZhiPuConstants.REQUEST_ROLE, text))) - .build(); - - return webClient.post() - .body(BodyInserters.fromValue(zhiPuRequestParamDTO)) - .retrieve() - .bodyToFlux(String.class) - .filter(aiResponse -> !"[DONE]".equals(aiResponse)) - .map(OpenAiResponse::convertToResponse) - .doOnError(error -> log.info("AiResponse Exception:{}", error.toString())); + checkParam(text, aiProperties.getModel(), aiProperties.getApiKey()); + OpenAiRequestParamDTO zhiPuRequestParamDTO = OpenAiRequestParamDTO.builder() + .model(aiProperties.getModel()) + //sse + .stream(Boolean.TRUE) + .maxTokens(AiConstants.ZhiPuConstants.MAX_TOKENS) + .temperature(AiConstants.ZhiPuConstants.TEMPERATURE) + .messages(List.of(new AiMessage(AiConstants.ZhiPuConstants.REQUEST_ROLE, text))) + .build(); - } catch (Exception e) { - log.info("ZhiPuServiceImpl.requestAi exception:{}", e.toString()); - throw e; - } + return webClient.post() + 
.body(BodyInserters.fromValue(zhiPuRequestParamDTO)) + .retrieve() + .bodyToFlux(String.class) + .filter(aiResponse -> !"[DONE]".equals(aiResponse)) + .map(OpenAiResponse::convertToResponse) + .doOnError(error -> log.info("ZhiPuServiceImpl.requestAi exception:{}", error.getMessage())); } private void checkParam(String param, String model, String apiKey) { Assert.notNull(param, "text is null"); + Assert.notNull(model, "model is null"); Assert.notNull(apiKey, "ai.api-key is null"); } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AiServiceFactoryImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/factory/AiServiceFactoryImpl.java similarity index 95% rename from manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AiServiceFactoryImpl.java rename to manager/src/main/java/org/apache/hertzbeat/manager/service/ai/factory/AiServiceFactoryImpl.java index 8a18e0ce1b5..8e0954c7adb 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/AiServiceFactoryImpl.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/service/ai/factory/AiServiceFactoryImpl.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hertzbeat.manager.service.impl; +package org.apache.hertzbeat.manager.service.ai.factory; import java.util.HashMap; import java.util.List; @@ -24,7 +24,7 @@ import java.util.stream.Collectors; import javax.annotation.PostConstruct; import org.apache.hertzbeat.common.constants.AiTypeEnum; -import org.apache.hertzbeat.manager.service.AiService; +import org.apache.hertzbeat.manager.service.ai.AiService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.stereotype.Component; diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java index 7c1bfa5f27e..70f3e150e41 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/controller/AiControllerTest.java @@ -18,8 +18,8 @@ package org.apache.hertzbeat.manager.controller; import org.apache.hertzbeat.manager.config.AiProperties; -import org.apache.hertzbeat.manager.service.AiService; -import org.apache.hertzbeat.manager.service.impl.AiServiceFactoryImpl; +import org.apache.hertzbeat.manager.service.ai.AiService; +import org.apache.hertzbeat.manager.service.ai.factory.AiServiceFactoryImpl; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java index 067585ce159..b0ac2089339 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/AiServiceFactoryTest.java @@ -24,7 +24,8 @@ import java.util.function.Function; 
import java.util.stream.Collectors; import org.apache.hertzbeat.common.constants.AiTypeEnum; -import org.apache.hertzbeat.manager.service.impl.AiServiceFactoryImpl; +import org.apache.hertzbeat.manager.service.ai.AiService; +import org.apache.hertzbeat.manager.service.ai.factory.AiServiceFactoryImpl; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java index ac6b7bbccec..ff0a5b0f331 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/AlibabaAiServiceTest.java @@ -17,7 +17,7 @@ package org.apache.hertzbeat.manager.service; -import org.apache.hertzbeat.manager.service.impl.AlibabaAiServiceImpl; +import org.apache.hertzbeat.manager.service.ai.AlibabaAiServiceImpl; /** * test case for {@link AlibabaAiServiceImpl} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java index 30f0eb790f9..a747c6f2ad9 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/KimiAiServiceTest.java @@ -18,7 +18,7 @@ package org.apache.hertzbeat.manager.service; -import org.apache.hertzbeat.manager.service.impl.KimiAiServiceImpl; +import org.apache.hertzbeat.manager.service.ai.KimiAiServiceImpl; /** * test case for {@link KimiAiServiceImpl} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java index f8dd26a998b..47d281bd778 100644 --- 
a/manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/SparkDeskAiServiceTest.java @@ -17,7 +17,7 @@ package org.apache.hertzbeat.manager.service; -import org.apache.hertzbeat.manager.service.impl.SparkDeskAiServiceImpl; +import org.apache.hertzbeat.manager.service.ai.SparkDeskAiServiceImpl; /** * test case for {@link SparkDeskAiServiceImpl} diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java index 68329d472b4..12fb3fd10b4 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/service/ZhiPuServiceTest.java @@ -17,7 +17,7 @@ package org.apache.hertzbeat.manager.service; -import org.apache.hertzbeat.manager.service.impl.ZhiPuServiceImpl; +import org.apache.hertzbeat.manager.service.ai.ZhiPuServiceImpl; /** * test case for {@link ZhiPuServiceImpl} From 5d872752067c6dcbbf61b3e5695371076f5508e1 Mon Sep 17 00:00:00 2001 From: liutianyou Date: Mon, 19 Aug 2024 12:36:51 +0800 Subject: [PATCH 203/257] [improve] support plugins using other jar (#2553) Co-authored-by: YuLuo Co-authored-by: Logic --- .../service/impl/PluginServiceImpl.java | 59 ++++++++++++++++++- manager/src/main/resources/application.yml | 11 ++-- plugin/pom.xml | 24 ++++++++ .../src/main/resources/assembly/assembly.xml | 41 +++++++++++++ 4 files changed, 129 insertions(+), 6 deletions(-) create mode 100644 plugin/src/main/resources/assembly/assembly.xml diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java index 69b79047916..162d4bc394a 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java +++ 
b/manager/src/main/java/org/apache/hertzbeat/manager/service/impl/PluginServiceImpl.java @@ -19,7 +19,10 @@ import jakarta.persistence.criteria.Predicate; import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; @@ -96,6 +99,11 @@ public void deletePlugins(Set ids) { if (jarFile.exists()) { FileUtils.delete(jarFile); } + // removing jar files that are dependencies for the plugin + File otherLibDir = new File(getOtherLibDir(plugin.getJarFilePath())); + if (otherLibDir.exists()) { + FileUtils.deleteDirectory(otherLibDir); + } // delete metadata metadataDao.deleteById(plugin.getId()); } catch (IOException e) { @@ -108,6 +116,16 @@ public void deletePlugins(Set ids) { loadJarToClassLoader(); } + /** + * get the directory where the JAR files dependent on the plugin are saved + * + * @param pluginJarPath jar file path + * @return lib dir + */ + private String getOtherLibDir(String pluginJarPath) { + return pluginJarPath.substring(0, pluginJarPath.lastIndexOf(".")); + } + @Override public void updateStatus(PluginMetadata plugin) { Optional pluginMetadata = metadataDao.findById(plugin.getId()); @@ -274,8 +292,9 @@ private void loadJarToClassLoader() { System.gc(); List plugins = metadataDao.findPluginMetadataByEnableStatusTrue(); for (PluginMetadata metadata : plugins) { - URL url = new File(metadata.getJarFilePath()).toURI().toURL(); - pluginClassLoaders.add(new URLClassLoader(new URL[]{url}, Plugin.class.getClassLoader())); + List urls = loadLibInPlugin(metadata.getJarFilePath()); + urls.add(new File(metadata.getJarFilePath()).toURI().toURL()); + pluginClassLoaders.add(new URLClassLoader(urls.toArray(new URL[0]), Plugin.class.getClassLoader())); } } catch (MalformedURLException e) { log.error("Failed to load plugin:{}", e.getMessage()); @@ -285,6 +304,42 @@ private void loadJarToClassLoader() { } 
} + /** + * loading other JAR files that are dependencies for the plugin + * + * @param pluginJarPath jar file path + * @return urls + */ + @SneakyThrows + private List loadLibInPlugin(String pluginJarPath) { + File libDir = new File(getOtherLibDir(pluginJarPath)); + FileUtils.forceMkdir(libDir); + List libUrls = new ArrayList<>(); + try (JarFile jarFile = new JarFile(pluginJarPath)) { + Enumeration entries = jarFile.entries(); + while (entries.hasMoreElements()) { + JarEntry entry = entries.nextElement(); + File file = new File(libDir, entry.getName()); + if (!entry.isDirectory() && entry.getName().endsWith(".jar")) { + if (!file.getParentFile().exists()) { + FileUtils.createParentDirectories(file); + } + try (InputStream in = jarFile.getInputStream(entry); + OutputStream out = new FileOutputStream(file)) { + byte[] buffer = new byte[4096]; + int len; + while ((len = in.read(buffer)) != -1) { + out.write(buffer, 0, len); + } + libUrls.add(file.toURI().toURL()); + out.flush(); + } + } + } + } + return libUrls; + } + @Override public void pluginExecute(Class clazz, Consumer execute) { for (URLClassLoader pluginClassLoader : pluginClassLoaders) { diff --git a/manager/src/main/resources/application.yml b/manager/src/main/resources/application.yml index 619e52b1c8d..33ad559a274 100644 --- a/manager/src/main/resources/application.yml +++ b/manager/src/main/resources/application.yml @@ -33,7 +33,10 @@ spring: exclude: org.springframework.boot.autoconfigure.mongo.MongoAutoConfiguration, org.springframework.boot.autoconfigure.data.mongo.MongoDataAutoConfiguration freemarker: enabled: false - + servlet: + multipart: + max-file-size: 100MB + max-request-size: 100MB management: health: @@ -81,7 +84,7 @@ spring: eclipselink: logging: level: SEVERE - + flyway: enabled: true clean-disabled: true @@ -89,9 +92,9 @@ spring: baseline-version: 1 locations: - classpath:db/migration/{vendor} - + mail: - # Mail server address, eg: qq-mailbox is smtp.qq.com, qq-exmail is 
smtp.exmail.qq.com + # Mail server address, eg: qq-mailbox is smtp.qq.com, qq-exmail is smtp.exmail.qq.com host: smtp.qq.com username: tancloud@qq.com # Attention this is not email account password, this requires an email authorization code diff --git a/plugin/pom.xml b/plugin/pom.xml index 461adca6704..023225ceb8f 100644 --- a/plugin/pom.xml +++ b/plugin/pom.xml @@ -37,8 +37,32 @@ org.apache.hertzbeat hertzbeat-common + provided + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.3.0 + + + src/main/resources/assembly/assembly.xml + + + + + make-assembly + package + + single + + + + + + diff --git a/plugin/src/main/resources/assembly/assembly.xml b/plugin/src/main/resources/assembly/assembly.xml new file mode 100644 index 00000000000..2b5008142f7 --- /dev/null +++ b/plugin/src/main/resources/assembly/assembly.xml @@ -0,0 +1,41 @@ + + + jar-with-lib + + jar + + false + + + /lib + false + runtime + + ${project.groupId}:${project.artifactId} + + + + + + ${project.build.outputDirectory} + / + + + From a49fa3d91b6cca683a15319082d43e1e8a856d69 Mon Sep 17 00:00:00 2001 From: Jast Date: Mon, 19 Aug 2024 14:24:05 +0800 Subject: [PATCH 204/257] [fixbug] Fixbug doc format temporary process (#2557) Co-authored-by: tomsun28 --- pom.xml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 31f9f010b8f..c0ed26b9548 100644 --- a/pom.xml +++ b/pom.xml @@ -490,7 +490,9 @@ false - home/**/*.md + home/docs/**/*.md + home/blog/**/*.md + home/i18n/**/*.md From ceab3023b18c0d1a78c7e814e14cd7092ab337c1 Mon Sep 17 00:00:00 2001 From: Jast Date: Mon, 19 Aug 2024 22:25:24 +0800 Subject: [PATCH 205/257] [improve] linux chart display (#2559) --- manager/src/main/resources/define/app-almalinux.yml | 8 ++++---- manager/src/main/resources/define/app-centos.yml | 8 ++++---- manager/src/main/resources/define/app-coreos.yml | 8 ++++---- manager/src/main/resources/define/app-debian.yml | 8 ++++---- manager/src/main/resources/define/app-euleros.yml | 8 
++++---- manager/src/main/resources/define/app-linux.yml | 8 ++++---- manager/src/main/resources/define/app-linux_script.yml | 8 ++++---- manager/src/main/resources/define/app-opensuse.yml | 8 ++++---- manager/src/main/resources/define/app-redhat.yml | 8 ++++---- manager/src/main/resources/define/app-rockylinux.yml | 8 ++++---- manager/src/main/resources/define/app-ubuntu.yml | 8 ++++---- 11 files changed, 44 insertions(+), 44 deletions(-) diff --git a/manager/src/main/resources/define/app-almalinux.yml b/manager/src/main/resources/define/app-almalinux.yml index 101e0de905b..bac55b99830 100644 --- a/manager/src/main/resources/define/app-almalinux.yml +++ b/manager/src/main/resources/define/app-almalinux.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-centos.yml b/manager/src/main/resources/define/app-centos.yml index 4b5a579037a..55a13804707 100644 --- a/manager/src/main/resources/define/app-centos.yml +++ b/manager/src/main/resources/define/app-centos.yml @@ -178,7 +178,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -239,7 +239,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -306,12 +306,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-coreos.yml b/manager/src/main/resources/define/app-coreos.yml index e074c2c9ede..b3e22a4b9fe 100644 --- 
a/manager/src/main/resources/define/app-coreos.yml +++ b/manager/src/main/resources/define/app-coreos.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-debian.yml b/manager/src/main/resources/define/app-debian.yml index 225f4d46e9e..11827652a52 100644 --- a/manager/src/main/resources/define/app-debian.yml +++ b/manager/src/main/resources/define/app-debian.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-euleros.yml b/manager/src/main/resources/define/app-euleros.yml index 8bcbb4d52a3..9c9f172a612 100644 --- a/manager/src/main/resources/define/app-euleros.yml +++ b/manager/src/main/resources/define/app-euleros.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-linux.yml 
b/manager/src/main/resources/define/app-linux.yml index 698f43b0b2d..8579496413d 100644 --- a/manager/src/main/resources/define/app-linux.yml +++ b/manager/src/main/resources/define/app-linux.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-linux_script.yml b/manager/src/main/resources/define/app-linux_script.yml index b5d2633ed09..113c90f1a87 100644 --- a/manager/src/main/resources/define/app-linux_script.yml +++ b/manager/src/main/resources/define/app-linux_script.yml @@ -97,7 +97,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -155,7 +155,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -217,12 +217,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-opensuse.yml b/manager/src/main/resources/define/app-opensuse.yml index 75546e737d5..7cae69a79ac 100644 --- a/manager/src/main/resources/define/app-opensuse.yml +++ b/manager/src/main/resources/define/app-opensuse.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + 
type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-redhat.yml b/manager/src/main/resources/define/app-redhat.yml index d9f47b8e091..e733df10a17 100644 --- a/manager/src/main/resources/define/app-redhat.yml +++ b/manager/src/main/resources/define/app-redhat.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-rockylinux.yml b/manager/src/main/resources/define/app-rockylinux.yml index e608572e60e..f93737ed7ae 100644 --- a/manager/src/main/resources/define/app-rockylinux.yml +++ b/manager/src/main/resources/define/app-rockylinux.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num diff --git a/manager/src/main/resources/define/app-ubuntu.yml b/manager/src/main/resources/define/app-ubuntu.yml index 3a5d3e0fbd7..9bd626928fe 100644 --- a/manager/src/main/resources/define/app-ubuntu.yml +++ b/manager/src/main/resources/define/app-ubuntu.yml @@ -177,7 +177,7 @@ metrics: zh-CN: 型号 en-US: Info - field: cores - type: 0 + type: 1 i18n: zh-CN: 核数 en-US: Cores @@ -238,7 +238,7 @@ metrics: priority: 2 fields: - field: total - type: 0 + type: 1 unit: Mb i18n: zh-CN: 总内存容量 @@ -305,12 +305,12 @@ metrics: priority: 3 fields: - field: 
disk_num - type: 0 + type: 1 i18n: zh-CN: 磁盘总数 en-US: Disk Num - field: partition_num - type: 0 + type: 1 i18n: zh-CN: 分区总数 en-US: Partition Num From 98b03b62c76afd7b6a20787f63295309a107fef9 Mon Sep 17 00:00:00 2001 From: Jast Date: Tue, 20 Aug 2024 00:31:51 +0800 Subject: [PATCH 206/257] [doc] update doc style check (#2560) Co-authored-by: tomsun28 --- home/docs/community/code-style-and-quality-guide.md | 5 +++++ .../current/community/code-style-and-quality-guide.md | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/home/docs/community/code-style-and-quality-guide.md b/home/docs/community/code-style-and-quality-guide.md index c86438a577e..70ddc5d518e 100644 --- a/home/docs/community/code-style-and-quality-guide.md +++ b/home/docs/community/code-style-and-quality-guide.md @@ -61,6 +61,11 @@ limitations under the License. - Frontend code formatting plugin `eslint` Just run `npm run lint:fix` in web-app +### 2.2 Document style check + +1. Run `mvn spotless:check` in the project to automatically detect the Markdown file format. +2. Run `mvn spotless:apply` in the project to automatically format the Markdown file format to ensure that all documents meet the specifications. + ## 3 Programming Specification ### 3.1 Naming Style diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md index 1cad8c3add1..9e11d3de6b7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md @@ -61,6 +61,11 @@ limitations under the License. - 前端代码格式化插件 `eslint` 前端运行 `npm run lint:fix` +### 2.2 文档样式检查 + +1. 在项目中运行`mvn spotless:check`,会执行Markdown文件格式自动检测。 +2. 
在项目中运行`mvn spotless:apply`,会执行Markdown文件格式自动格式化,以确保所有文档都符合规范。 + ## 3 编程规范 ### 3.1 命名风格 From bc47768bfbe4d5468f6e1c8bb6e3d1faea9e9b9f Mon Sep 17 00:00:00 2001 From: aias00 Date: Tue, 20 Aug 2024 23:57:09 +0800 Subject: [PATCH 207/257] [feature] add kingbase help md (#2568) --- home/docs/help/kingbase.md | 56 +++++++++++++++++++ .../current/help/kingbase.md | 56 +++++++++++++++++++ home/sidebars.json | 1 + 3 files changed, 113 insertions(+) create mode 100644 home/docs/help/kingbase.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md diff --git a/home/docs/help/kingbase.md b/home/docs/help/kingbase.md new file mode 100644 index 00000000000..92d3b440496 --- /dev/null +++ b/home/docs/help/kingbase.md @@ -0,0 +1,56 @@ +--- +id: kingbase +title: Monitoring:Kingbase database monitoring +sidebar_label: Kingbase database +keywords: [open source monitoring tool, open source database monitoring tool, monitoring kingbase database metrics] +--- + +> Collect and monitor the general performance Metrics of Kingbase database. Support Kingbase V8R6+. + +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | + +### Collection Metric + +#### Metric set:basic + +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | + +#### Metric set:state + +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in 
the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the Kingbase buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | + +#### Metric set:activity + +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md new file mode 100644 index 00000000000..2f22804c38b --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md @@ -0,0 +1,56 @@ +--- +id: kingbase +title: 监控:Kingbase数据库监控 +sidebar_label: Kingbase数据库 +keywords: [开源监控系统, 开源数据库监控, Kingbase数据库监控] +--- + +> 对Kingbase数据库的通用性能指标进行采集监控。支持Kingbase V8R6+。 + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | + +### 采集指标 + +#### 指标集合:basic + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 
| 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | + +#### 指标集合:state + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|--------------------------------------------------------------------------| +| name | 无 | 数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 Kingbase 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | + +#### 指标集合:activity + +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | + diff --git a/home/sidebars.json b/home/sidebars.json index 4c14bc6c39a..8db82fa48c4 100755 --- a/home/sidebars.json +++ b/home/sidebars.json @@ -206,6 +206,7 @@ "help/mysql", "help/mariadb", "help/postgresql", + "help/kingbase", "help/sqlserver", "help/oracle", "help/dm", From bdfffeec4cd24aaa26f9c27dc48f9fe55cad40aa Mon Sep 17 00:00:00 2001 From: aias00 Date: Wed, 21 Aug 2024 10:54:54 +0800 Subject: [PATCH 208/257] [bugfix] fix kingbase spotless apply (#2575) --- home/docs/help/kingbase.md | 2 +- .../current/help/kingbase.md | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/home/docs/help/kingbase.md b/home/docs/help/kingbase.md index 92d3b440496..ccdb5cc0391 100644 --- a/home/docs/help/kingbase.md +++ b/home/docs/help/kingbase.md @@ -37,7 +37,7 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo #### Metric set:state -| Metric name | Metric unit | Metric help description | +| Metric name | Metric unit | Metric help description | |----------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | name | 
none | Database name, or share-object is a shared object | | conflicts | times | The number of queries canceled in the database due to a conflict with recovery | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md index 2f22804c38b..b09187ff456 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md @@ -37,16 +37,16 @@ keywords: [开源监控系统, 开源数据库监控, Kingbase数据库监控] #### 指标集合:state -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------|------|--------------------------------------------------------------------------| -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|------------------------------------------------------------------------| +| name | 无 | 数据库名称,或share-object为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | | blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 Kingbase 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | #### 指标集合:activity From a1d73a292ca4a36a8ce532ee42e20cd5274e955b Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Wed, 21 Aug 2024 13:39:39 +0800 Subject: [PATCH 209/257] [webapp] auto resize the monitor detail ui width (#2577) Signed-off-by: tomsun28 Co-authored-by: YuLuo --- .../monitor-data-chart.component.html | 3 +- .../monitor-data-chart.component.ts | 21 ++++++++++-- .../monitor-data-table.component.html | 4 +-- 
.../monitor-data-table.component.ts | 34 +++++++++++++++++-- .../monitor-detail.component.html | 2 -- 5 files changed, 53 insertions(+), 11 deletions(-) diff --git a/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html b/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html index 8f1c4cb7fa6..e744e7afaa9 100644 --- a/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html +++ b/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html @@ -17,7 +17,8 @@ ~ under the License. --> - +

+
= 5) { + this.cardWidth = this.cardWidth + this.cardWidth; + } this.lineHistoryTheme.series = []; let valueKeyArr = Object.keys(values); for (let index = 0; index < valueKeyArr.length; index++) { diff --git a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html index 019ef5535a2..05f4348eb1f 100644 --- a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html +++ b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html @@ -16,10 +16,10 @@ ~ specific language governing permissions and limitations ~ under the License. --> - +
= 5) { + updateWidth = true; + } + } + this.valueRows.forEach(row => { + row.values.forEach((value: any) => { + if (value.origin?.length > 60) { + updateWidth = true; + } + }); + }); + if (updateWidth) { + this.cardWidth = this.cardWidth + this.cardWidth; + this.cdr.detectChanges(); } } else if (message.code !== 0) { this.notifySvc.warning(`${this.metrics}:${message.msg}`, ''); diff --git a/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html b/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html index 7cf402565f0..2dcdaa66ee0 100755 --- a/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html +++ b/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html @@ -65,7 +65,6 @@ [app]="app" >
Date: Wed, 21 Aug 2024 17:54:20 +0800 Subject: [PATCH 210/257] chore: rename push service unit test class name (#2562) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .../apache/hertzbeat/push/service/PushGatewayServiceTest.java | 2 +- .../java/org/apache/hertzbeat/push/service/PushServiceTest.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java b/push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java index 9cec4edc5ba..dc4d079da24 100644 --- a/push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java +++ b/push/src/test/java/org/apache/hertzbeat/push/service/PushGatewayServiceTest.java @@ -47,7 +47,7 @@ */ @ExtendWith(MockitoExtension.class) -class PushGatewayServiceImplTest { +class PushGatewayServiceTest { @InjectMocks private PushGatewayServiceImpl pushGatewayService; diff --git a/push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java b/push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java index fdf9d45780b..222844f6913 100644 --- a/push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java +++ b/push/src/test/java/org/apache/hertzbeat/push/service/PushServiceTest.java @@ -49,7 +49,7 @@ */ @ExtendWith(MockitoExtension.class) -class PushServiceImplTest { +class PushServiceTest { @Mock private PushMonitorDao monitorDao; From 235df8bca122f6e61902c7b1769f4cc6690d6dcb Mon Sep 17 00:00:00 2001 From: aias00 Date: Wed, 21 Aug 2024 18:29:20 +0800 Subject: [PATCH 211/257] [feature] add kvrocks template (#2564) Co-authored-by: YuLuo Co-authored-by: tomsun28 --- .../src/main/resources/define/app-kvrocks.yml | 718 ++++++++++++++++++ 1 file changed, 718 insertions(+) create mode 100644 manager/src/main/resources/define/app-kvrocks.yml diff --git a/manager/src/main/resources/define/app-kvrocks.yml b/manager/src/main/resources/define/app-kvrocks.yml new file mode 100644 
index 00000000000..7ce849070fa --- /dev/null +++ b/manager/src/main/resources/define/app-kvrocks.yml @@ -0,0 +1,718 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: cache +# The monitoring type eg: linux windows tomcat mysql aws... +app: kvrocks +# The monitoring i18n name +name: + zh-CN: Kvrocks 数据库 + en-US: Kvrocks +# The description and help of this monitoring type +help: + zh-CN: HertzBeat 对 Apache Kvrocks 数据库的通用性能指标进行采集监控(server、clients、memory、persistence、stats、replication、cpu、cluster、commandstats),支持版本为 Apache Kvrocks 2.9.0+。
您可以点击“新建 Kvrocks 数据库”并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: HertzBeat monitors Apache Kvrocks database of general performance metrics such as memory, persistence, replication and so on. The versions we support is Apache Kvrocks 2.9.0+.
You could click the "New Kvrocks" button and proceed with the configuration or import an existing setup through the "More Actions" menu. + zh-TW: HertzBeat 對 Apache Kvrocks 數據庫的通用性能指標進行采集監控(server、clients、memory、persistence、stats、replication、cpu、cluster、commandstats),支持版本爲 Apache Kvrocks 2.9.0+。
您可以點擊“新建 Kvrocks 數據庫”並進行配置,或者選擇“更多操作”,導入已有配置。 +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/kvrocks + en-US: https://hertzbeat.apache.org/docs/help/kvrocks +# Input params define for monitoring(render web ui by +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 6666 + # field-param field key + - field: timeout + # name-param field display i18n name + name: + zh-CN: 超时时间 + en-US: Timeout + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,100000]' + # required-true or false + required: true + # default value + defaultValue: 3000 + # field-param field key + - field: username + name: + zh-CN: 用户名 + en-US: Username + type: text + limit: 50 + required: false + # field-param field key + - field: password + name: + zh-CN: 密码 + en-US: Password + type: password + required: false + +# collect metrics config list +metrics: + # metrics - server + - name: server + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + i18n: + zh-CN: 服务器信息 + en-US: Server + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: kvrocks_version + type: 1 + i18n: 
+ zh-CN: Kvrocks 服务版本 + en-US: Kvrocks Version + - field: redis_version + type: 1 + i18n: + zh-CN: Redis 服务版本 + en-US: Redis Version + - field: git_sha1 + type: 0 + i18n: + zh-CN: Kvrocks Git SHA1 + en-US: Kvrocks Git SHA1 + - field: kvrocks_mode + type: 1 + i18n: + zh-CN: 运行模式 + en-US: Server Mode + - field: os + type: 1 + i18n: + zh-CN: 操作系统 + en-US: Operating System + - field: arch_bits + type: 0 + i18n: + zh-CN: 架构 + en-US: Architecture Bits + - field: multiplexing_api + type: 1 + i18n: + zh-CN: IO多路复用器API + en-US: Multiplexing API + - field: atomicvar_api + type: 1 + i18n: + zh-CN: 原子操作处理API + en-US: Atomicvar API + - field: gcc_version + type: 1 + i18n: + zh-CN: GCC版本 + en-US: GCC Version + - field: process_id + type: 0 + i18n: + zh-CN: 进程ID + en-US: PID + - field: tcp_port + type: 0 + i18n: + zh-CN: TCP/IP监听端口 + en-US: TCP Port + - field: server_time_usec + type: 0 + i18n: + zh-CN: 服务器时间戳 + en-US: Server Time Usec + - field: uptime_in_seconds + type: 0 + i18n: + zh-CN: 运行时长(秒) + en-US: Uptime(Seconds) + - field: uptime_in_days + type: 0 + i18n: + zh-CN: 运行时长(天) + en-US: Uptime(Days) + - field: hz + type: 0 + i18n: + zh-CN: 事件循环频率 + en-US: hz + - field: configured_hz + type: 0 + i18n: + zh-CN: 配置的事件循环频率 + en-US: Configured hz + - field: lru_clock + type: 0 + i18n: + zh-CN: LRU时钟 + en-US: LRU Clock + - field: executable + type: 1 + i18n: + zh-CN: 服务器执行路径 + en-US: Server's Executable Path + - field: config_file + type: 1 + i18n: + zh-CN: 配置文件路径 + en-US: Config File Path + - field: io_threads_active + type: 0 + i18n: + zh-CN: 活跃IO线程数 + en-US: Active IO Threads + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + # metrics - clients + - name: clients + # 
metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 1 + i18n: + zh-CN: 客户端信息 + en-US: Clients + # collect metrics content + fields: + - field: connected_clients + type: 0 + i18n: + zh-CN: 已连接客户端数量 + en-US: Connected Clients + - field: maxclients + type: 0 + i18n: + zh-CN: 最大客户端连接数 + en-US: Max Clients + - field: blocked_clients + type: 0 + i18n: + zh-CN: 阻塞客户端数量 + en-US: Blocked Clients + - field: monitor_clients + type: 0 + i18n: + zh-CN: 监控的客户端数量 + en-US: monitor Clients + protocol: redis + redis: + host: ^_^host^_^ + port: ^_^port^_^ + username: ^_^username^_^ + password: ^_^password^_^ + timeout: ^_^timeout^_^ + # metrics - memory + - name: memory + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 2 + i18n: + zh-CN: 内存信息 + en-US: Memory + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: used_memory_rss + type: 0 + i18n: + zh-CN: 已使用内存(字节) + en-US: Used Memory RSS + - field: used_memory_rss_human + type: 0 + unit: MB + i18n: + zh-CN: 已使用物理内存 + en-US: Used Memory RSS Human + - field: used_memory_lua + type: 0 + i18n: + zh-CN: LUA脚本占用的内存(字节) + en-US: Used Memory LUA + - field: used_memory_lua_human + type: 0 + unit: KB + i18n: + zh-CN: LUA脚本占用的内存 + en-US: Used Memory LUA Human + - field: used_memory_startup + type: 0 + i18n: + zh-CN: 启动占用内存 + en-US: Used Memory Startup + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when 
protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - persistence + - name: persistence + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 3 + i18n: + zh-CN: 持久化信息 + en-US: Persistence + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: loading + type: 0 + i18n: + zh-CN: 是否正在加载持久化文件 + en-US: Loading + - field: bgsave_in_progress + type: 0 + i18n: + zh-CN: 是否正在进行bgsave + en-US: bgsave In Progress + - field: last_bgsave_time + type: 0 + i18n: + zh-CN: 最近一次bgsave命令执行时间 + en-US: Last Save Time + - field: last_bgsave_status + type: 1 + i18n: + zh-CN: 最近一次bgsave命令执行状态 + en-US: Last bgsave Status + - field: last_bgsave_time_sec + type: 0 + i18n: + zh-CN: 最近一次bgsave命令执行时间(秒) + en-US: Last bgsave Time Sec + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - stats + - name: stats + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 4 + i18n: + zh-CN: 全局统计信息 + en-US: Stats + # collect metrics 
content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: total_connections_received + type: 0 + i18n: + zh-CN: 已接受的总连接数 + en-US: Total Connections Received + - field: total_commands_processed + type: 0 + i18n: + zh-CN: 执行过的命令总数 + en-US: Total Commands Processed + - field: instantaneous_ops_per_sec + type: 0 + i18n: + zh-CN: 命令处理条数/秒 + en-US: Instantaneous Ops Per Sec + - field: total_net_input_bytes + type: 0 + i18n: + zh-CN: 输入总网络流量(字节) + en-US: Total Net Input Bytes + - field: total_net_output_bytes + type: 0 + i18n: + zh-CN: 输出总网络流量(字节) + en-US: Total Net Output Bytes + - field: instantaneous_input_kbps + type: 0 + i18n: + zh-CN: 输入字节数/秒 + en-US: Instantaneous Input Kbps + - field: instantaneous_output_kbps + type: 0 + i18n: + zh-CN: 输出字节数/秒 + en-US: Instantaneous Output Kbps + - field: sync_full + type: 0 + i18n: + zh-CN: 主从完全同步成功次数 + en-US: Sync Full + - field: sync_partial_ok + type: 0 + i18n: + zh-CN: 主从部分同步成功次数 + en-US: Sync Partial OK + - field: sync_partial_err + type: 0 + i18n: + zh-CN: 主从部分同步失败次数 + en-US: Sync Partial Error + - field: pubsub_channels + type: 0 + i18n: + zh-CN: 订阅的频道数量 + en-US: Pubsub Channels + - field: pubsub_patterns + type: 0 + i18n: + zh-CN: 订阅的模式数量 + en-US: Pubsub Patterns + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - replication + - name: replication + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the 
scheduling continue + priority: 5 + i18n: + zh-CN: 主从同步信息 + en-US: Replication + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: role + type: 1 + i18n: + zh-CN: 节点角色 + en-US: Role + - field: connected_slaves + type: 0 + i18n: + zh-CN: 已连接的从节点个数 + en-US: Connected Slaves + - field: master_repl_offset + type: 0 + i18n: + zh-CN: 主节点偏移量 + en-US: Master Repl Offset + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - cpu + - name: cpu + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 6 + i18n: + zh-CN: CPU消耗信息 + en-US: CPU + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: used_cpu_sys + type: 0 + i18n: + zh-CN: Kvrocks进程使用的CPU时钟总和(内核态) + en-US: Used CPU Sys + - field: used_cpu_user + type: 0 + i18n: + zh-CN: Kvrocks进程使用的CPU时钟总和(用户态) + en-US: Used CPU User + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - Commandstats + - name: Commandstats + # 
metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 8 + i18n: + zh-CN: 命令信息 + en-US: Command Stats + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: cmdstat_command + type: 1 + i18n: + zh-CN: 命令 + en-US: Command Stat Command + - field: cmdstat_info + type: 1 + i18n: + zh-CN: 命令监控信息 + en-US: Command Stat Info + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - cluster + - name: cluster + # collect metrics content + priority: 9 + i18n: + zh-CN: 集群信息 + en-US: Cluster + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: cluster_enabled + type: 0 + i18n: + zh-CN: 节点是否开启集群模式 + en-US: Cluster Enabled + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - commandstats + - name: commandstats + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only 
availability metrics collect success will the scheduling continue + priority: 9 + i18n: + zh-CN: 命令统计信息 + en-US: Command Stats + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: cmdstat_client + type: 1 + i18n: + zh-CN: 客户端命令统计 + en-US: cmdstat client + - field: cmdstat_config + type: 1 + i18n: + zh-CN: 配置命令统计 + en-US: cmdstat config + - field: cmdstat_get + type: 1 + i18n: + zh-CN: get + en-US: get + - field: cmdstat_hello + type: 1 + i18n: + zh-CN: hello + en-US: hello + - field: cmdstat_info + type: 1 + i18n: + zh-CN: info + en-US: info + - field: cmdstat_keys + type: 1 + i18n: + zh-CN: keys + en-US: keys + - field: cmdstat_ping + type: 1 + i18n: + zh-CN: ping + en-US: ping + - field: cmdstat_select + type: 1 + i18n: + zh-CN: select + en-US: select + - field: cmdstat_set + type: 1 + i18n: + zh-CN: set + en-US: set + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ + + # metrics - keyspace + - name: keyspace + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 9 + i18n: + zh-CN: 数据库统计信息 + en-US: Keyspace + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: db0 + type: 1 + i18n: + zh-CN: db0 + en-US: db0 + - field: sequence + type: 1 + i18n: + zh-CN: 序列 + en-US: sequence + - 
field: used_db_size + type: 1 + i18n: + zh-CN: 数据库使用大小 + en-US: used_db_size + - field: max_db_size + type: 1 + i18n: + zh-CN: 数据库最大使用大小 + en-US: max_db_size + - field: used_percent + type: 1 + i18n: + zh-CN: 数据库使用百分比 + en-US: used_percent + - field: disk_capacity + type: 1 + i18n: + zh-CN: 磁盘容量 + en-US: disk_capacity + - field: used_disk_size + type: 1 + i18n: + zh-CN: 占用磁盘大小 + en-US: used_disk_size + - field: used_disk_percent + type: 1 + i18n: + zh-CN: 占用磁盘百分比 + en-US: used_disk_percent + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk + protocol: redis + # the config content when protocol is redis + redis: + # redis host: ipv4 ipv6 host + host: ^_^host^_^ + # redis port + port: ^_^port^_^ + # username + username: ^_^username^_^ + # password + password: ^_^password^_^ + # timeout unit:ms + timeout: ^_^timeout^_^ From 1fac6ab2fa8afe6bc61a3239dc6b9ac1623270bb Mon Sep 17 00:00:00 2001 From: YuLuo Date: Wed, 21 Aug 2024 18:52:01 +0800 Subject: [PATCH 212/257] [e2e] add some configuration args to observation atest running status (#2565) Signed-off-by: yuluo-yx Co-authored-by: tomsun28 --- .github/workflows/backend-build-test.yml | 6 ++++-- e2e/docker-compose.yml | 3 ++- e2e/report/report.md | 1 + e2e/testsuite.yaml | 11 +++++------ 4 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 e2e/report/report.md diff --git a/.github/workflows/backend-build-test.yml b/.github/workflows/backend-build-test.yml index d9f16f5a68f..38aaaaf9bb3 100644 --- a/.github/workflows/backend-build-test.yml +++ b/.github/workflows/backend-build-test.yml @@ -72,9 +72,11 @@ jobs: sudo docker-compose up --exit-code-from testing --remove-orphans # upload application logs - - name: Upload logs + - name: Upload logs & API test reports uses: actions/upload-artifact@v3 if: always() with: name: hz-logs-${{ github.run_id }} - path: e2e/logs/ + path: | + e2e/logs/ + e2e/report/ diff --git a/e2e/docker-compose.yml b/e2e/docker-compose.yml index 
a8527fe519a..5a000e14054 100644 --- a/e2e/docker-compose.yml +++ b/e2e/docker-compose.yml @@ -25,7 +25,8 @@ services: volumes: - ./data/:/work/data/ - ./testsuite.yaml:/work/testsuite.yaml - command: atest run -p /work/testsuite.yaml --report md + - ./report/report.md:/report.md + command: atest run -p /work/testsuite.yaml --level warn --thread 3 --report md --report-file /report.md depends_on: hertzbeat: condition: service_healthy diff --git a/e2e/report/report.md b/e2e/report/report.md new file mode 100644 index 00000000000..43ed15285b4 --- /dev/null +++ b/e2e/report/report.md @@ -0,0 +1 @@ +keep, not delete! diff --git a/e2e/testsuite.yaml b/e2e/testsuite.yaml index e9989083cae..f1b2193151f 100644 --- a/e2e/testsuite.yaml +++ b/e2e/testsuite.yaml @@ -58,11 +58,6 @@ items: } expect: statusCode: 409 -- name: missing-auth-header - request: - api: /api/monitors - expect: - statusCode: 401 - name: monitorList request: api: /api/monitors?pageIndex=0&pageSize=8 @@ -71,7 +66,11 @@ items: expect: bodyFieldsExpect: code: "0" - +- name: missing-auth-header + request: + api: /api/monitors + expect: + statusCode: 401 - name: createSitemapMonitor request: api: /api/monitor From 4dceb42dc70645362f70e4e6441a2445cb7f075c Mon Sep 17 00:00:00 2001 From: aias00 Date: Wed, 21 Aug 2024 19:53:26 +0800 Subject: [PATCH 213/257] [feature] add iceberg help md (#2573) Co-authored-by: Calvin Co-authored-by: YuLuo --- home/docs/help/hive.md | 2 +- home/docs/help/iceberg.md | 77 +++++++++++++++++++ .../current/help/hive.md | 2 +- .../current/help/iceberg.md | 77 +++++++++++++++++++ home/sidebars.json | 1 + 5 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 home/docs/help/iceberg.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md diff --git a/home/docs/help/hive.md b/home/docs/help/hive.md index 806969c2e7c..3dfbf0bfb78 100644 --- a/home/docs/help/hive.md +++ b/home/docs/help/hive.md @@ -5,7 +5,7 @@ sidebar_label: Apache Hive 
keywords: [open source monitoring tool, open source apache hive monitoring tool, monitoring apache hive metrics] --- -> Collect and monitor the general performance metrics exposed by the SpringBoot actuator. +> Collect and monitor the general performance metrics exposed by the Apache Hive. ## Pre-monitoring operations diff --git a/home/docs/help/iceberg.md b/home/docs/help/iceberg.md new file mode 100644 index 00000000000..5b63bca574d --- /dev/null +++ b/home/docs/help/iceberg.md @@ -0,0 +1,77 @@ +--- +id: iceberg +Title: Monitoring Apache Iceberg +sidebar_label: Apache Iceberg +keywords: [open source monitoring tool, open source apache hive monitoring tool, monitoring apache iceberg metrics] +--- + +> Collect and monitor the general performance metrics exposed by the Apache Iceberg. + +## Pre-monitoring operations + +If you want to monitor information in `Apache Iceberg` with this monitoring type, you need to open your `Hive Server2` in remoting mode. + +**1、Enable metastore:** + +```shell +hive --service metastore & +``` + +**2. Enable hive server2:** + +```shell +hive --service hiveserver2 & +``` + +### Configure parameters + +| Parameter name | Parameter Help describes the | +|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | +| Monitoring Name | A name that identifies this monitoring that needs to be unique. | +| Port | The default port provided by the database is 10002. 
| +| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | +| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | +| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | +| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | + +### Collect metrics + +#### metric Collection: basic + +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-----------------------------------------------------------| +| vm_name | None | The name of the virtual machine (VM) running HiveServer2. | +| vm_vendor | None | The vendor or provider of the virtual machine. | +| vm_version | None | The version of the virtual machine. | +| up_time | None | The duration for which HiveServer2 has been running. | + +#### metric Collection: enviroment + +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|-------------------------------------------------------------------| +| https_proxyPort | None | The port number used for HTTPS proxy communication. | +| os_name | None | The name of the operating system on which HiveServer2 is running. | +| os_version | None | The version of the operating system. | +| os_arch | None | The architecture of the operating system. | +| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | +| java_runtime_version | None | The version of the Java runtime environment. 
| + +#### metric Collection: thread + +| Metric Name | metric unit | Metrics help describe | +|----------------------|-------------|----------------------------------------------------------------------| +| thread_count | None | The current number of threads being used by HiveServer2. | +| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | +| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | +| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. | + +#### metric Collection: code_cache + +| Metric Name | metric unit | Metrics help describe | +|-------------|-------------|-------------------------------------------------------------------------| +| committed | MB | The amount of memory currently allocated for the memory pool. | +| init | MB | The initial amount of memory requested for the memory pool. | +| max | MB | The maximum amount of memory that can be allocated for the memory pool. | +| used | MB | The amount of memory currently being used by the memory pool. 
| + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md index 3b41d3979c6..8396a870f11 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md @@ -5,7 +5,7 @@ sidebar_label: Apache Hive keywords: [开源监控工具, 开源 Apache Hive 监控工具, 监控 Apache Hive 指标] --- -> 收集和监控由 SpringBoot Actuator 提供的常规性能指标。 +> 收集和监控由 Apache Hive 提供的常规性能指标。 ## 监控前操作 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md new file mode 100644 index 00000000000..5ee2f434e96 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md @@ -0,0 +1,77 @@ +--- +id: iceberg +Title: 监控 Apache Iceberg +sidebar_label: Apache Iceberg +keywords: [开源监控工具, 开源 Apache Iceberg 监控工具, 监控 Apache Iceberg 指标] +--- + +> 收集和监控由 Apache Iceberg 提供的常规性能指标。 + +## 监控前操作 + +如果您想使用此监控类型监控 Apache Iceberg 的信息,您需要以远程模式启动您的 Hive Server2。 + +**1、启用元数据存储:** + +```shell +hive --service metastore & +``` + +**2. 
启用 Hive Server2:** + +```shell +hive --service hiveserver2 & +``` + +### 配置参数 + +| 参数名称 | 参数描述 | +|----------|--------------------------------------------------------| +| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | +| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | +| 端口 | 数据库提供的默认端口为 10002。 | +| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | +| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | +| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | +| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | + +### 采集指标 + +#### 指标收集: 基本信息 + +| 指标名称 | 指标单位 | 指标描述 | +|--------|------|-----------------------------| +| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | +| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | +| 虚拟机版本 | 无 | 虚拟机的版本。 | +| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | + +#### 指标收集: 环境信息 + +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|--------------------------------| +| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | +| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | +| 操作系统版本 | 无 | 操作系统的版本。 | +| 操作系统架构 | 无 | 操作系统的架构。 | +| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | +| java运行环境版本 | 无 | Java 运行时环境的版本。 | + +#### 指标收集: 线程信息 + +| 指标名称 | 指标单位 | 指标描述 | +|--------|------|------------------------------| +| 线程数量 | None | HiveServer2 当前正在使用的线程数。 | +| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | +| 最高线程数 | None | HiveServer2 在任何给定时间使用的最高线程数。 | +| 守护线程数 | None | HiveServer2 当前活动的守护线程数。 | + +#### 指标收集: 代码缓存 + +| 指标名称 | 指标单位 | 指标描述 | +|------------|------|---------------| +| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | +| 内存池初始内存 | MB | 内存池请求的初始内存量。 | +| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | +| 内存池内存使用量 | MB | 内存池已使用内存量 | + diff --git a/home/sidebars.json b/home/sidebars.json index 8db82fa48c4..90ca715fbc8 100755 --- a/home/sidebars.json +++ b/home/sidebars.json @@ -275,6 +275,7 @@ "help/hdfs_datanode", "help/iotdb", "help/hive", + "help/iceberg", "help/airflow", "help/clickhouse", "help/elasticsearch", From c2d92eb57d873c76c72bc7b85bda385c8c3c294b Mon Sep 17 00:00:00 2001 From: Jast Date: Wed, 21 Aug 2024 23:06:39 +0800 Subject: [PATCH 214/257] [Improve] 
improve doc check (#2572) Co-authored-by: YuLuo Co-authored-by: tomsun28 Co-authored-by: Calvin --- .github/workflows/doc-build-test.yml | 4 + .markdownlint-cli2.jsonc | 39 ++ home/README.md | 1 - home/blog/2022-06-01-hertzbeat-v1.0.md | 11 +- home/blog/2022-06-19-hertzbeat-v1.1.0.md | 12 +- home/blog/2022-06-22-one-step-up.md | 10 +- home/blog/2022-07-10-hertzbeat-v1.1.1.md | 19 +- home/blog/2022-09-04-hertzbeat-v1.1.3.md | 9 +- home/blog/2022-09-10-ssl-practice.md | 18 +- home/blog/2022-10-08-hertzbeat-v1.2.0.md | 6 +- home/blog/2022-11-28-hertzbeat-v1.2.2.md | 9 +- home/blog/2022-12-19-new-committer.md | 3 +- home/blog/2022-12-28-hertzbeat-v1.2.3.md | 7 +- home/blog/2023-01-05-monitor-iotdb.md | 19 +- home/blog/2023-01-08-monitor-shenyu.md | 33 +- home/blog/2023-02-02-monitor-dynamic-tp.md | 25 +- home/blog/2023-02-10-new-committer.md | 39 +- home/blog/2023-02-11-monitor-mysql.md | 15 +- home/blog/2023-02-15-monitor-linux.md | 15 +- home/blog/2023-03-15-hertzbeat-v1.3.0.md | 18 +- home/blog/2023-03-22-monitor-springboot2.md | 28 +- home/blog/2023-05-09-hertzbeat-v1.3.1.md | 25 +- home/blog/2023-05-11-greptimedb-store.md | 16 +- home/blog/2023-07-05-hertzbeat-v1.3.2.md | 22 +- home/blog/2023-08-14-hertzbeat-v1.4.0.md | 140 ++-- home/blog/2023-08-28-new-committer.md | 4 +- home/blog/2023-09-26-hertzbeat-v1.4.1.md | 148 ++-- home/blog/2023-11-12-hertzbeat-v1.4.2.md | 166 ++--- home/blog/2023-12-11-hertzbeat-v1.4.3.md | 188 ++--- home/blog/2024-01-11-new-committer.md | 19 +- home/blog/2024-01-18-hertzbeat-v1.4.4.md | 222 +++--- home/blog/2024-04-17-to-apache.md | 19 +- ...-09-hertzbeat-ospp-subject-introduction.md | 6 +- .../2024-06-11-hertzbeat-v1.6.0-update.md | 8 +- home/blog/2024-06-15-hertzbeat-v1.6.0.md | 644 +++++++++--------- home/blog/2024-07-07-new-committer.md | 1 - home/blog/2024-07-08-new-committer.md | 1 - home/blog/2024-07-28-new-committer.md | 2 +- home/blog/2024-08-18-new-committer.md | 4 +- home/docs/advanced/extend-http-default.md | 1 - 
.../advanced/extend-http-example-hertzbeat.md | 8 +- .../advanced/extend-http-example-token.md | 12 +- home/docs/advanced/extend-http-jsonpath.md | 3 +- home/docs/advanced/extend-http.md | 5 +- home/docs/advanced/extend-jdbc.md | 13 +- home/docs/advanced/extend-jmx.md | 3 +- home/docs/advanced/extend-ngql.md | 9 +- home/docs/advanced/extend-point.md | 3 +- home/docs/advanced/extend-snmp.md | 3 +- home/docs/advanced/extend-ssh.md | 25 +- home/docs/advanced/extend-telnet.md | 3 +- home/docs/advanced/extend-tutorial.md | 8 +- home/docs/community/become_committer.md | 3 +- home/docs/community/become_pmc_member.md | 3 +- .../community/code-style-and-quality-guide.md | 95 ++- home/docs/community/contribution.md | 3 +- home/docs/community/development.md | 4 +- home/docs/community/document.md | 5 +- home/docs/community/how-to-release.md | 38 +- home/docs/community/how-to-verify.md | 18 +- home/docs/community/mailing_lists.md | 8 +- home/docs/community/new_committer_process.md | 3 +- home/docs/community/new_pmc_member_process.md | 3 +- home/docs/community/submit-code.md | 4 +- home/docs/download.md | 8 +- home/docs/help/activemq.md | 1 - home/docs/help/ai_config.md | 15 +- home/docs/help/airflow.md | 1 - home/docs/help/alert_dingtalk.md | 2 +- home/docs/help/alert_discord.md | 4 +- home/docs/help/alert_email.md | 2 +- home/docs/help/alert_feishu.md | 4 +- home/docs/help/alert_slack.md | 2 +- home/docs/help/alert_smn.md | 2 +- home/docs/help/alert_telegram.md | 4 +- home/docs/help/alert_threshold_expr.md | 2 +- home/docs/help/alert_wework.md | 4 +- home/docs/help/almalinux.md | 1 - home/docs/help/api.md | 1 - home/docs/help/centos.md | 1 - home/docs/help/clickhouse.md | 1 - home/docs/help/debian.md | 1 - home/docs/help/dm.md | 1 - home/docs/help/dns.md | 1 - home/docs/help/docker.md | 3 +- home/docs/help/doris_be.md | 1 - home/docs/help/doris_fe.md | 1 - home/docs/help/dynamic_tp.md | 1 - home/docs/help/elasticsearch.md | 1 - home/docs/help/euleros.md | 1 - 
home/docs/help/flink.md | 1 - home/docs/help/flink_on_yarn.md | 1 - home/docs/help/freebsd.md | 1 - home/docs/help/ftp.md | 1 - home/docs/help/fullsite.md | 5 +- home/docs/help/guide.md | 6 +- home/docs/help/hadoop.md | 1 - home/docs/help/hbase_master.md | 1 - home/docs/help/hbase_regionserver.md | 1 - home/docs/help/hdfs_datanode.md | 1 - home/docs/help/hdfs_namenode.md | 1 - home/docs/help/hive.md | 1 - home/docs/help/http_sd.md | 1 - home/docs/help/huawei_switch.md | 1 - home/docs/help/hugegraph.md | 1 - home/docs/help/imap.md | 1 - home/docs/help/influxdb.md | 1 - home/docs/help/influxdb_promql.md | 1 - home/docs/help/iotdb.md | 1 - home/docs/help/issue.md | 25 +- home/docs/help/jetty.md | 1 - home/docs/help/jvm.md | 3 +- home/docs/help/kafka.md | 1 - home/docs/help/kafka_promql.md | 1 - home/docs/help/kubernetes.md | 5 +- home/docs/help/linux.md | 1 - home/docs/help/mariadb.md | 3 +- home/docs/help/memcached.md | 5 +- home/docs/help/mongodb.md | 1 - home/docs/help/mongodb_atlas.md | 1 - home/docs/help/mysql.md | 3 +- home/docs/help/nacos.md | 1 - home/docs/help/nebulagraph.md | 15 +- home/docs/help/nebulagraph_cluster.md | 1 - home/docs/help/nginx.md | 11 +- home/docs/help/ntp.md | 1 - home/docs/help/openai.md | 5 +- home/docs/help/opengauss.md | 1 - home/docs/help/opensuse.md | 1 - home/docs/help/oracle.md | 1 - home/docs/help/ping.md | 7 +- home/docs/help/plugin.md | 3 +- home/docs/help/pop3.md | 1 - home/docs/help/port.md | 1 - home/docs/help/postgresql.md | 1 - home/docs/help/prestodb.md | 1 - home/docs/help/process.md | 1 - home/docs/help/prometheus.md | 1 - home/docs/help/rabbitmq.md | 3 +- home/docs/help/redhat.md | 1 - home/docs/help/redis.md | 1 - home/docs/help/redis_cluster.md | 6 +- home/docs/help/rocketmq.md | 1 - home/docs/help/rockylinux.md | 1 - home/docs/help/shenyu.md | 1 - home/docs/help/smtp.md | 3 +- home/docs/help/spark.md | 3 +- home/docs/help/spring_gateway.md | 1 - home/docs/help/springboot2.md | 1 - home/docs/help/springboot3.md | 1 - 
home/docs/help/sqlserver.md | 10 +- home/docs/help/ssl_cert.md | 1 - home/docs/help/status.md | 7 +- home/docs/help/tidb.md | 1 - home/docs/help/time_expression.md | 1 - home/docs/help/tomcat.md | 1 - home/docs/help/ubuntu.md | 1 - home/docs/help/udp_port.md | 1 - home/docs/help/website.md | 1 - home/docs/help/websocket.md | 1 - home/docs/help/windows.md | 9 +- home/docs/help/yarn.md | 1 - home/docs/help/zookeeper.md | 1 - home/docs/introduce.md | 141 ++-- home/docs/start/account-modify.md | 6 +- home/docs/start/custom-config.md | 5 +- home/docs/start/docker-compose-deploy.md | 15 +- home/docs/start/docker-deploy.md | 18 +- home/docs/start/greptime-init.md | 8 +- home/docs/start/influxdb-init.md | 16 +- home/docs/start/iotdb-init.md | 2 +- home/docs/start/mysql-change.md | 13 +- home/docs/start/package-deploy.md | 33 +- home/docs/start/postgresql-change.md | 9 +- home/docs/start/quickstart.md | 5 +- home/docs/start/sslcert-practice.md | 8 +- home/docs/start/tdengine-init.md | 27 +- home/docs/start/update-1.6.0.md | 8 +- home/docs/start/victoria-metrics-init.md | 10 +- home/docs/template.md | 2 +- .../2022-06-01-hertzbeat-v1.0.md | 15 +- .../2022-06-19-hertzbeat-v1.1.0.md | 19 +- .../2022-06-22-one-step-up.md | 19 +- .../2022-07-10-hertzbeat-v1.1.1.md | 13 +- .../2022-09-04-hertzbeat-v1.1.3.md | 18 +- .../2022-09-10-ssl-practice.md | 18 +- .../2022-10-08-hertzbeat-v1.2.0.md | 9 +- .../2022-11-28-hertzbeat-v1.2.2.md | 9 +- .../2022-12-19-new-committer.md | 3 +- .../2022-12-28-hertzbeat-v1.2.3.md | 7 +- .../2023-01-05-monitor-iotdb.md | 29 +- .../2023-01-08-monitor-shenyu.md | 27 +- .../2023-02-02-monitor-dynamic-tp.md | 21 +- .../2023-02-10-new-committer.md | 43 +- .../2023-02-11-monitor-mysql.md | 15 +- .../2023-02-15-monitor-linux.md | 19 +- .../2023-03-15-hertzbeat-v1.3.0.md | 16 +- .../2023-03-22-monitor-springboot2.md | 21 +- .../2023-05-09-hertzbeat-v1.3.1.md | 17 +- .../2023-05-11-greptimedb-store.md | 12 +- .../2023-07-05-hertzbeat-v1.3.2.md | 22 +- 
.../2023-08-14-hertzbeat-v1.4.0.md | 116 ++-- .../2023-08-28-new-committer.md | 2 +- .../2023-09-26-hertzbeat-v1.4.1.md | 18 +- .../2023-11-12-hertzbeat-v1.4.2.md | 14 +- .../2023-12-11-hertzbeat-v1.4.3.md | 129 ++-- .../2024-01-11-new-committer.md | 17 +- .../2024-01-18-hertzbeat-v1.4.4.md | 155 ++--- .../2024-04-17-to-apache.md | 11 +- ...-09-hertzbeat-ospp-subject-introduction.md | 4 +- .../2024-06-15-hertzbeat-v1.6.0.md | 588 ++++++++-------- .../2024-07-07-new-committer.md | 1 - .../2024-07-28-new-committer.md | 2 +- .../current/advanced/extend-http-default.md | 3 +- .../advanced/extend-http-example-hertzbeat.md | 6 +- .../advanced/extend-http-example-token.md | 6 +- .../current/advanced/extend-http-jsonpath.md | 3 +- .../current/advanced/extend-http.md | 9 +- .../current/advanced/extend-jdbc.md | 15 +- .../current/advanced/extend-jmx.md | 7 +- .../current/advanced/extend-ngql.md | 11 +- .../current/advanced/extend-point.md | 1 - .../current/advanced/extend-snmp.md | 7 +- .../current/advanced/extend-ssh.md | 29 +- .../current/advanced/extend-telnet.md | 7 +- .../current/advanced/extend-tutorial.md | 3 +- .../current/community/become_committer.md | 3 +- .../current/community/become_pmc_member.md | 3 +- .../community/code-style-and-quality-guide.md | 99 ++- .../current/community/contribution.md | 3 +- .../current/community/development.md | 2 +- .../current/community/document.md | 5 +- .../current/community/how-to-release.md | 37 +- .../current/community/how-to-verify.md | 20 +- .../current/community/mailing_lists.md | 8 +- .../community/new_committer_process.md | 6 +- .../community/new_pmc_member_process.md | 3 +- .../current/community/submit-code.md | 6 +- .../current/download.md | 8 +- .../current/help/activemq.md | 1 - .../current/help/ai_config.md | 15 +- .../current/help/airflow.md | 1 - .../current/help/alert_dingtalk.md | 6 +- .../current/help/alert_discord.md | 4 +- .../current/help/alert_email.md | 8 +- .../current/help/alert_feishu.md | 8 +- 
.../current/help/alert_slack.md | 2 +- .../current/help/alert_smn.md | 2 +- .../current/help/alert_telegram.md | 4 +- .../current/help/alert_webhook.md | 6 +- .../current/help/alert_wework.md | 8 +- .../current/help/almalinux.md | 1 - .../current/help/api.md | 1 - .../current/help/centos.md | 1 - .../current/help/clickhouse.md | 1 - .../current/help/debian.md | 1 - .../current/help/dm.md | 1 - .../current/help/dns.md | 1 - .../current/help/docker.md | 1 - .../current/help/doris_fe.md | 1 - .../current/help/dynamic_tp.md | 1 - .../current/help/elasticsearch.md | 1 - .../current/help/euleros.md | 1 - .../current/help/flink.md | 1 - .../current/help/flink_on_yarn.md | 1 - .../current/help/freebsd.md | 1 - .../current/help/ftp.md | 1 - .../current/help/fullsite.md | 5 +- .../current/help/guide.md | 8 +- .../current/help/hadoop.md | 1 - .../current/help/hbase_master.md | 1 - .../current/help/hbase_regionserver.md | 1 - .../current/help/hdfs_datanode.md | 1 - .../current/help/hdfs_namenode.md | 1 - .../current/help/hive.md | 1 - .../current/help/huawei_switch.md | 1 - .../current/help/hugegraph.md | 1 - .../current/help/imap.md | 1 - .../current/help/influxdb.md | 1 - .../current/help/influxdb_promql.md | 1 - .../current/help/iotdb.md | 3 +- .../current/help/issue.md | 29 +- .../current/help/jetty.md | 1 - .../current/help/jvm.md | 3 +- .../current/help/kafka.md | 3 +- .../current/help/kafka_promql.md | 1 - .../current/help/kubernetes.md | 5 +- .../current/help/linux.md | 1 - .../current/help/mariadb.md | 3 +- .../current/help/memcached.md | 3 +- .../current/help/mongodb.md | 1 - .../current/help/mongodb_atlas.md | 1 - .../current/help/mysql.md | 3 +- .../current/help/nacos.md | 1 - .../current/help/nebulagraph.md | 15 +- .../current/help/nebulagraph_cluster.md | 1 - .../current/help/nginx.md | 11 +- .../current/help/ntp.md | 1 - .../current/help/openai.md | 7 +- .../current/help/opengauss.md | 1 - .../current/help/opensuse.md | 1 - .../current/help/oracle.md | 1 - 
.../current/help/ping.md | 7 +- .../current/help/plugin.md | 1 - .../current/help/pop3.md | 1 - .../current/help/port.md | 1 - .../current/help/postgresql.md | 1 - .../current/help/prestodb.md | 1 - .../current/help/process.md | 1 - .../current/help/prometheus.md | 1 - .../current/help/pulsar.md | 1 - .../current/help/rabbitmq.md | 3 +- .../current/help/redhat.md | 1 - .../current/help/redis.md | 1 - .../current/help/redis_cluster.md | 6 +- .../current/help/rocketmq.md | 1 - .../current/help/rockylinux.md | 1 - .../current/help/shenyu.md | 1 - .../current/help/smtp.md | 3 +- .../current/help/spring_gateway.md | 1 - .../current/help/springboot2.md | 1 - .../current/help/springboot3.md | 1 - .../current/help/sqlserver.md | 4 +- .../current/help/ssl_cert.md | 1 - .../current/help/status.md | 6 +- .../current/help/tidb.md | 1 - .../current/help/time_expression.md | 1 - .../current/help/tomcat.md | 2 +- .../current/help/ubuntu.md | 1 - .../current/help/udp_port.md | 1 - .../current/help/website.md | 1 - .../current/help/websocket.md | 1 - .../current/help/windows.md | 9 +- .../current/help/yarn.md | 1 - .../current/help/zookeeper.md | 1 - .../current/introduce.md | 27 +- .../current/others/resource.md | 1 - .../current/start/account-modify.md | 8 +- .../current/start/custom-config.md | 5 +- .../current/start/docker-compose-deploy.md | 19 +- .../current/start/docker-deploy.md | 37 +- .../current/start/greptime-init.md | 4 +- .../current/start/influxdb-init.md | 10 +- .../current/start/iotdb-init.md | 2 +- .../current/start/mysql-change.md | 17 +- .../current/start/package-deploy.md | 35 +- .../current/start/postgresql-change.md | 13 +- .../current/start/quickstart.md | 5 +- .../current/start/sslcert-practice.md | 12 +- .../current/start/tdengine-init.md | 14 +- .../current/start/victoria-metrics-init.md | 10 +- .../current/template.md | 2 +- .../advanced/extend-http-default.md | 3 +- .../advanced/extend-http-example-hertzbeat.md | 8 +- 
.../advanced/extend-http-example-token.md | 8 +- .../advanced/extend-http-jsonpath.md | 3 +- .../version-v1.4.x/advanced/extend-http.md | 9 +- .../version-v1.4.x/advanced/extend-jdbc.md | 15 +- .../version-v1.4.x/advanced/extend-jmx.md | 7 +- .../version-v1.4.x/advanced/extend-point.md | 1 - .../version-v1.4.x/advanced/extend-snmp.md | 7 +- .../version-v1.4.x/advanced/extend-ssh.md | 29 +- .../advanced/extend-tutorial.md | 3 +- .../version-v1.4.x/help/activemq.md | 1 - .../version-v1.4.x/help/airflow.md | 1 - .../version-v1.4.x/help/alert_dingtalk.md | 6 +- .../version-v1.4.x/help/alert_discord.md | 4 +- .../version-v1.4.x/help/alert_email.md | 8 +- .../version-v1.4.x/help/alert_feishu.md | 8 +- .../version-v1.4.x/help/alert_slack.md | 2 +- .../version-v1.4.x/help/alert_smn.md | 2 +- .../version-v1.4.x/help/alert_telegram.md | 4 +- .../version-v1.4.x/help/alert_threshold.md | 16 +- .../help/alert_threshold_expr.md | 14 +- .../version-v1.4.x/help/alert_webhook.md | 6 +- .../version-v1.4.x/help/alert_wework.md | 8 +- .../version-v1.4.x/help/api.md | 1 - .../version-v1.4.x/help/centos.md | 1 - .../version-v1.4.x/help/dm.md | 1 - .../version-v1.4.x/help/docker.md | 1 - .../version-v1.4.x/help/dynamic_tp.md | 1 - .../version-v1.4.x/help/fullsite.md | 5 +- .../version-v1.4.x/help/guide.md | 8 +- .../version-v1.4.x/help/hadoop.md | 1 - .../version-v1.4.x/help/hive.md | 1 - .../version-v1.4.x/help/iotdb.md | 3 +- .../version-v1.4.x/help/issue.md | 29 +- .../version-v1.4.x/help/jetty.md | 1 - .../version-v1.4.x/help/jvm.md | 3 +- .../version-v1.4.x/help/kafka.md | 3 +- .../version-v1.4.x/help/kubernetes.md | 5 +- .../version-v1.4.x/help/linux.md | 1 - .../version-v1.4.x/help/mariadb.md | 1 - .../version-v1.4.x/help/memcached.md | 5 +- .../version-v1.4.x/help/mysql.md | 1 - .../version-v1.4.x/help/nebulagraph.md | 15 +- .../version-v1.4.x/help/nginx.md | 11 +- .../version-v1.4.x/help/ntp.md | 1 - .../version-v1.4.x/help/opengauss.md | 1 - .../version-v1.4.x/help/oracle.md | 
1 - .../version-v1.4.x/help/ping.md | 7 +- .../version-v1.4.x/help/pop3.md | 1 - .../version-v1.4.x/help/port.md | 1 - .../version-v1.4.x/help/postgresql.md | 1 - .../version-v1.4.x/help/rabbitmq.md | 3 +- .../version-v1.4.x/help/redis.md | 1 - .../version-v1.4.x/help/shenyu.md | 1 - .../version-v1.4.x/help/smtp.md | 3 +- .../version-v1.4.x/help/spring_gateway.md | 1 - .../version-v1.4.x/help/springboot2.md | 1 - .../version-v1.4.x/help/sqlserver.md | 4 +- .../version-v1.4.x/help/ssl_cert.md | 1 - .../version-v1.4.x/help/tomcat.md | 2 +- .../version-v1.4.x/help/ubuntu.md | 1 - .../version-v1.4.x/help/website.md | 1 - .../version-v1.4.x/help/windows.md | 9 +- .../version-v1.4.x/help/zookeeper.md | 1 - .../version-v1.4.x/introduce.md | 31 +- .../version-v1.4.x/others/contributing.md | 4 +- .../version-v1.4.x/others/developer.md | 2 +- .../version-v1.4.x/others/huaweicloud.md | 2 +- .../version-v1.4.x/others/images-deploy.md | 26 +- .../version-v1.4.x/others/resource.md | 1 - .../version-v1.4.x/others/sponsor.md | 3 +- .../version-v1.4.x/start/account-modify.md | 8 +- .../version-v1.4.x/start/custom-config.md | 5 +- .../version-v1.4.x/start/docker-deploy.md | 46 +- .../version-v1.4.x/start/greptime-init.md | 8 +- .../version-v1.4.x/start/influxdb-init.md | 10 +- .../version-v1.4.x/start/iotdb-init.md | 10 +- .../version-v1.4.x/start/mysql-change.md | 18 +- .../version-v1.4.x/start/package-deploy.md | 31 +- .../version-v1.4.x/start/postgresql-change.md | 14 +- .../version-v1.4.x/start/quickstart.md | 13 +- .../version-v1.4.x/start/sslcert-practice.md | 18 +- .../version-v1.4.x/start/tdengine-init.md | 14 +- .../start/victoria-metrics-init.md | 10 +- .../version-v1.4.x/template.md | 2 +- .../advanced/extend-http-default.md | 3 +- .../advanced/extend-http-example-hertzbeat.md | 6 +- .../advanced/extend-http-example-token.md | 6 +- .../advanced/extend-http-jsonpath.md | 3 +- .../version-v1.5.x/advanced/extend-http.md | 9 +- .../version-v1.5.x/advanced/extend-jdbc.md | 15 
+- .../version-v1.5.x/advanced/extend-jmx.md | 7 +- .../version-v1.5.x/advanced/extend-ngql.md | 11 +- .../version-v1.5.x/advanced/extend-point.md | 1 - .../version-v1.5.x/advanced/extend-snmp.md | 7 +- .../version-v1.5.x/advanced/extend-ssh.md | 29 +- .../advanced/extend-tutorial.md | 3 +- .../community/become_committer.md | 3 +- .../community/become_pmc_member.md | 3 +- .../community/code-style-and-quality-guide.md | 37 +- .../version-v1.5.x/community/contribution.md | 3 +- .../version-v1.5.x/community/development.md | 2 +- .../version-v1.5.x/community/document.md | 5 +- .../community/how-to-release.md | 37 +- .../version-v1.5.x/community/how-to-verify.md | 20 +- .../version-v1.5.x/community/mailing_lists.md | 8 +- .../community/new_committer_process.md | 3 +- .../community/new_pmc_member_process.md | 3 +- .../version-v1.5.x/community/submit-code.md | 6 +- .../version-v1.5.x/download.md | 9 +- .../version-v1.5.x/help/activemq.md | 1 - .../version-v1.5.x/help/airflow.md | 1 - .../version-v1.5.x/help/alert_dingtalk.md | 6 +- .../version-v1.5.x/help/alert_discord.md | 4 +- .../version-v1.5.x/help/alert_email.md | 8 +- .../version-v1.5.x/help/alert_feishu.md | 8 +- .../version-v1.5.x/help/alert_slack.md | 2 +- .../version-v1.5.x/help/alert_smn.md | 2 +- .../version-v1.5.x/help/alert_telegram.md | 4 +- .../version-v1.5.x/help/alert_webhook.md | 6 +- .../version-v1.5.x/help/alert_wework.md | 8 +- .../version-v1.5.x/help/almalinux.md | 1 - .../version-v1.5.x/help/api.md | 1 - .../version-v1.5.x/help/centos.md | 1 - .../version-v1.5.x/help/clickhouse.md | 1 - .../version-v1.5.x/help/debian.md | 1 - .../version-v1.5.x/help/dm.md | 1 - .../version-v1.5.x/help/dns.md | 1 - .../version-v1.5.x/help/docker.md | 1 - .../version-v1.5.x/help/doris_fe.md | 1 - .../version-v1.5.x/help/dynamic_tp.md | 1 - .../version-v1.5.x/help/elasticsearch.md | 1 - .../version-v1.5.x/help/euleros.md | 1 - .../version-v1.5.x/help/flink.md | 1 - .../version-v1.5.x/help/freebsd.md | 1 - 
.../version-v1.5.x/help/ftp.md | 1 - .../version-v1.5.x/help/fullsite.md | 5 +- .../version-v1.5.x/help/guide.md | 8 +- .../version-v1.5.x/help/hadoop.md | 1 - .../version-v1.5.x/help/hbase_master.md | 1 - .../version-v1.5.x/help/hbase_regionserver.md | 1 - .../version-v1.5.x/help/hdfs_datanode.md | 1 - .../version-v1.5.x/help/hdfs_namenode.md | 1 - .../version-v1.5.x/help/hive.md | 1 - .../version-v1.5.x/help/huawei_switch.md | 1 - .../version-v1.5.x/help/hugegraph.md | 1 - .../version-v1.5.x/help/influxdb.md | 1 - .../version-v1.5.x/help/influxdb_promql.md | 1 - .../version-v1.5.x/help/iotdb.md | 3 +- .../version-v1.5.x/help/issue.md | 29 +- .../version-v1.5.x/help/jetty.md | 1 - .../version-v1.5.x/help/jvm.md | 3 +- .../version-v1.5.x/help/kafka.md | 3 +- .../version-v1.5.x/help/kafka_promql.md | 1 - .../version-v1.5.x/help/kubernetes.md | 5 +- .../version-v1.5.x/help/linux.md | 1 - .../version-v1.5.x/help/mariadb.md | 1 - .../version-v1.5.x/help/memcached.md | 3 +- .../version-v1.5.x/help/mongodb.md | 1 - .../version-v1.5.x/help/mysql.md | 1 - .../version-v1.5.x/help/nacos.md | 1 - .../version-v1.5.x/help/nebulagraph.md | 15 +- .../help/nebulagraph_cluster.md | 1 - .../version-v1.5.x/help/nginx.md | 11 +- .../version-v1.5.x/help/ntp.md | 1 - .../version-v1.5.x/help/openai.md | 7 +- .../version-v1.5.x/help/opengauss.md | 1 - .../version-v1.5.x/help/opensuse.md | 1 - .../version-v1.5.x/help/oracle.md | 1 - .../version-v1.5.x/help/ping.md | 7 +- .../version-v1.5.x/help/plugin.md | 1 - .../version-v1.5.x/help/pop3.md | 1 - .../version-v1.5.x/help/port.md | 1 - .../version-v1.5.x/help/postgresql.md | 1 - .../version-v1.5.x/help/process.md | 1 - .../version-v1.5.x/help/prometheus.md | 1 - .../version-v1.5.x/help/pulsar.md | 1 - .../version-v1.5.x/help/rabbitmq.md | 3 +- .../version-v1.5.x/help/redhat.md | 1 - .../version-v1.5.x/help/redis.md | 1 - .../version-v1.5.x/help/rocketmq.md | 1 - .../version-v1.5.x/help/rockylinux.md | 1 - .../version-v1.5.x/help/shenyu.md | 
1 - .../version-v1.5.x/help/smtp.md | 3 +- .../version-v1.5.x/help/spring_gateway.md | 1 - .../version-v1.5.x/help/springboot2.md | 1 - .../version-v1.5.x/help/springboot3.md | 1 - .../version-v1.5.x/help/sqlserver.md | 4 +- .../version-v1.5.x/help/ssl_cert.md | 1 - .../version-v1.5.x/help/tidb.md | 1 - .../version-v1.5.x/help/time_expression.md | 1 - .../version-v1.5.x/help/tomcat.md | 2 +- .../version-v1.5.x/help/ubuntu.md | 1 - .../version-v1.5.x/help/udp_port.md | 1 - .../version-v1.5.x/help/website.md | 1 - .../version-v1.5.x/help/websocket.md | 1 - .../version-v1.5.x/help/windows.md | 9 +- .../version-v1.5.x/help/yarn.md | 1 - .../version-v1.5.x/help/zookeeper.md | 1 - .../version-v1.5.x/introduce.md | 27 +- .../version-v1.5.x/others/resource.md | 1 - .../version-v1.5.x/start/account-modify.md | 8 +- .../version-v1.5.x/start/custom-config.md | 5 +- .../version-v1.5.x/start/docker-deploy.md | 46 +- .../version-v1.5.x/start/greptime-init.md | 8 +- .../version-v1.5.x/start/influxdb-init.md | 10 +- .../version-v1.5.x/start/iotdb-init.md | 2 +- .../version-v1.5.x/start/mysql-change.md | 19 +- .../version-v1.5.x/start/package-deploy.md | 31 +- .../version-v1.5.x/start/postgresql-change.md | 14 +- .../version-v1.5.x/start/quickstart.md | 13 +- .../version-v1.5.x/start/sslcert-practice.md | 12 +- .../version-v1.5.x/start/tdengine-init.md | 14 +- .../start/victoria-metrics-init.md | 10 +- .../version-v1.5.x/template.md | 2 +- .../advanced/extend-http-default.md | 1 - .../advanced/extend-http-example-hertzbeat.md | 10 +- .../advanced/extend-http-example-token.md | 14 +- .../advanced/extend-http-jsonpath.md | 3 +- .../version-v1.4.x/advanced/extend-http.md | 5 +- .../version-v1.4.x/advanced/extend-jdbc.md | 13 +- .../version-v1.4.x/advanced/extend-jmx.md | 3 +- .../version-v1.4.x/advanced/extend-point.md | 3 +- .../version-v1.4.x/advanced/extend-snmp.md | 3 +- .../version-v1.4.x/advanced/extend-ssh.md | 25 +- .../advanced/extend-tutorial.md | 10 +- 
.../version-v1.4.x/help/activemq.md | 1 - .../version-v1.4.x/help/airflow.md | 1 - .../version-v1.4.x/help/alert_dingtalk.md | 2 +- .../version-v1.4.x/help/alert_discord.md | 4 +- .../version-v1.4.x/help/alert_email.md | 4 +- .../version-v1.4.x/help/alert_feishu.md | 4 +- .../version-v1.4.x/help/alert_slack.md | 2 +- .../version-v1.4.x/help/alert_smn.md | 2 +- .../version-v1.4.x/help/alert_telegram.md | 4 +- .../version-v1.4.x/help/alert_threshold.md | 12 +- .../help/alert_threshold_expr.md | 14 +- .../version-v1.4.x/help/alert_wework.md | 4 +- .../versioned_docs/version-v1.4.x/help/api.md | 1 - .../version-v1.4.x/help/centos.md | 1 - home/versioned_docs/version-v1.4.x/help/dm.md | 1 - .../version-v1.4.x/help/docker.md | 3 +- .../version-v1.4.x/help/doris_be.md | 1 - .../version-v1.4.x/help/doris_fe.md | 1 - .../version-v1.4.x/help/dynamic_tp.md | 1 - .../version-v1.4.x/help/fullsite.md | 5 +- .../version-v1.4.x/help/guide.md | 8 +- .../version-v1.4.x/help/hadoop.md | 1 - .../version-v1.4.x/help/hive.md | 1 - .../version-v1.4.x/help/iotdb.md | 1 - .../version-v1.4.x/help/issue.md | 25 +- .../version-v1.4.x/help/jetty.md | 1 - .../versioned_docs/version-v1.4.x/help/jvm.md | 3 +- .../version-v1.4.x/help/kafka.md | 1 - .../version-v1.4.x/help/kubernetes.md | 5 +- .../version-v1.4.x/help/linux.md | 1 - .../version-v1.4.x/help/mariadb.md | 1 - .../version-v1.4.x/help/memcached.md | 5 +- .../version-v1.4.x/help/mysql.md | 1 - .../version-v1.4.x/help/nebulagraph.md | 15 +- .../version-v1.4.x/help/nginx.md | 11 +- .../versioned_docs/version-v1.4.x/help/ntp.md | 1 - .../version-v1.4.x/help/opengauss.md | 1 - .../version-v1.4.x/help/oracle.md | 1 - .../version-v1.4.x/help/ping.md | 7 +- .../version-v1.4.x/help/pop3.md | 1 - .../version-v1.4.x/help/port.md | 1 - .../version-v1.4.x/help/postgresql.md | 1 - .../version-v1.4.x/help/rabbitmq.md | 3 +- .../version-v1.4.x/help/redis.md | 1 - .../version-v1.4.x/help/shenyu.md | 1 - .../version-v1.4.x/help/smtp.md | 3 +- 
.../version-v1.4.x/help/spark.md | 3 +- .../version-v1.4.x/help/spring_gateway.md | 1 - .../version-v1.4.x/help/springboot2.md | 1 - .../version-v1.4.x/help/sqlserver.md | 10 +- .../version-v1.4.x/help/ssl_cert.md | 1 - .../version-v1.4.x/help/tomcat.md | 1 - .../version-v1.4.x/help/ubuntu.md | 1 - .../version-v1.4.x/help/website.md | 1 - .../version-v1.4.x/help/windows.md | 9 +- .../version-v1.4.x/help/zookeeper.md | 1 - .../version-v1.4.x/introduce.md | 145 ++-- .../version-v1.4.x/others/contributing.md | 1 + .../version-v1.4.x/others/developer.md | 4 +- .../version-v1.4.x/others/hertzbeat.md | 42 +- .../version-v1.4.x/others/huaweicloud.md | 2 +- .../version-v1.4.x/others/images-deploy.md | 26 +- .../version-v1.4.x/others/resource.md | 1 - .../version-v1.4.x/others/sponsor.md | 3 +- .../version-v1.4.x/start/account-modify.md | 6 +- .../version-v1.4.x/start/custom-config.md | 5 +- .../version-v1.4.x/start/docker-deploy.md | 41 +- .../version-v1.4.x/start/greptime-init.md | 12 +- .../version-v1.4.x/start/influxdb-init.md | 16 +- .../version-v1.4.x/start/iotdb-init.md | 10 +- .../version-v1.4.x/start/mysql-change.md | 13 +- .../version-v1.4.x/start/package-deploy.md | 42 +- .../version-v1.4.x/start/postgresql-change.md | 10 +- .../version-v1.4.x/start/quickstart.md | 5 +- .../version-v1.4.x/start/sslcert-practice.md | 16 +- .../version-v1.4.x/start/tdengine-init.md | 27 +- .../start/victoria-metrics-init.md | 10 +- .../versioned_docs/version-v1.4.x/template.md | 2 +- .../advanced/extend-http-default.md | 1 - .../advanced/extend-http-example-hertzbeat.md | 8 +- .../advanced/extend-http-example-token.md | 12 +- .../advanced/extend-http-jsonpath.md | 3 +- .../version-v1.5.x/advanced/extend-http.md | 5 +- .../version-v1.5.x/advanced/extend-jdbc.md | 13 +- .../version-v1.5.x/advanced/extend-jmx.md | 3 +- .../version-v1.5.x/advanced/extend-ngql.md | 9 +- .../version-v1.5.x/advanced/extend-point.md | 3 +- .../version-v1.5.x/advanced/extend-snmp.md | 3 +- 
.../version-v1.5.x/advanced/extend-ssh.md | 25 +- .../advanced/extend-tutorial.md | 8 +- .../community/become_committer.md | 3 +- .../community/become_pmc_member.md | 3 +- .../community/code-style-and-quality-guide.md | 35 +- .../version-v1.5.x/community/contribution.md | 3 +- .../version-v1.5.x/community/development.md | 4 +- .../version-v1.5.x/community/document.md | 5 +- .../community/how-to-release.md | 38 +- .../version-v1.5.x/community/how-to-verify.md | 18 +- .../version-v1.5.x/community/mailing_lists.md | 8 +- .../community/new_committer_process.md | 3 +- .../community/new_pmc_member_process.md | 3 +- .../version-v1.5.x/community/submit-code.md | 4 +- .../versioned_docs/version-v1.5.x/download.md | 9 +- .../version-v1.5.x/help/activemq.md | 1 - .../version-v1.5.x/help/airflow.md | 1 - .../version-v1.5.x/help/alert_dingtalk.md | 2 +- .../version-v1.5.x/help/alert_discord.md | 4 +- .../version-v1.5.x/help/alert_email.md | 4 +- .../version-v1.5.x/help/alert_feishu.md | 4 +- .../version-v1.5.x/help/alert_slack.md | 2 +- .../version-v1.5.x/help/alert_smn.md | 2 +- .../version-v1.5.x/help/alert_telegram.md | 4 +- .../help/alert_threshold_expr.md | 2 +- .../version-v1.5.x/help/alert_wework.md | 4 +- .../version-v1.5.x/help/almalinux.md | 1 - .../versioned_docs/version-v1.5.x/help/api.md | 1 - .../version-v1.5.x/help/centos.md | 1 - .../version-v1.5.x/help/clickhouse.md | 1 - .../version-v1.5.x/help/debian.md | 1 - home/versioned_docs/version-v1.5.x/help/dm.md | 1 - .../versioned_docs/version-v1.5.x/help/dns.md | 1 - .../version-v1.5.x/help/docker.md | 3 +- .../version-v1.5.x/help/doris_be.md | 1 - .../version-v1.5.x/help/doris_fe.md | 1 - .../version-v1.5.x/help/dynamic_tp.md | 1 - .../version-v1.5.x/help/elasticsearch.md | 1 - .../version-v1.5.x/help/euleros.md | 1 - .../version-v1.5.x/help/flink.md | 1 - .../version-v1.5.x/help/freebsd.md | 1 - .../versioned_docs/version-v1.5.x/help/ftp.md | 1 - .../version-v1.5.x/help/fullsite.md | 5 +- 
.../version-v1.5.x/help/guide.md | 6 +- .../version-v1.5.x/help/hadoop.md | 1 - .../version-v1.5.x/help/hbase_master.md | 1 - .../version-v1.5.x/help/hbase_regionserver.md | 1 - .../version-v1.5.x/help/hdfs_datanode.md | 1 - .../version-v1.5.x/help/hdfs_namenode.md | 1 - .../version-v1.5.x/help/hive.md | 1 - .../version-v1.5.x/help/http_sd.md | 1 - .../version-v1.5.x/help/huawei_switch.md | 1 - .../version-v1.5.x/help/hugegraph.md | 1 - .../version-v1.5.x/help/influxdb.md | 1 - .../version-v1.5.x/help/influxdb_promql.md | 1 - .../version-v1.5.x/help/iotdb.md | 1 - .../version-v1.5.x/help/issue.md | 25 +- .../version-v1.5.x/help/jetty.md | 1 - .../versioned_docs/version-v1.5.x/help/jvm.md | 3 +- .../version-v1.5.x/help/kafka.md | 1 - .../version-v1.5.x/help/kafka_promql.md | 1 - .../version-v1.5.x/help/kubernetes.md | 5 +- .../version-v1.5.x/help/linux.md | 1 - .../version-v1.5.x/help/mariadb.md | 1 - .../version-v1.5.x/help/memcached.md | 5 +- .../version-v1.5.x/help/mongodb.md | 1 - .../version-v1.5.x/help/mysql.md | 1 - .../version-v1.5.x/help/nacos.md | 1 - .../version-v1.5.x/help/nebulagraph.md | 15 +- .../help/nebulagraph_cluster.md | 1 - .../version-v1.5.x/help/nginx.md | 11 +- .../versioned_docs/version-v1.5.x/help/ntp.md | 1 - .../version-v1.5.x/help/openai.md | 5 +- .../version-v1.5.x/help/opengauss.md | 1 - .../version-v1.5.x/help/opensuse.md | 1 - .../version-v1.5.x/help/oracle.md | 1 - .../version-v1.5.x/help/ping.md | 7 +- .../version-v1.5.x/help/plugin.md | 3 +- .../version-v1.5.x/help/pop3.md | 1 - .../version-v1.5.x/help/port.md | 1 - .../version-v1.5.x/help/postgresql.md | 1 - .../version-v1.5.x/help/process.md | 1 - .../version-v1.5.x/help/prometheus.md | 1 - .../version-v1.5.x/help/rabbitmq.md | 3 +- .../version-v1.5.x/help/redhat.md | 1 - .../version-v1.5.x/help/redis.md | 1 - .../version-v1.5.x/help/rocketmq.md | 1 - .../version-v1.5.x/help/rockylinux.md | 1 - .../version-v1.5.x/help/shenyu.md | 1 - .../version-v1.5.x/help/smtp.md | 3 +- 
.../version-v1.5.x/help/spark.md | 3 +- .../version-v1.5.x/help/spring_gateway.md | 1 - .../version-v1.5.x/help/springboot2.md | 1 - .../version-v1.5.x/help/springboot3.md | 1 - .../version-v1.5.x/help/sqlserver.md | 10 +- .../version-v1.5.x/help/ssl_cert.md | 1 - .../version-v1.5.x/help/tidb.md | 1 - .../version-v1.5.x/help/time_expression.md | 1 - .../version-v1.5.x/help/tomcat.md | 1 - .../version-v1.5.x/help/ubuntu.md | 1 - .../version-v1.5.x/help/udp_port.md | 1 - .../version-v1.5.x/help/website.md | 1 - .../version-v1.5.x/help/websocket.md | 1 - .../version-v1.5.x/help/windows.md | 9 +- .../version-v1.5.x/help/yarn.md | 1 - .../version-v1.5.x/help/zookeeper.md | 1 - .../version-v1.5.x/introduce.md | 141 ++-- .../version-v1.5.x/start/account-modify.md | 6 +- .../version-v1.5.x/start/custom-config.md | 5 +- .../version-v1.5.x/start/docker-deploy.md | 41 +- .../version-v1.5.x/start/greptime-init.md | 12 +- .../version-v1.5.x/start/influxdb-init.md | 16 +- .../version-v1.5.x/start/iotdb-init.md | 2 +- .../version-v1.5.x/start/mysql-change.md | 12 +- .../version-v1.5.x/start/package-deploy.md | 42 +- .../version-v1.5.x/start/postgresql-change.md | 10 +- .../version-v1.5.x/start/quickstart.md | 5 +- .../version-v1.5.x/start/sslcert-practice.md | 8 +- .../version-v1.5.x/start/tdengine-init.md | 27 +- .../start/victoria-metrics-init.md | 10 +- .../versioned_docs/version-v1.5.x/template.md | 2 +- pom.xml | 33 - 789 files changed, 3859 insertions(+), 4030 deletions(-) create mode 100644 .markdownlint-cli2.jsonc diff --git a/.github/workflows/doc-build-test.yml b/.github/workflows/doc-build-test.yml index 33841bd4006..e8b750618be 100644 --- a/.github/workflows/doc-build-test.yml +++ b/.github/workflows/doc-build-test.yml @@ -34,6 +34,10 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Check Markdown + uses: DavidAnson/markdownlint-cli2-action@v16 + with: + globs: './home/**/*.md' - name: Check filename in home/blog run: | TARGET_DIR="./home/blog" diff 
--git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc new file mode 100644 index 00000000000..9dddffec75a --- /dev/null +++ b/.markdownlint-cli2.jsonc @@ -0,0 +1,39 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +{ + "config": { + "default": true, + "MD001": false, + "MD052": false, + "MD003": false, + "MD013": false, + "MD024": false, + "MD025": false, + "MD029": false, + "MD033": false, + "MD035": false, + "MD036": false, + "MD040": false, + "MD041": false, + "MD045": false, + "MD046": false, + "MD047": false + }, + "ignore": [ + "node_modules/" + ] +} diff --git a/home/README.md b/home/README.md index 0904a4af224..e2c8b81c50b 100644 --- a/home/README.md +++ b/home/README.md @@ -84,4 +84,3 @@ yarn docusaurus docs:version v1.5.x |-- docusaurus.config.js |-- sidebars.js // document sidebar menu configuration ``` - diff --git a/home/blog/2022-06-01-hertzbeat-v1.0.md b/home/blog/2022-06-01-hertzbeat-v1.0.md index 8338eb93a57..eaf32fa4a1a 100644 --- a/home/blog/2022-06-01-hertzbeat-v1.0.md +++ b/home/blog/2022-06-01-hertzbeat-v1.0.md @@ -52,11 +52,11 @@ Bug fix. 5. [[collector]bugfix: fix warehouse data queue consume error #153](https://github.com/apache/hertzbeat/pull/153). issue by @daqianxiaoyao 6. 
[[web-app]bugfix:fix input blocking when input error in dark theme #157](https://github.com/apache/hertzbeat/pull/157). issue by @ConradWen -**Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.0-beta.8...v1.0 +**Full Changelog**: -Online https://console.tancloud.cn. +Online . ------------------------ +----------------------- Redis monitor is coming: @@ -77,6 +77,5 @@ Redis monitor is coming: **Repository url** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat - +[Github](https://github.com/apache/hertzbeat) +[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/blog/2022-06-19-hertzbeat-v1.1.0.md b/home/blog/2022-06-19-hertzbeat-v1.1.0.md index cbc3102db5c..c2de558f751 100644 --- a/home/blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/blog/2022-06-19-hertzbeat-v1.1.0.md @@ -33,7 +33,7 @@ Bugfixes: 3. [[monitor] bugfix: Fix for Elasticsearch monitoring failure under basic authentication #174](https://github.com/apache/hertzbeat/pull/174) Contributed by @weifuqing 4. [Fix for monitoring failure due to ambiguous Oracle monitoring parameter "database name" #182](https://github.com/apache/hertzbeat/pull/182) @zklmcookle -Online at https://console.tancloud.cn. +Online at . --- Windows Monitor coming: @@ -58,13 +58,13 @@ commit; Have Fun! ----- +---- ## V1.1.0 Home: hertzbeat.com | tancloud.cn -Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. +Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. Another major change is that we use the H2 database by default to replace the MYSQL database as storage to facilitate the installation and deployment of users. Now only one docker command is needed to install and experience hertzbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` Let's Try It! 
@@ -86,7 +86,7 @@ Bugfix. 3. [[monitor] bugfix: fix elasticsearch collect error when need basic auth #174](https://github.com/apache/hertzbeat/pull/174) contribute by @weifuqing 4. [Change the Oracle database name to the service name to reduce ambiguity #182](https://github.com/apache/hertzbeat/pull/182) @zklmcookle -Online https://console.tancloud.cn. +Online . --- @@ -128,5 +128,5 @@ Have Fun! **Repository Addresses** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat +[Github](https://github.com/apache/hertzbeat) +[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/blog/2022-06-22-one-step-up.md b/home/blog/2022-06-22-one-step-up.md index 9c60d422482..ad172eef3ca 100644 --- a/home/blog/2022-06-22-one-step-up.md +++ b/home/blog/2022-06-22-one-step-up.md @@ -58,13 +58,13 @@ commit; Have Fun! ----- +---- ## V1.1.0 Home: hertzbeat.com | tancloud.cn -Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. +Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. Another major change is that we use the H2 database by default to replace the MYSQL database as storage to facilitate the installation and deployment of users. Now only one docker command is needed to install and experience hertzbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` Let's Try It! @@ -86,7 +86,7 @@ Bugfix. 3. [[monitor] bugfix: fix elasticsearch collect error when need basic auth #174](https://github.com/apache/hertzbeat/pull/174) contribute by @weifuqing 4. [Change the Oracle database name to the service name to reduce ambiguity #182](https://github.com/apache/hertzbeat/pull/182) @zklmcookle -Online https://console.tancloud.cn. +Online . --- @@ -126,5 +126,5 @@ Have Fun! 
**Repository Addresses** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat +[Github](https://github.com/apache/hertzbeat) +[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/blog/2022-07-10-hertzbeat-v1.1.1.md b/home/blog/2022-07-10-hertzbeat-v1.1.1.md index c4a87a40c3d..7cad9437d31 100644 --- a/home/blog/2022-07-10-hertzbeat-v1.1.1.md +++ b/home/blog/2022-07-10-hertzbeat-v1.1.1.md @@ -40,24 +40,23 @@ Bugfix. 8. [[manager] 修改监控页面取消监控功能再启动监控导致多生成jobId,原有监控项目并没有真实取消 #215](https://github.com/apache/hertzbeat/pull/215) contribute by @yangshihui 9. [[warehouse] bugfix exception when tdengine create table SQL contain special char #220](https://github.com/apache/hertzbeat/pull/220) -Online https://console.tancloud.cn. +Online . Have Fun! ----- +---- -> [HertzBeat](https://github.com/apache/hertzbeat) is an opensource monitoring and alarm project incubated by [Dromara](https://dromara.org) and open sourced by [TanCloud](https://tancloud.cn), which supports Website, API, PING, Port, Database, OS Monitor etc. -> We also provide **[Monitoring Cloud For Saas](https://console.tancloud.cn)**, people no longer need to deploy a cumbersome monitoring tool in order to monitor their website resources. **[Sign in to get started for free](https://console.tancloud.cn)**. -> HertzBeat supports more liberal threshold alarm configuration (calculation expression), supports alarm notification, alarm template, email, DingDing, WeChat FeiShu and WebHook. -> Most important is HertzBeat supports [Custom Monitoring](https://hertzbeat.com/docs/advanced/extend-point), just by configuring the YML file, we can customize the monitoring types and metrics what we need. 
+> [HertzBeat](https://github.com/apache/hertzbeat) is an opensource monitoring and alarm project incubated by [Dromara](https://dromara.org) and open sourced by [TanCloud](https://tancloud.cn), which supports Website, API, PING, Port, Database, OS Monitor etc. +> We also provide **[Monitoring Cloud For Saas](https://console.tancloud.cn)**, people no longer need to deploy a cumbersome monitoring tool in order to monitor their website resources. **[Sign in to get started for free](https://console.tancloud.cn)**. +> HertzBeat supports more liberal threshold alarm configuration (calculation expression), supports alarm notification, alarm template, email, DingDing, WeChat FeiShu and WebHook. +> Most important is HertzBeat supports [Custom Monitoring](https://hertzbeat.com/docs/advanced/extend-point), just by configuring the YML file, we can customize the monitoring types and metrics what we need. > HertzBeat is modular, `manager, collector, scheduler, warehouse, alerter` modules are decoupled for easy understanding and custom development. -> Welcome to HertzBeat's [Cloud Environment TanCloud](https://console.tancloud.cn) to try and discover more. +> Welcome to HertzBeat's [Cloud Environment TanCloud](https://console.tancloud.cn) to try and discover more. > Welcome to join us to build hertzbeat together. > > `HertzBeat`'s multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring tool. 
**If you like HertzBeat, star us on GitHub** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat - +[Github](https://github.com/apache/hertzbeat) +[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/blog/2022-09-04-hertzbeat-v1.1.3.md b/home/blog/2022-09-04-hertzbeat-v1.1.3.md index 3cec823079b..9cba7acaa90 100644 --- a/home/blog/2022-09-04-hertzbeat-v1.1.3.md +++ b/home/blog/2022-09-04-hertzbeat-v1.1.3.md @@ -33,11 +33,10 @@ Bugfix. 1. [[docs] fix extend-http-jsonpath.md parseScript error #262](https://github.com/apache/hertzbeat/pull/262) contribute by @woshiniusange . 2. [[monitor] update help docs, refactor redis metrics name #264](https://github.com/apache/hertzbeat/pull/264) -3. [[manager] bugfix alert tags is null when tags map key normal value null. #270](https://github.com/apache/hertzbeat/pull/270) issue by https://gitee.com/hello_brother_niu -4. [[alert] bugfix: the alert global preset config do not take effect #275](https://github.com/apache/hertzbeat/pull/275) issue by https://gitee.com/hello_brother_niu +3. [[manager] bugfix alert tags is null when tags map key normal value null. #270](https://github.com/apache/hertzbeat/pull/270) issue by +4. [[alert] bugfix: the alert global preset config do not take effect #275](https://github.com/apache/hertzbeat/pull/275) issue by -Online https://console.tancloud.cn. +Online . -Have Fun! +Have Fun --------- - diff --git a/home/blog/2022-09-10-ssl-practice.md b/home/blog/2022-09-10-ssl-practice.md index 5c0525f8c16..340a87b3149 100644 --- a/home/blog/2022-09-10-ssl-practice.md +++ b/home/blog/2022-09-10-ssl-practice.md @@ -17,10 +17,10 @@ Today's article describes how to use hertzbeat monitoring system to detect the v HertzBeat is a real-time monitoring tool with powerful customizable monitoring capabilities without the need for an agent. 
Website monitoring, PING connectivity, port availability, database, OS, middleware, API monitoring, threshold alerts, alert notifications (email weChat pinning flybook). -**Official website: https://hertzbeat.com | https://tancloud.cn** +**Official website: | ** -github: https://github.com/apache/hertzbeat -gitee: https://gitee.com/hertzbeat/hertzbeat +github: +gitee: #### Install HertzBeat @@ -42,7 +42,7 @@ gitee: https://gitee.com/hertzbeat/hertzbeat 2. Configure monitoring website -> Here is an example to monitor Baidu website, configure the host domain name, name, collection interval, etc. > Click OK. +> Here is an example to monitor Baidu website, configure the host domain name, name, collection interval, etc. > Click OK. > Click OK. Note that ⚠️ will test the connectivity of the website before adding it by default, and it will add it only if the connection is successful, of course, you can also gray out the **Whether to test** button. ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ad1154670648413bb82c8bdeb5b13609~tplv-k3u1fbpfcp-zoom-1.image) @@ -87,8 +87,8 @@ gitee: https://gitee.com/hertzbeat/hertzbeat You can refer to the help file for the token configuration of Nail WeChat Flying Book, etc. -https://hertzbeat.com/docs/help/alert_dingtalk -https://tancloud.cn/docs/help/alert_dingtalk + + > Alert Notification -> Add new alert notification policy -> Enable notification for the recipients you just configured @@ -98,10 +98,10 @@ https://tancloud.cn/docs/help/alert_dingtalk ---- -#### End! +#### End The practice of monitoring SSL certificates here, of course, for hertzbeat this function is just the tip of the iceberg, if you think hertzbeat this open source project is good if you welcome to give us in the GitHub Gitee star oh, thank you very much. Thank you for your support. The author! 
-**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/blog/2022-10-08-hertzbeat-v1.2.0.md b/home/blog/2022-10-08-hertzbeat-v1.2.0.md index 7137abeb7ca..dad19834fab 100644 --- a/home/blog/2022-10-08-hertzbeat-v1.2.0.md +++ b/home/blog/2022-10-08-hertzbeat-v1.2.0.md @@ -45,13 +45,13 @@ Bugfixes. 2. [[web-app] fix redirect when monitors app is null #286](https://github.com/apache/hertzbeat/pull/286) 3. [[alerter] bugfix aviator expression match npe #297](https://github.com/apache/hertzbeat/pull/297) 4. [[doc] fix project name error #294](https://github.com/apache/hertzbeat/pull/294) contributed by @CharlieXCL -5. [[common]feature:use "apache.http.conn.util" replace "sun.net.util" for upgrading java version #299](https://github.com/dromara/ hertzbeat/pull/299) contributed by @Privauto +5. [[common]feature:use "apache.http.conn.util" replace "sun.net.util" for upgrading java version #299]( hertzbeat/pull/299) contributed by @Privauto 6. [Update docker-deploy.md #304](https://github.com/apache/hertzbeat/pull/304) contributed by @emrys-he 7. [fix(sec): upgrade snakeyaml to 1.31 #313](https://github.com/apache/hertzbeat/pull/313) contributed by @SxLiuYu 8. [[script] add startup log and optimize port service judgment #321](https://github.com/apache/hertzbeat/pull/321) 9. [[web-app] fix echarts y-axis value tip overflow #325](https://github.com/apache/hertzbeat/pull/325) 10. [[webapp] fix interceptor http resp common error-msg when error #329](https://github.com/apache/hertzbeat/pull/329) - Online https://console.tancloud.cn. + Online . Have Fun! @@ -73,6 +73,7 @@ spring. web. resources: static-locations. static-locations. + - classpath:/dist/ - classpath:... /dist/ @@ -80,4 +81,3 @@ static-locations. 
---- ``` - diff --git a/home/blog/2022-11-28-hertzbeat-v1.2.2.md b/home/blog/2022-11-28-hertzbeat-v1.2.2.md index be3b0b5bc75..cd93f7ace13 100644 --- a/home/blog/2022-11-28-hertzbeat-v1.2.2.md +++ b/home/blog/2022-11-28-hertzbeat-v1.2.2.md @@ -11,7 +11,7 @@ tags: [opensource] Home: hertzbeat.com | tancloud.cn -Hi guys! HertzBeat v1.2.2 is coming. This release brings significant features. This version we support monitor kubernetes, docker, springboot, nacos and database dm, opengauss and more. Also we bring an experimental feature, users can custom define metrics collect from prometheus with promql. Fixed several bugs and improved the overall stable usability. And more, linux monitor we support top10 cpu usage metrics, top10 memory usage metrics. +Hi guys! HertzBeat v1.2.2 is coming. This release brings significant features. This version we support monitor kubernetes, docker, springboot, nacos and database dm, opengauss and more. Also we bring an experimental feature, users can custom define metrics collect from prometheus with promql. Fixed several bugs and improved the overall stable usability. And more, linux monitor we support top10 cpu usage metrics, top10 memory usage metrics. Let's Try It Now! Only one docker command is needed to install and experience heartbeat: @@ -51,11 +51,11 @@ Bugfix. 6. [[manager] bugfix the gmtUpdate not change when update monitor param #459](https://github.com/apache/hertzbeat/pull/459) 7. [[home] fix typo in springboot2.md #464](https://github.com/apache/hertzbeat/pull/464) @eltociear -Online https://console.tancloud.cn. +Online . Have Fun! ----- +---- ## V1.2.2 @@ -100,5 +100,4 @@ Bugfix. 6. [[manager] bugfix the gmtUpdate not change when update monitor param #459](https://github.com/apache/hertzbeat/pull/459) 7. 
[[home] fix typo in springboot2.md #464](https://github.com/apache/hertzbeat/pull/464) @eltociear ----- - +---- diff --git a/home/blog/2022-12-19-new-committer.md b/home/blog/2022-12-19-new-committer.md index 34df92ffbd4..7acfd0f5aac 100644 --- a/home/blog/2022-12-19-new-committer.md +++ b/home/blog/2022-12-19-new-committer.md @@ -57,7 +57,7 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) 现从事:某互联网公司Java开发工程师 -email:1758619238@qq.com +email:<1758619238@qq.com> Hertzbeat Committer @@ -105,4 +105,3 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 如果是大的改动,建议提交前编写issues,在提交pr,同时请注意编码的规范,尽量减少bug和警告的产生 > 以上就是我们新晋Committer们的开源经历了,可以看出参与开源并不难,更重要的是迈出第一步,无论是代码还是文档修复或者提交issue,这些都是贡献者参与开源的姿势。快来加入我们吧! - diff --git a/home/blog/2022-12-28-hertzbeat-v1.2.3.md b/home/blog/2022-12-28-hertzbeat-v1.2.3.md index 40af1be10bf..c5527e4ecf5 100644 --- a/home/blog/2022-12-28-hertzbeat-v1.2.3.md +++ b/home/blog/2022-12-28-hertzbeat-v1.2.3.md @@ -49,11 +49,11 @@ Bugfix. 7. [监控k8s问题issue描述与解决方案 #511](https://github.com/apache/hertzbeat/pull/511) @MrAndyMing 8. [[manager] springboot2 monitor support base path config #515](https://github.com/apache/hertzbeat/pull/515) -Online https://console.tancloud.cn. +Online . Have Fun! ----- +---- ## V1.2.3 @@ -95,5 +95,4 @@ Bugfix. 7. [监控k8s问题issue描述与解决方案 #511](https://github.com/apache/hertzbeat/pull/511) @MrAndyMing 8. [[manager] springboot2 monitor support base path config #515](https://github.com/apache/hertzbeat/pull/515) ----- - +---- diff --git a/home/blog/2023-01-05-monitor-iotdb.md b/home/blog/2023-01-05-monitor-iotdb.md index 26dfc887aea..0f105d8fe53 100644 --- a/home/blog/2023-01-05-monitor-iotdb.md +++ b/home/blog/2023-01-05-monitor-iotdb.md @@ -7,7 +7,7 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: [opensource, practice] --- -## Use HertzBeat to monitor the Internet of Things database IoTDB, and it will be done in 5 minutes! 
+## Use HertzBeat to monitor the Internet of Things database IoTDB, and it will be done in 5 minutes ### Introduction to IoTDB @@ -22,12 +22,12 @@ tags: [opensource, practice] ### Get monitoring IoTDB in HertzBeat in 5 minutes -#### Prerequisites, you already have IoTDB environment and HertzBeat environment. +#### Prerequisites, you already have IoTDB environment and HertzBeat environment - IoTDB [deployment and installation documentation](https://iotdb.apache.org/UserGuide/V0.13.x/QuickStart/QuickStart.html) - HertzBeat [deployment installation documentation](https://hertzbeat.com/docs/start/docker-deploy) -#### 1. Enable the `metrics` function on the IoTDB side, which will provide interface data in the form of prometheus metrics. +#### 1. Enable the `metrics` function on the IoTDB side, which will provide interface data in the form of prometheus metrics 1. The metric collection is disabled by default, you need to modify the parameters in `conf/iotdb-metric.yml` first, then restart the server @@ -41,7 +41,7 @@ metricReporterList: - PROMETHEUS ``` -2. Restart IoTDB, open a browser or use curl to access http://ip:9091/metrics, and you can see the metric data. +2. Restart IoTDB, open a browser or use curl to access , and you can see the metric data. #### 2. Add IoTDB monitoring on the HertzBeat monitoring page @@ -54,7 +54,7 @@ Path: Menu -> Database Monitoring -> IoTDB Monitoring -> Add IoTDB Monitoring 2. Configure the parameters required for monitoring IoTDB Fill in the IoTDB **service IP** and **monitoring port** (default 9091) on the monitoring page, and finally click OK to add. 
-For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/iotdb/) https://hertzbeat.com/docs/help /iotdb/ +For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/iotdb/) /iotdb/ ![hertzbeat](/img/blog/monitor-iotdb-2.png) @@ -71,6 +71,7 @@ For other parameters such as **collection interval**, **timeout period**, etc., ![hertzbeat](/img/blog/monitor-iotdb-5.png) **Complete DONE! Through the above steps, it is actually two steps to sum up** + - **Enable `metrics` function on IoTDB in one step** - **Another step is to configure the IP port on the HertzBeat monitoring page to add monitoring** @@ -95,7 +96,7 @@ Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. -- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk to configure the robot on DingTalk and set the security custom keyword `HertzBeat `, get the corresponding `access_token` value. +- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. - Configure the receiver parameters in HertzBeat as follows. 
【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 @@ -122,10 +123,10 @@ Content details: The status of IOTDB node 127.0.0.1 is monitored as OFFLINE, ple This practical article took us to experience that if HertzBeat is used to monitor the IoTDB database metric data, we can find that HertzBeat, which integrates monitoring-alarm-notification, is more convenient in operation and use, and IoTDB can be included in the monitoring with a simple click on the page, it is no longer necessary to deploy multiple components and write multiple YML configuration files with thresholds. -IoTDB Github: https://github.com/apache/iotdb -HertzBeat Github: https://github.com/apache/hertzbeat +IoTDB Github: +HertzBeat Github: -**Welcome to learn about using Star Support! ** +**Welcome to learn about using Star Support!** Only one docker command is needed to install and experience heartbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` diff --git a/home/blog/2023-01-08-monitor-shenyu.md b/home/blog/2023-01-08-monitor-shenyu.md index 582176e34f6..37681ff86b8 100644 --- a/home/blog/2023-01-08-monitor-shenyu.md +++ b/home/blog/2023-01-08-monitor-shenyu.md @@ -7,7 +7,7 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: [opensource, practice] --- -### Monitoring practice for API gateway Apache ShenYu using HertzBeat, 5 minutes! +### Monitoring practice for API gateway Apache ShenYu using HertzBeat, 5 minutes ### Introduction to Apache ShenYu @@ -24,20 +24,20 @@ tags: [opensource, practice] ### HertzBeat Introduction -> HertzBeat is an open source, easy to use and friendly real-time monitoring tool, no Agent, with powerful custom monitoring capabilities. -> Support for application services, database, operating system, middleware, cloud native monitoring, threshold alarms, alarm notification (email WeChat Nail Flybook). 
+> HertzBeat is an open source, easy to use and friendly real-time monitoring tool, no Agent, with powerful custom monitoring capabilities. +> Support for application services, database, operating system, middleware, cloud native monitoring, threshold alarms, alarm notification (email WeChat Nail Flybook). > HertzBeat's powerful customization, multi-type support, easy to extend, low-coupling, hope to help developers and small and medium-sized teams to quickly build their own monitoring system. -### Monitor Apache ShenYu in HertzBeat in 5 minutes! +### Monitor Apache ShenYu in HertzBeat in 5 minutes -#### You must have a ShenYu environment and a HertzBeat environment. +#### You must have a ShenYu environment and a HertzBeat environment - ShenYu [Deployment and Installation Documentation](https://shenyu.apache.org/zh/docs/deployment/deployment-before) - HertzBeat [Deployment and Installation Documentation](https://hertzbeat.com/docs/start/docker-deploy) -#### i. Enable the `metrics` plugin on the ShenYu side, which will provide the metrics interface data. +#### i. Enable the `metrics` plugin on the ShenYu side, which will provide the metrics interface data -> The plugin is the core implementer of the Apache ShenYu gateway, and metrics data collection is also integrated at `ShenYu` in the form of a plugin - `Metrics Plugin`. +> The plugin is the core implementer of the Apache ShenYu gateway, and metrics data collection is also integrated at `ShenYu` in the form of a plugin - `Metrics Plugin`. > The `Metrics plugin` is used by the gateway to monitor its own operational status (`JVM` related), request responses and other related metrics. 1. Add the `metrics plugin` dependency to the `pom.xml` file of the gateway. @@ -76,8 +76,8 @@ Path: Menu -> Middleware Monitor -> ShenYu Monitor -> Add ShenYu Monitor 2. Configure the parameters required for monitoring ShenYu -On the monitor page, fill in ShenYu **service IP**, **monitor port** (default 8090), and click OK to add. 
-For other parameters such as **collection interval**, **timeout**, etc., you can refer to the [help file](https://hertzbeat.com/docs/help/shenyu/) https://hertzbeat.com/docs/help/shenyu/ +On the monitor page, fill in ShenYu **service IP**, **monitor port** (default 8090), and click OK to add. +For other parameters such as **collection interval**, **timeout**, etc., you can refer to the [help file](https://hertzbeat.com/docs/help/shenyu/) ![hertzbeat](/img/blog/monitor-shenyu-1.png) @@ -96,6 +96,7 @@ For other parameters such as **collection interval**, **timeout**, etc., you can ![hertzbeat](/img/blog/monitor-shenyu-6.png) **DONE! With the above steps, it's really only two steps** + - **The first step is to enable the `metrics` plugin on the ShenYu side**. - **The second step is to configure the IP ports on the HertzBeat monitoring page to add monitoring @@ -113,7 +114,7 @@ Of course, just looking at it is not perfect, monitoring is often accompanied by Path: Menu -> Alert Thresholds -> Add Thresholds - There are a lot of metrics in ShenYu monitoring, for example, we will set the threshold for the `number of open file descriptors` `process_open_fds` -> `value` metric, which will alert you when the number of open file descriptors on the server side is greater than 3,000. -- Here we configure an alert to be issued when the `value' of `process_open_fds` exceeds 3000, with an alert level of **Warning alert**, which is triggered three times, as shown in the following figure. +- Here we configure an alert to be issued when the `value' of`process_open_fds` exceeds 3000, with an alert level of **Warning alert**, which is triggered three times, as shown in the following figure. ![hertzbeat](/img/blog/monitor-shenyu-7.png) @@ -125,7 +126,7 @@ Path: Menu -> Alert Notification -> Alert Recipients -> Add New Recipient. Message notification methods support **Email, Nail, WeChat, Flybook, WebHook, SMS**, etc. Here we take the commonly used Nail as an example. 
-- Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk Configure the bot on the pinning side, set the security customization keyword ` HertzBeat`, get the corresponding `access_token` value. +- Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) Configure the bot on the pinning side, set the security customization keyword `HertzBeat`, get the corresponding `access_token` value. - Configure the recipient parameters in HertzBeat as follows. [Alert Notification] -> [Add Recipient] -> [Select Nailed Bot Notification Method] -> [Set Nailed Bot ACCESS_TOKEN] -> [OK] @@ -138,7 +139,7 @@ Message notification methods support **Email, Nail, WeChat, Flybook, WebHook, SM ![hertzbeat](/img/blog/alert-notice-2.png) -### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding. +### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding ``` [HertzBeat Alert Notification] @@ -156,10 +157,10 @@ Details : Please note that the number of file descriptors opened by ⚠️ ShenY This hands-on article takes us through how to use HertzBeat to monitor Apache ShenYu metrics data, and we can find that HertzBeat, which combines ``Monitoring-Alert-Notification``, is much more convenient to operate and use, and you can include ShenYu in the monitoring by simply clicking on a page. There is no need to deploy multiple components and write multiple YML configuration files. ::: -Apache ShenYu Github: https://github.com/apache/shenyu -HertzBeat Github: https://github.com/apache/hertzbeat +Apache ShenYu Github: +HertzBeat Github: -**Welcome to learn about using Star Support Oh! 
** +**Welcome to learn about using Star Support Oh!** -Experience heartbeat with a single docker command: +Experience heartbeat with a single docker command: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` diff --git a/home/blog/2023-02-02-monitor-dynamic-tp.md b/home/blog/2023-02-02-monitor-dynamic-tp.md index cc42a8c0cab..83980106d3c 100644 --- a/home/blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/blog/2023-02-02-monitor-dynamic-tp.md @@ -7,9 +7,9 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: [opensource, practice] --- -## Monitoring practice for thread pooling framework DynamicTp using HertzBeat, 5 minutes! +## Monitoring practice for thread pooling framework DynamicTp using HertzBeat, 5 minutes -### Introducing DynamicTp, the thread pooling framework. +### Introducing DynamicTp, the thread pooling framework > DynamicTp is a lightweight configuration-centric dynamic thread pool in Jvm with built-in monitoring and alerting capabilities, which can be customized through SPI extensions. @@ -25,14 +25,14 @@ tags: [opensource, practice] - Its Http, Jmx, Ssh, Snmp, Jdbc, Prometheus and other protocol specifications configurable, just configure YML can use these protocols to customize the collection of any metrics you want to collect. Would you believe that you can instantly adapt a new monitoring type such as K8s or Docker by simply configuring YML? - HertzBeat's powerful customization, multi-type support, easy scalability, and low coupling will hopefully help developers and small to medium sized teams to quickly build their own monitoring systems. -### Monitor DynamicTp in 5 minutes at HertzBeat! +### Monitor DynamicTp in 5 minutes at HertzBeat -#### operation, you already have a DynamicTp environment and a HertzBeat environment. 
+#### operation, you already have a DynamicTp environment and a HertzBeat environment - DynamicTp [Integration Access Documentation](https://dynamictp.cn/guide/use/quick-start.html) - HertzBeat [Deployment and Installation Documentation](https://hertzbeat.com/docs/start/docker-deploy) -#### i. Expose the `DynamicTp` metrics interface `/actuator/dynamic-tp` on the DynamicTp side, which will provide the metrics interface data. +#### i. Expose the `DynamicTp` metrics interface `/actuator/dynamic-tp` on the DynamicTp side, which will provide the metrics interface data 1. Enable the SpringBoot Actuator Endpoint to expose the `DynamicTp` metrics interface. @@ -88,8 +88,8 @@ Path: Menu -> Middleware Monitor -> DynamicTp Monitor -> Add DynamicTp Monitor 2. Configure the parameters required for monitoring DynamicTp. -On the monitor page, fill in DynamicTp **service IP**, **monitoring port** (default 8080), and finally click OK to add it. -For other parameters such as **collection interval**, **timeout**, etc., you can refer to [help](https://hertzbeat.com/docs/help/dynamic_tp/) https://hertzbeat.com/docs/help/dynamic_tp/ +On the monitor page, fill in DynamicTp **service IP**, **monitoring port** (default 8080), and finally click OK to add it. +For other parameters such as **collection interval**, **timeout**, etc., you can refer to [help](https://hertzbeat.com/docs/help/dynamic_tp/) ![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) @@ -108,6 +108,7 @@ For other parameters such as **collection interval**, **timeout**, etc., you can ![hertzbeat](/img/blog/monitor-dynamic-tp-5.png) **DONE! With the above steps, it's really just two steps** + - **The first step is to expose the DynamicTp `metrics` endpoint `/actuator/dynamic-tp`**. - **The second step is to configure the IP ports on the HertzBeat monitoring page to add the monitoring** @@ -137,7 +138,7 @@ Path: Menu -> Alert Notification -> Alert Recipient -> Add Recipient. 
Message notification methods support **Email, Dingtalk, WeChat, Flybook, WebHook, SMS**, etc. We take the commonly used Dingtalk as an example. -- Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk Configure the bot on Dingtalk side, set the security customization keyword ` HertzBeat`, get the corresponding `access_token` value. +- Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) Configure the bot on Dingtalk side, set the security customization keyword `HertzBeat`, get the corresponding `access_token` value. - Configure the recipient parameters in HertzBeat as follows. [Alert Notification] -> [Add Recipient] -> [Choose Dingtalk bot notification method] -> [Set Dingtalk bot ACCESS_TOKEN] -> [OK] @@ -150,7 +151,7 @@ Message notification methods support **Email, Dingtalk, WeChat, Flybook, WebHook ![hertzbeat](/img/blog/alert-notice-2.png) -### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding. +### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding ``` [HertzBeat alert notification] @@ -168,10 +169,10 @@ Details : DynamicTp has run timeout thread, count is 2 This practical article takes us to experience how to use HertzBeat to monitor DynamicTp thread pool metrics data, and we can find that HertzBeat with ``monitoring-alerting-notification`` is much more convenient to operate and use, and you only need to point and click on a page to include DynamicTp thread pool into the monitoring and alert notification, and you don't need to deploy multiple components to write YML configuration files anymore. There is no need to deploy multiple components and write YML configuration files. 
::: -DynamicTp Github: https://github.com/dromara/dynamic-tp -HertzBeat Github: https://github.com/apache/hertzbeat +DynamicTp Github: +HertzBeat Github: **Welcome to learn how to use Star Support!** -Experience heartbeat with a single docker command: +Experience heartbeat with a single docker command: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` diff --git a/home/blog/2023-02-10-new-committer.md b/home/blog/2023-02-10-new-committer.md index 37bf294428f..9dbc9399ba9 100644 --- a/home/blog/2023-02-10-new-committer.md +++ b/home/blog/2023-02-10-new-committer.md @@ -80,16 +80,16 @@ From the first `PR` to the present, I have participated in the `hertzbeat` open **contribute:** -- 1. Realize the monitoring of docker containers. -- 2. Complete the domestic database DM monitoring -- 3. Write a single test for the corresponding business. -- 4. English translation of some annotations. +* 1. Realize the monitoring of docker containers. +* 2. Complete the domestic database DM monitoring +* 3. Write a single test for the corresponding business. +* 4. English translation of some annotations. **reward:** -- 1. The technical ability has been further improved. -- 2. Broaden your horizons. -- 3. Learned a lot from the bosses. +* 1. The technical ability has been further improved. +* 2. Broaden your horizons. +* 3. Learned a lot from the bosses. ### 🌻 Thanks to the community partners @@ -99,10 +99,10 @@ Thanks to the friends who have helped me or inspired me for free (in no particul First of all, I am also a newcomer to Novice Village, but I can share some of my experience with you, hoping to help you. -- 1. Don't be too impatient, and calm down to understand the general implementation logic of each module. -- 2. Use different functions and debug to see the underlying implementation principle of each function. -- 3. Slowly try to read the source code and understand it. -- 4. 
If you encounter a bug, you can directly report it to issues, or you can try to solve it yourself. +* 1. Don't be too impatient, and calm down to understand the general implementation logic of each module. +* 2. Use different functions and debug to see the underlying implementation principle of each function. +* 3. Slowly try to read the source code and understand it. +* 4. If you encounter a bug, you can directly report it to issues, or you can try to solve it yourself. ## What is Hertz Beat? @@ -113,16 +113,15 @@ First of all, I am also a newcomer to Novice Village, but I can share some of my > > The powerful customization of `HertzBeat`, multi-type support, easy expansion, and low coupling, hope to help developers and small and medium-sized teams quickly build their own monitoring tools. -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ## ⛄ Supported -- Website Monitoring, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap, Ssl Certificate, SpringBoot, FTP Server -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Dameng, OpenGauss, ClickHouse, IoTDB -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +* Website Monitoring, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap, Ssl Certificate, SpringBoot, FTP Server +* Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Dameng, OpenGauss, ClickHouse, IoTDB +* Linux, Ubuntu, CentOS, Windows +* Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -Kubernetes, Docker -- and more for your custom monitoring. -- Notification support `Discord` `Slack` `Telegram` `Mail` `DingTalk` `WeChat` `Feishu` `SMS` `Webhook`. - +* and more for your custom monitoring. +* Notification support `Discord` `Slack` `Telegram` `Mail` `DingTalk` `WeChat` `Feishu` `SMS` `Webhook`. 
diff --git a/home/blog/2023-02-11-monitor-mysql.md b/home/blog/2023-02-11-monitor-mysql.md index 614ff205f7b..fa0519a7cde 100644 --- a/home/blog/2023-02-11-monitor-mysql.md +++ b/home/blog/2023-02-11-monitor-mysql.md @@ -8,7 +8,7 @@ tags: [opensource, practice] Keywords: [Open source monitoring tool, open source database monitoring, Mysql database monitoring] --- -## Use the open source real-time monitoring tool HertzBeat to monitor and alarm the Mysql database, and it will be done in 5 minutes! +## Use the open source real-time monitoring tool HertzBeat to monitor and alarm the Mysql database, and it will be done in 5 minutes ### Mysql database introduction @@ -24,7 +24,7 @@ Keywords: [Open source monitoring tool, open source database monitoring, Mysql d ### Get the Mysql database monitoring in HertzBeat in 5 minutes -#### The premise of the operation is that you already have the Mysql environment and the HertzBeat environment. +#### The premise of the operation is that you already have the Mysql environment and the HertzBeat environment - Mysql [Installation and deployment document](https://www.runoob.com/mysql/mysql-install.html) - HertzBeat [Installation and deployment documentation](https://hertzbeat.com/docs/start/docker-deploy) @@ -40,7 +40,7 @@ Path: Menu -> Database Monitoring -> Mysql Database -> Add Mysql Database Monito 2. Configure the parameters required for the new monitoring Mysql database On the monitoring page, fill in Mysql **service IP**, **monitoring port** (default 3306), **account password, etc.**, and finally click OK to add. 
-For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/mysql/) https://hertzbeat.com/docs/help /mysql/ +For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/mysql/) /mysql/ ![hertzbeat](/img/blog/monitor-mysql-2.png) @@ -57,6 +57,7 @@ For other parameters such as **collection interval**, **timeout period**, etc., ![hertzbeat](/img/blog/monitor-mysql-4.png) **DONE! Done! Through the above steps, in fact, it only takes one step to sum up** + - **On the HertzBeat monitoring page, configure the IP port account password and add Mysql monitoring** :::tip @@ -87,7 +88,7 @@ Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. -- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk to configure the robot on DingTalk and set the security custom keyword `HertzBeat `, get the corresponding `access_token` value. +- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. - Configure the receiver parameters in HertzBeat as follows. 【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 @@ -118,10 +119,10 @@ Content details: mysql db query_cache_hit_rate is too low, now is 20. This practical article took us to experience how to use the open source real-time monitoring tool HertzBeat to monitor Mysql database metric data. 
We can find that HertzBeat, which integrates `monitoring-alarm-notification`, is more convenient in operation and use, just click on the page The Mysql database can be included in the monitoring and alarm notification, and the tedious operations of deploying multiple components and writing configuration files are no longer needed. ::: -Mysql Github: https://github.com/mysql/mysql-server -HertzBeat Github: https://github.com/apache/hertzbeat +Mysql Github: +HertzBeat Github: -**Welcome to learn, use and star! ** +**Welcome to learn, use and star!** > Only one docker command is needed to install and experience heartbeat: diff --git a/home/blog/2023-02-15-monitor-linux.md b/home/blog/2023-02-15-monitor-linux.md index 8681f564e11..5b41eefc41b 100644 --- a/home/blog/2023-02-15-monitor-linux.md +++ b/home/blog/2023-02-15-monitor-linux.md @@ -8,7 +8,7 @@ tags: [opensource, practice] keywords: [Open source monitoring tool, operating system monitoring, Linux monitoring] --- -## Use the open source real-time monitoring tool HertzBeat to monitor and alarm the Linux operating system, and it will be done in 5 minutes! +## Use the open source real-time monitoring tool HertzBeat to monitor and alarm the Linux operating system, and it will be done in 5 minutes ### Introduction to HertzBeat @@ -18,11 +18,11 @@ keywords: [Open source monitoring tool, operating system monitoring, Linux monit - It configurable protocol specifications such as Http, Jmx, Ssh, Snmp, Jdbc, Prometheus, etc. You only need to configure YML to use these protocols to customize and collect any metrics you want to collect. Do you believe that you can immediately adapt to a new monitoring type such as K8s or Docker just by configuring YML? - HertzBeat's powerful customization, multi-type support, easy expansion, and low coupling, hope to help developers and small and medium teams quickly build their own monitoring tools. 
-Github: https://github.com/apache/hertzbeat +Github: ### Get Linux Monitoring Done in HertzBeat in 5 Minutes -#### Prerequisites, you already have a Linux environment and a HertzBeat environment. +#### Prerequisites, you already have a Linux environment and a HertzBeat environment - HertzBeat [Installation and deployment documentation](https://hertzbeat.com/docs/start/docker-deploy) @@ -37,7 +37,7 @@ Path: Menu -> Operating System Monitoring -> Linux Operating System -> Add Linux 2. Configure the parameters required for new monitoring Linux Fill in the Linux **peer IP**, **SSH port** (default 22), **account password, etc.** on the monitoring page, and finally click OK to add. -For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document https://hertzbeat.com/docs/help/mysql/ +For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document ![hertzbeat](/img/blog/monitor-linux-2.png) @@ -89,7 +89,7 @@ Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. -- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk to configure the robot on DingTalk and set the security custom keyword `HertzBeat `, get the corresponding `access_token` value. +- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. - Configure the receiver parameters in HertzBeat as follows. 
【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 @@ -133,8 +133,8 @@ This practical article took us to experience how to use the open source real-tim > > The powerful customization of `HertzBeat`, multi-type support, easy expansion, and low coupling, hope to help developers and small and medium-sized teams quickly build their own monitoring tools. -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ## ⛄ Supported @@ -145,4 +145,3 @@ This practical article took us to experience how to use the open source real-tim -Kubernetes, Docker - and more for your custom monitoring. - Notification support `Discord` `Slack` `Telegram` `Mail` `DingTalk` `WeChat` `Feishu` `SMS` `Webhook`. - diff --git a/home/blog/2023-03-15-hertzbeat-v1.3.0.md b/home/blog/2023-03-15-hertzbeat-v1.3.0.md index 14a550ec61e..1082e4fa16c 100644 --- a/home/blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/blog/2023-03-15-hertzbeat-v1.3.0.md @@ -14,8 +14,8 @@ Website: hertzbeat.com | tancloud.cn ### What is HertzBeat? -> HertzBeat is an open source real-time monitoring and alerting tool with powerful custom monitoring capabilities and no Agent required. -> It supports monitoring of application services, database, operating system, middleware, cloud native, network and other metrics, and threshold alert notification in one step. +> HertzBeat is an open source real-time monitoring and alerting tool with powerful custom monitoring capabilities and no Agent required. +> It supports monitoring of application services, database, operating system, middleware, cloud native, network and other metrics, and threshold alert notification in one step. > Support more liberal threshold rules (calculation expressions), `email` `Discord` `Slack` `Telegram` `Pegging` `WeChat` `FlyBook` `SMS` `Webhook` and other ways to timely delivery. 
> > We have made the protocol specifications such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable so that you can simply configure `YML` to use these protocols to customize the collection of any metrics you want. @@ -24,9 +24,9 @@ Website: hertzbeat.com | tancloud.cn ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### v1.3.0 release is here @@ -36,13 +36,13 @@ After a month of iterations, HertzBeat v1.3.0 was officially released last weeke ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b5c9dd3e28c54c72b49a7470012a0c36~tplv-k3u1fbpfcp-zoom-1.image) -- **support for monitoring network switches**. +- **support for monitoring network switches**. hertzbeat supported snmp protocol long time ago, windows monitoring is monitored by snmp protocol, this version we not only support more windows performance metrics, but also support snmp walk, adapt several common network switches monitoring, welcome to contribute more types and metrics to the community. -- **Support for monitoring redis clusters and more database metrics**. +- **Support for monitoring redis clusters and more database metrics**. Community contributors have contributed extended metrics for redis clusters and multiple databases, enriching the performance metrics data. -- **Support iotdb1.0 storage, dependency-free mode** +- **Support iotdb1.0 storage, dependency-free mode** and more new features welcome to explore - Fix several bugs, better documentation, refactored code. 
@@ -91,5 +91,5 @@ COMMIT; ---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** diff --git a/home/blog/2023-03-22-monitor-springboot2.md b/home/blog/2023-03-22-monitor-springboot2.md index 8dac9aade12..6c669bfc9fb 100644 --- a/home/blog/2023-03-22-monitor-springboot2.md +++ b/home/blog/2023-03-22-monitor-springboot2.md @@ -8,29 +8,29 @@ tags: [opensource, practice] keywords: [opensource monitoring, SpringBoot monitoring, alert] --- -## Use the open source real-time monitoring tool HertzBeat to monitor and alarm the SpringBoot2 application, and it will be done in 5 minutes! +## Use the open source real-time monitoring tool HertzBeat to monitor and alarm the SpringBoot2 application, and it will be done in 5 minutes ### HertzBeat Intro > HertzBeat is an open source, real-time monitoring tool with custom-monitor and agentLess. | 易用友好的开源实时监控告警工具,无需Agent,强大自定义监控能力. > -> **Monitor+Alerter+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. +> **Monitor+Alerter+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. > More flexible threshold rule(calculation expression), timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. > -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. +> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? 
> > `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. -Github: https://github.com/apache/hertzbeat +Github: ### Monitoring SpringBoot2 Metrics with HertzBeat in 5 minutes -#### Prerequisite, you already have SpringBoot2 application environment and HertzBeat environment. +#### Prerequisite, you already have SpringBoot2 application environment and HertzBeat environment - HertzBeat [Installation and deployment documentation](https://hertzbeat.com/docs/start/docker-deploy) -#### 1. The `actuator` metric endpoint is exposed on the SpringBoot2 application side, which will provide metrics endpoints data. +#### 1. The `actuator` metric endpoint is exposed on the SpringBoot2 application side, which will provide metrics endpoints data 1. Open SpringBoot Actuator Endpoint to expose `metrics health env` metric interface @@ -94,7 +94,7 @@ Path: Menu -> Application Service Monitoring -> SpringBoot2 -> Add SpringBoot2 M 2. Configure the parameters required for new monitoring SpringBoot2 Fill in the SpringBoot2 application **peer IP**, **service port** (default 8080), **account password, etc.** on the monitoring page, and finally click OK to add. -For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document https://hertzbeat.com/docs/help/ +For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document ![hertzbeat](/img/blog/monitor-springboot2-2.png) @@ -142,7 +142,7 @@ Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. 
-- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk to configure the robot on DingTalk and set the security custom keyword `HertzBeat `, get the corresponding `access_token` value. +- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. - Configure the receiver parameters in HertzBeat as follows. 【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 @@ -185,16 +185,16 @@ This practical article took us to experience how to use the open source real-tim ## What is HertzBeat? -> [HertzBeat](https://github.com/apache/hertzbeat) is an open source, real-time monitoring tool with custom-monitor and agentless. -> **Monitor+Alerter+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. +> [HertzBeat](https://github.com/apache/hertzbeat) is an open source, real-time monitoring tool with custom-monitor and agentless. +> **Monitor+Alerter+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. > More flexible threshold rule(calculation expression), timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. > -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. +> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? 
> > `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. ----- +---- ## ⛄ Supported @@ -209,5 +209,5 @@ This practical article took us to experience how to use the open source real-tim ---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** diff --git a/home/blog/2023-05-09-hertzbeat-v1.3.1.md b/home/blog/2023-05-09-hertzbeat-v1.3.1.md index 1e5c663aebd..e2b4e50c169 100644 --- a/home/blog/2023-05-09-hertzbeat-v1.3.1.md +++ b/home/blog/2023-05-09-hertzbeat-v1.3.1.md @@ -15,27 +15,27 @@ Website: hertzbeat.com | tancloud.cn ### What is HertzBeat? > [HertzBeat](https://github.com/apache/hertzbeat) is an open source, real-time monitoring system with custom-monitoring and agentLess. -> **Monitoring+Alarm+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. -> Easy to use, full web-based operation, monitoring and alerting at the click of a mouse, zero learning cost. +> **Monitoring+Alarm+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. +> Easy to use, full web-based operation, monitoring and alerting at the click of a mouse, zero learning cost. > More flexible threshold rule, timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. > -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. +> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? 
> -> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. +> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. > We also provide **[Monitoring SaaS Cloud](https://console.tancloud.cn)**, users no longer need to deploy a cumbersome monitoring system in order to monitor resources. **[Get started for free](https://console.tancloud.cn)**. ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### v1.3.1 release is here -Hi guys! Major release. HertzBeat v1.3.1 is coming. **So excited that more than 30 friends have contributed this version.**. -We support **greptimeDB, tedngine3.0 and opengauss influxdb** to store to metrics data. -New feature **monitoring export and import by excel json yaml** , **alert silence**, **new monitoring template** and more. +Hi guys! Major release. HertzBeat v1.3.1 is coming. **So excited that more than 30 friends have contributed this version.**. +We support **greptimeDB, tedngine3.0 and opengauss influxdb** to store to metrics data. +New feature **monitoring export and import by excel json yaml** , **alert silence**, **new monitoring template** and more. Support monitoring EulerOS metrics and SpringBoot3 metrics. Fixed several bugs, imporved document and improved the overall stable usability. Let's Try Now! @@ -43,7 +43,7 @@ Let's Try Now! Only one docker command is needed to install and experience hertzbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` -## Upgrade Note⚠️. 
+## Upgrade Note⚠️ If use tdengine before, please upgrade tdengine to 3.0+ @@ -55,6 +55,7 @@ COMMIT; ``` --- + ## ⛄ Supported - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -68,5 +69,5 @@ COMMIT; --- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** diff --git a/home/blog/2023-05-11-greptimedb-store.md b/home/blog/2023-05-11-greptimedb-store.md index 0343565c282..210a41f5420 100644 --- a/home/blog/2023-05-11-greptimedb-store.md +++ b/home/blog/2023-05-11-greptimedb-store.md @@ -76,9 +76,9 @@ $ docker run -d -p 1157:1157 \ - `-v /opt/application.yml:/opt/hertzbeat/config/application.yml` : Mount customized local configuration files to the container, i.e. use local configuration files to overwrite the container configuration files. -Note that the ⚠️ local mount configuration file `application.yml` needs to exist in advance, and the full contents of the file can be found in the project repository [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/ application.yml) +Note that the ⚠️ local mount configuration file `application.yml` needs to exist in advance, and the full contents of the file can be found in the project repository [/script/application.yml]( application.yml) -2. Go to http://ip:1157/ with the default account and password admin/hertzbeat to see if HertzBeat starts successfully. +2. Go to with the default account and password admin/hertzbeat to see if HertzBeat starts successfully. #### Configure to use GreptimeDB to store HertzBeat monitoring metrics metrics data @@ -101,12 +101,12 @@ warehouse: 2. Restart HertzBeat. ```shell -$ docker restart hertzbeat +docker restart hertzbeat ``` #### Observe the authentication effect -1. visit HertzBeat in your browser http://ip:1157/ default account password admin/hertzbeat +1. 
visit HertzBeat in your browser default account password admin/hertzbeat 2. Use HertzBeat to add application monitors, such as website monitors, Linux monitors, Mysql monitors, and so on. 3. After monitoring and collecting several cycles, check whether GreptimeDB database stores the metrics data and whether HertzBeat metrics data graph data is displayed normally. @@ -120,12 +120,12 @@ Here's the picture: ! ## Summary -This article took us to experience how to use the open source time-series database GreptimeDB to store the metrics data of the open source real-time monitoring HertzBeat, in general, the two open source products is very simple to get started, the key is that if it is too much trouble do not want to deploy both of them still have cloud services 😂 let you toss. +This article took us to experience how to use the open source time-series database GreptimeDB to store the metrics data of the open source real-time monitoring HertzBeat, in general, the two open source products is very simple to get started, the key is that if it is too much trouble do not want to deploy both of them still have cloud services 😂 let you toss. As one of the developers of the feature [HertzBeat supports GreptimeDB](https://github.com/apache/hertzbeat/pull/834), in the actual adaptation process, GreptimeDB's silky-smooth native SDK and relational database-like SQL, let us from other GreptimeDB native SDK and relational database-like SQL make it very easy to switch from other time-series databases like `TDengine, IotDB, InfluxDB` to GreptimeDB, and the experience is very smooth. -GreptimeDB Github: https://github.com/GreptimeTeam/greptimedb -HertzBeat Github: https://github.com/apache/hertzbeat +GreptimeDB Github: +HertzBeat Github: -** Finally, you are welcome to be more understanding, more use, more comments, more ISSUE, more PR, more Star support these two did not come out for a long time hope to get care of open source cattle are not afraid of difficulties a small star oh! 
Do open source, we are sincere, love 💗** +**Finally, you are welcome to be more understanding, more use, more comments, more ISSUE, more PR, more Star support these two did not come out for a long time hope to get care of open source cattle are not afraid of difficulties a small star oh! Do open source, we are sincere, love 💗** Thanks to the contributors of this feature [HertzBeat support GreptimeDB](https://github.com/apache/hertzbeat/pull/834) @zqr10159, @fengjiachun, @killme2008, @tomsun28 diff --git a/home/blog/2023-07-05-hertzbeat-v1.3.2.md b/home/blog/2023-07-05-hertzbeat-v1.3.2.md index f8eda452b26..c676c96028f 100644 --- a/home/blog/2023-07-05-hertzbeat-v1.3.2.md +++ b/home/blog/2023-07-05-hertzbeat-v1.3.2.md @@ -15,21 +15,21 @@ Website: hertzbeat.com | tancloud.cn ### What is HertzBeat? > [HertzBeat](https://github.com/apache/hertzbeat) is an open source, real-time monitoring system with custom-monitoring and agentLess. -> **Monitoring+Alarm+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. -> Easy to use, full web-based operation, monitoring and alerting at the click of a mouse, zero learning cost. +> **Monitoring+Alarm+Notify** all in one. Support monitoring web service, database, os, middleware, cloud-native, network and more. +> Easy to use, full web-based operation, monitoring and alerting at the click of a mouse, zero learning cost. > More flexible threshold rule, timely notification delivery by `Discord` `Slack` `Telegram` `Email` `DingDing` `WeChat` `FeiShu` `Webhook` `SMS`. > -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. +> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. 
> Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? > -> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. +> `HertzBeat`'s powerful custom-define, multi-type support, easy expansion, low coupling, hope to help developers and micro teams to quickly build their own monitoring system. > We also provide **[Monitoring SaaS Cloud](https://console.tancloud.cn)**, users no longer need to deploy a cumbersome monitoring system in order to monitor resources. **[Get started for free](https://console.tancloud.cn)**. ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### v1.3.2 @@ -59,7 +59,7 @@ Only one docker command is needed to install and experience hertzbeat: ```docker run -d -p 1157:1157 --name hertzbeat quay.io/tancloud/hertzbeat``` ---- +--- Upgrade Note⚠️. 
@@ -69,7 +69,7 @@ For h2 database users, sholud exec sql below: ALTER TABLE HZB_PARAM DROP CONSTRAINT CONSTRAINT_82;; ``` -How to Enable H2 WEB Console: +How to Enable H2 WEB Console: Modify `application.yml` and restart, access `ip:1157/h2-console` ``` @@ -81,6 +81,7 @@ spring: ``` --- + ## ⛄ Supported - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -94,6 +95,5 @@ spring: --- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** - +**Github: ** +**Gitee: ** diff --git a/home/blog/2023-08-14-hertzbeat-v1.4.0.md b/home/blog/2023-08-14-hertzbeat-v1.4.0.md index 54ea6267be8..34179eb4df7 100644 --- a/home/blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/blog/2023-08-14-hertzbeat-v1.4.0.md @@ -26,9 +26,9 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### The cluster version is coming @@ -39,9 +39,9 @@ The cluster version not only brings us more powerful monitoring performance, but #### High performance cluster -- Supports the deployment of collector clusters, multi-collector clusters scale horizontally, and exponentially improves the number of monitors and collection performance. -- The monitoring task is self-scheduled in the collector cluster. A single collector hangs up and migrates the collection task without perceptual failure. Newly added collector nodes are automatically scheduled to share the collection pressure. -- It is very convenient to switch between stand-alone mode and cluster mode and deploy without additional components. +* Supports the deployment of collector clusters, multi-collector clusters scale horizontally, and exponentially improves the number of monitors and collection performance. 
+* The monitoring task is self-scheduled in the collector cluster. A single collector hangs up and migrates the collection task without perceptual failure. Newly added collector nodes are automatically scheduled to share the collection pressure. +* It is very convenient to switch between stand-alone mode and cluster mode and deploy without additional components. ![hertzbeat](/img/docs/cluster-arch.png) @@ -62,10 +62,10 @@ Some people on the Internet will sneer at such open source projects that are div So back to the topic, why do we want an open source cluster version? Just because you love open source? If you say that we are still teenagers, you may believe this, but do you believe this when a person who is going to be 30 and has family responsibilities says this, I don’t believe it myself😂. First of all, let's take a look at what open source can bring, or why open source should be done. The idea of full-time open source at the beginning is very simple, to make your favorite open source product (realized), the programmer's dream can be deployed on thousands of servers (see the downloads have been realized), and then make money based on this open source product ( not crying yet). -- User traffic. Open source projects are provided free of charge to users and developers, and have advantages in attracting users to use and promoting them. -- User trust. Open source products are naturally easy to gain the trust and patience of users, or lower the threshold of trust for users. -- Community collaboration. Open source products can attract top contributors to contribute together, receive user feedback issues, pr contributions, etc. Driven by the community, open source projects will become better and better, and more people will participate and use them after positive feedback. 
Community collaboration I think this is the meaning of open source, and this is not just the contribution code collaboration between programmers, users are all collaboration objects (for example, our project has a large number of operation and maintenance friends who contribute code and documents), if it is only code Open source without community collaboration, it is better to release an installation package for others to use and download for free. -- Product ecology. This is required for some ecological products, such as hertzbeat, which need to support monitoring types that connect to various types of protocols, and a large number of monitoring templates. Only a good open source project ecology can attract other contributors to contribute and share, exchange what is needed in the ecology, and ultimately everyone will benefit from the ecology. This is difficult to do in closed source programs. +* User traffic. Open source projects are provided free of charge to users and developers, and have advantages in attracting users to use and promoting them. +* User trust. Open source products are naturally easy to gain the trust and patience of users, or lower the threshold of trust for users. +* Community collaboration. Open source products can attract top contributors to contribute together, receive user feedback issues, pr contributions, etc. Driven by the community, open source projects will become better and better, and more people will participate and use them after positive feedback. Community collaboration I think this is the meaning of open source, and this is not just the contribution code collaboration between programmers, users are all collaboration objects (for example, our project has a large number of operation and maintenance friends who contribute code and documents), if it is only code Open source without community collaboration, it is better to release an installation package for others to use and download for free. +* Product ecology. 
This is required for some ecological products, such as hertzbeat, which need to support monitoring types that connect to various types of protocols, and a large number of monitoring templates. Only a good open source project ecology can attract other contributors to contribute and share, exchange what is needed in the ecology, and ultimately everyone will benefit from the ecology. This is difficult to do in closed source programs. The above points focus on community collaboration and product ecology. This is also the reason for the open source cluster version. Only open source products can be rolled into stronger product power. For example, the technical feature of cluster will naturally attract developers (and the cluster itself is The product of our community collaboration) will attract more users and contributors to use feedback and iterate together. The community drives and then positively promotes open source projects and satisfies user functional experience. As for open source commercialization, the premise of open source commercialization is to have a really good, popular, and widely used open source product, and then do commercialization on this basis to make money. @@ -88,80 +88,80 @@ As for open source commercialization, the premise of open source commercializati docker run -d -e IDENTITY=custom-collector-name -e MANAGER_IP=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MANAGER_IP=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. +* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. +* `-e MANAGER_IP=127.0.0.1` : set the main hertzbeat server ip. +* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. 
Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) --- + ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! -* [doc] add v1.3.2 publish doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1075 -* remove elasticsearch unused param index by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1080 -* feature support monitoring apache airflow by @luoxuanzao in https://github.com/apache/hertzbeat/pull/1081 -* add luoxuanzao as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1083 -* [collector] bugfix sshd cannot use private key to connect by @gcdd1993 in https://github.com/apache/hertzbeat/pull/1084 -* bugfix update dashboard alerts cards height not consist by @tomsun28 in https://github.com/apache/hertzbeat/pull/1087 -* Feature#serverchan by @zqr10159 in https://github.com/apache/hertzbeat/pull/1092 -* bugfix dm database monitoring connect error by @lisongning in https://github.com/apache/hertzbeat/pull/1094 -* add lisongning as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1096 -* update alert rule operator display "<=" to ">=" by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1097 -* [doc] add custom monitoring relate document by @tomsun28 in https://github.com/apache/hertzbeat/pull/1098 -* add YutingNie as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1103 -* Remove unreachable status by @YutingNie in https://github.com/apache/hertzbeat/pull/1102 -* 139 auto update alert status by @l646505418 in https://github.com/apache/hertzbeat/pull/1104 -* feat: aviator fn for str contains, exists & matches by @mikezzb in https://github.com/apache/hertzbeat/pull/1106 -* add mikezzb as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1107 -* bugfix common alarm do not need monitorId 
tag existed by @tomsun28 in https://github.com/apache/hertzbeat/pull/1108 -* bugfix extern alert do not have labels mapping inner monitor by @tomsun28 in https://github.com/apache/hertzbeat/pull/1111 -* feature: support apache spark metrics monitoring by @a-little-fool in https://github.com/apache/hertzbeat/pull/1114 -* add a-little-fool as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1116 -* [Feature]Add third report of TenCloud by @zqr10159 in https://github.com/apache/hertzbeat/pull/1113 -* [Feature]Add third report of TenCloud (#1113) by @zqr10159 in https://github.com/apache/hertzbeat/pull/1119 -* [manager] fix: can query by tags when tagValue is null by @l646505418 in https://github.com/apache/hertzbeat/pull/1118 -* bugfix the notification template environment variable display error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1120 -* add littlezhongzer as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1127 -* feature:monitor brearer token api, ignore letter case to comparison by @littlezhongzer in https://github.com/apache/hertzbeat/pull/1122 -* docs: enhance README by @mikezzb in https://github.com/apache/hertzbeat/pull/1128 -* Update app-oracle.yml by @ChenXiangxxxxx in https://github.com/apache/hertzbeat/pull/1129 -* add ChenXiangxxxxx as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1130 -* fix alarm silence strategy setting failed by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1131 -* support run sql script file in jdbc protocol config by @tomsun28 in https://github.com/apache/hertzbeat/pull/1117 -* bugfix return old cache json file when upgrade version by @tomsun28 in https://github.com/apache/hertzbeat/pull/1137 -* support ssh protocol config choose if reuse connection by @tomsun28 in https://github.com/apache/hertzbeat/pull/1136 -* feat(web): alert threshold UI support matches & contains by @mikezzb in 
https://github.com/apache/hertzbeat/pull/1138 -* support hertzbeat metrics collector cluster by @tomsun28 in https://github.com/apache/hertzbeat/pull/1101 -* add collector card in dashboard by @tomsun28 in https://github.com/apache/hertzbeat/pull/1147 -* bugfix: linux collect warning: bad syntax, perhaps a bogus '-' by @Mr-zhou315 in https://github.com/apache/hertzbeat/pull/1151 -* add Mr-zhou315 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1157 -* support config timezone locale language region on web ui by @tomsun28 in https://github.com/apache/hertzbeat/pull/1154 -* bugfix monitoring template app name already exists by @tomsun28 in https://github.com/apache/hertzbeat/pull/1152 -* bugfix can not startup when error monitoring template yml file by @tomsun28 in https://github.com/apache/hertzbeat/pull/1153 -* tags also deleted when the monitor is deleted by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1159 -* monitoring param host with http head will not be error reported by @littlezhongzer in https://github.com/apache/hertzbeat/pull/1155 -* [script] feature update build.sh and Dockerfile: detect app version a… by @XimfengYao in https://github.com/apache/hertzbeat/pull/1162 -* add XimfengYao as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1163 -* [doc] add collector clusters document by @tomsun28 in https://github.com/apache/hertzbeat/pull/1161 -* [hertzbeat] release hertzbeat version v1.4.0 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1168 +* [doc] add v1.3.2 publish doc by @tomsun28 in +* remove elasticsearch unused param index by @Ceilzcx in +* feature support monitoring apache airflow by @luoxuanzao in +* add luoxuanzao as a contributor for code by @allcontributors in +* [collector] bugfix sshd cannot use private key to connect by @gcdd1993 in +* bugfix update dashboard alerts cards height not consist by @tomsun28 in +* Feature#serverchan by @zqr10159 in +* 
bugfix dm database monitoring connect error by @lisongning in +* add lisongning as a contributor for code by @allcontributors in +* update alert rule operator display "<=" to ">=" by @Ceilzcx in +* [doc] add custom monitoring relate document by @tomsun28 in +* add YutingNie as a contributor for code by @allcontributors in +* Remove unreachable status by @YutingNie in +* 139 auto update alert status by @l646505418 in +* feat: aviator fn for str contains, exists & matches by @mikezzb in +* add mikezzb as a contributor for code by @allcontributors in +* bugfix common alarm do not need monitorId tag existed by @tomsun28 in +* bugfix extern alert do not have labels mapping inner monitor by @tomsun28 in +* feature: support apache spark metrics monitoring by @a-little-fool in +* add a-little-fool as a contributor for code by @allcontributors in +* [Feature]Add third report of TenCloud by @zqr10159 in +* [Feature]Add third report of TenCloud (#1113) by @zqr10159 in +* [manager] fix: can query by tags when tagValue is null by @l646505418 in +* bugfix the notification template environment variable display error by @tomsun28 in +* add littlezhongzer as a contributor for code by @allcontributors in +* feature:monitor brearer token api, ignore letter case to comparison by @littlezhongzer in +* docs: enhance README by @mikezzb in +* Update app-oracle.yml by @ChenXiangxxxxx in +* add ChenXiangxxxxx as a contributor for code by @allcontributors in +* fix alarm silence strategy setting failed by @Ceilzcx in +* support run sql script file in jdbc protocol config by @tomsun28 in +* bugfix return old cache json file when upgrade version by @tomsun28 in +* support ssh protocol config choose if reuse connection by @tomsun28 in +* feat(web): alert threshold UI support matches & contains by @mikezzb in +* support hertzbeat metrics collector cluster by @tomsun28 in +* add collector card in dashboard by @tomsun28 in +* bugfix: linux collect warning: bad syntax, perhaps a bogus '-' by 
@Mr-zhou315 in +* add Mr-zhou315 as a contributor for code by @allcontributors in +* support config timezone locale language region on web ui by @tomsun28 in +* bugfix monitoring template app name already exists by @tomsun28 in +* bugfix can not startup when error monitoring template yml file by @tomsun28 in +* tags also deleted when the monitor is deleted by @Ceilzcx in +* monitoring param host with http head will not be error reported by @littlezhongzer in +* [script] feature update build.sh and Dockerfile: detect app version a… by @XimfengYao in +* add XimfengYao as a contributor for code by @allcontributors in +* [doc] add collector clusters document by @tomsun28 in +* [hertzbeat] release hertzbeat version v1.4.0 by @tomsun28 in --- ## ⛄ Supported -- Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch -- and more for your custom monitoring. -- Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook`. +* Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server +* Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster +* Linux, Ubuntu, CentOS, Windows +* Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +* Kubernetes, Docker +* Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch +* and more for your custom monitoring. +* Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook`. 
---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** - +**Github: ** +**Gitee: ** diff --git a/home/blog/2023-08-28-new-committer.md b/home/blog/2023-08-28-new-committer.md index 222670b3c22..62a9b03d7b6 100644 --- a/home/blog/2023-08-28-new-committer.md +++ b/home/blog/2023-08-28-new-committer.md @@ -34,7 +34,7 @@ In March 2023, I started to contact Hertzbeat, due to the need for a complete mo So far, to participate in the Hertzbeat open source project has been more than five months, from the first time to submit issues to the first time to submit pr, are the process of growth and progress, full count or a lot of pr, specific as follows: -** contribution **: +**contribution**: * Realize real-time data deposited into Redis custom db @@ -81,6 +81,6 @@ HertzBeat HertzBeat is an open source real-time monitoring and alerting system w > `HertzBeat`'s powerful customization, multi-type support, high performance, easy to extend, low-coupling, and hopefully can help developers and teams to quickly build their own monitoring system. -Github: https://github.com/apache/hertzbeat +Github: More users are welcome to participate in `HertzBeat` open source collaboration, no matter a typo or punctuation we are very welcome. diff --git a/home/blog/2023-09-26-hertzbeat-v1.4.1.md b/home/blog/2023-09-26-hertzbeat-v1.4.1.md index fc91ebb300d..3c9910bc556 100644 --- a/home/blog/2023-09-26-hertzbeat-v1.4.1.md +++ b/home/blog/2023-09-26-hertzbeat-v1.4.1.md @@ -26,33 +26,33 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** -### HertzBeat's 1.4.1 version is coming! 
+### HertzBeat's 1.4.1 version is coming -- new login page +* new login page image image -- collector manage +* collector manage image image -- new help moudle +* new help module image -- monitor metrics dashboard name i18n +* monitor metrics dashboard name i18n image -- refactor collector dispatcher and more +* refactor collector dispatcher and more ### Install quickly via docker @@ -72,85 +72,85 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. +* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. +* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. +* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) --- + ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! 
-* bugfix npe when get undefined name monitor template yml by @tomsun28 in https://github.com/apache/hertzbeat/pull/1173 -* [bug fixed]When importing and exporting monitoring, support export collectors, configure collectors when importing by @zqr10159 in https://github.com/apache/hertzbeat/pull/1178 -* support alert threshold rule config system value row count by @tomsun28 in https://github.com/apache/hertzbeat/pull/1180 -* Update README.md by @zqr10159 in https://github.com/apache/hertzbeat/pull/1182 -* support config alert threshold tags bind by @tomsun28 in https://github.com/apache/hertzbeat/pull/1181 -* the back-end of help component has been built by @YutingNie in https://github.com/apache/hertzbeat/pull/1160 -* support enable alert threshold auto resolved notice by @tomsun28 in https://github.com/apache/hertzbeat/pull/1185 -* Delete tag of the dashboard's homepage on the top four pages by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1189 -* replace obsolete `registry.npm.taobao.org` to`registry.npmmirror.com` by @zqr10159 in https://github.com/apache/hertzbeat/pull/1192 -* refactor MonitorServiceImpl by @Carpe-Wang in https://github.com/apache/hertzbeat/pull/1190 -* config default system timezone and fix monitor status auto recover by @tomsun28 in https://github.com/apache/hertzbeat/pull/1187 -* update-doc-doris by @zqr10159 in https://github.com/apache/hertzbeat/pull/1193 -* [manager] support tidb database monitoring by @luxx-lq in https://github.com/apache/hertzbeat/pull/733 -* refactor fix potential npe by @Carpe-Wang in https://github.com/apache/hertzbeat/pull/1197 -* [ospp] support ui help massage component by @YutingNie in https://github.com/apache/hertzbeat/pull/1199 -* support monitor metrics name i18n by @tomsun28 in https://github.com/apache/hertzbeat/pull/1198 -* support google analytics by @tomsun28 in https://github.com/apache/hertzbeat/pull/1202 -* refactor code and fix some npe by @Carpe-Wang in 
https://github.com/apache/hertzbeat/pull/1201 -* bugfix fix found 2 dataQueue bean when not config common.queue param by @tomsun28 in https://github.com/apache/hertzbeat/pull/1205 -* Help component update by @YutingNie in https://github.com/apache/hertzbeat/pull/1207 -* bugfix enterprise wechat push display content is too cumbersome by @l646505418 in https://github.com/apache/hertzbeat/pull/1149 -* bugfix WeChatAppAlertNotifyHandlerImpl by @LINGLUOJUN in https://github.com/apache/hertzbeat/pull/1208 -* add LINGLUOJUN as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1209 -* fix jmx jndi inject vulnerability by @luelueking in https://github.com/apache/hertzbeat/pull/1215 -* add luelueking as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1217 -* bugfix monitoring param number limit range by @qyaaaa in https://github.com/apache/hertzbeat/pull/1216 -* add qyaaaa as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1218 -* add app-ping i18n by @qyaaaa in https://github.com/apache/hertzbeat/pull/1220 -* some codes opt by @LINGLUOJUN in https://github.com/apache/hertzbeat/pull/1214 -* support deploy hertzbeat by kubernetes helm charts by @tomsun28 in https://github.com/apache/hertzbeat/pull/1221 -* bugfix threshold setting template variables has repeated parameters by @qyaaaa in https://github.com/apache/hertzbeat/pull/1223 -* support display metrics i18n label when threshold setting by @tomsun28 in https://github.com/apache/hertzbeat/pull/1225 -* bugfix user role display not correctly on webui by @tomsun28 in https://github.com/apache/hertzbeat/pull/1227 -* add hertzbeat about msg card by @tomsun28 in https://github.com/apache/hertzbeat/pull/1229 -* add app-api i18n by @novohit in https://github.com/apache/hertzbeat/pull/1236 -* add novohit as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1238 -* [feature]Add 
`getAlertDefinesByName`. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1237 -* thread pool executor support shutdown gracefully by @LINGLUOJUN in https://github.com/apache/hertzbeat/pull/1240 -* fix: expression injection RCE by @mikezzb in https://github.com/apache/hertzbeat/pull/1241 -* [bugfix]Replace schema "{key1:value1}" to "{\"key1\":\"value1\"}" by @zqr10159 in https://github.com/apache/hertzbeat/pull/1245 -* [Refactor] Use static methods instead of constructors for Message.java by @gcdd1993 in https://github.com/apache/hertzbeat/pull/1247 -* bugfix snake yaml decode rce by @tomsun28 in https://github.com/apache/hertzbeat/pull/1239 -* bugfix jackson deserialize localDatetime error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1249 -* netty as an independent module, add new feature about collector list by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1244 -* support show deploy collector script in web by @tomsun28 in https://github.com/apache/hertzbeat/pull/1251 -* bugfix mongodb collect extra metrics npe by @tomsun28 in https://github.com/apache/hertzbeat/pull/1257 -* bugfix fix collector run cyclic when connect auth failed by @tomsun28 in https://github.com/apache/hertzbeat/pull/1256 -* update webapp login ui by @tomsun28 in https://github.com/apache/hertzbeat/pull/1260 -* bugfix collector can not auto reconnect when channel idle by @tomsun28 in https://github.com/apache/hertzbeat/pull/1259 -* update alarm notice wework app send content ui by @tomsun28 in https://github.com/apache/hertzbeat/pull/1258 -* [hertzbeat] release hertzbeat version v1.4.1 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1261 -* auto split webhook token when user input hook url by @tomsun28 in https://github.com/apache/hertzbeat/pull/1262 +* bugfix npe when get undefined name monitor template yml by @tomsun28 in +* [bug fixed]When importing and exporting monitoring, support export collectors, configure collectors when importing by @zqr10159 in +* 
support alert threshold rule config system value row count by @tomsun28 in +* Update README.md by @zqr10159 in +* support config alert threshold tags bind by @tomsun28 in +* the back-end of help component has been built by @YutingNie in +* support enable alert threshold auto resolved notice by @tomsun28 in +* Delete tag of the dashboard's homepage on the top four pages by @Ceilzcx in +* replace obsolete `registry.npm.taobao.org` to`registry.npmmirror.com` by @zqr10159 in +* refactor MonitorServiceImpl by @Carpe-Wang in +* config default system timezone and fix monitor status auto recover by @tomsun28 in +* update-doc-doris by @zqr10159 in +* [manager] support tidb database monitoring by @luxx-lq in +* refactor fix potential npe by @Carpe-Wang in +* [ospp] support ui help massage component by @YutingNie in +* support monitor metrics name i18n by @tomsun28 in +* support google analytics by @tomsun28 in +* refactor code and fix some npe by @Carpe-Wang in +* bugfix fix found 2 dataQueue bean when not config common.queue param by @tomsun28 in +* Help component update by @YutingNie in +* bugfix enterprise wechat push display content is too cumbersome by @l646505418 in +* bugfix WeChatAppAlertNotifyHandlerImpl by @LINGLUOJUN in +* add LINGLUOJUN as a contributor for code by @allcontributors in +* fix jmx jndi inject vulnerability by @luelueking in +* add luelueking as a contributor for code by @allcontributors in +* bugfix monitoring param number limit range by @qyaaaa in +* add qyaaaa as a contributor for code by @allcontributors in +* add app-ping i18n by @qyaaaa in +* some codes opt by @LINGLUOJUN in +* support deploy hertzbeat by kubernetes helm charts by @tomsun28 in +* bugfix threshold setting template variables has repeated parameters by @qyaaaa in +* support display metrics i18n label when threshold setting by @tomsun28 in +* bugfix user role display not correctly on webui by @tomsun28 in +* add hertzbeat about msg card by @tomsun28 in +* add app-api i18n by 
@novohit in +* add novohit as a contributor for code by @allcontributors in +* [feature]Add `getAlertDefinesByName`. by @zqr10159 in +* thread pool executor support shutdown gracefully by @LINGLUOJUN in +* fix: expression injection RCE by @mikezzb in +* [bugfix]Replace schema "{key1:value1}" to "{\"key1\":\"value1\"}" by @zqr10159 in +* [Refactor] Use static methods instead of constructors for Message.java by @gcdd1993 in +* bugfix snake yaml decode rce by @tomsun28 in +* bugfix jackson deserialize localDatetime error by @tomsun28 in +* netty as an independent module, add new feature about collector list by @Ceilzcx in +* support show deploy collector script in web by @tomsun28 in +* bugfix mongodb collect extra metrics npe by @tomsun28 in +* bugfix fix collector run cyclic when connect auth failed by @tomsun28 in +* update webapp login ui by @tomsun28 in +* bugfix collector can not auto reconnect when channel idle by @tomsun28 in +* update alarm notice wework app send content ui by @tomsun28 in +* [hertzbeat] release hertzbeat version v1.4.1 by @tomsun28 in +* auto split webhook token when user input hook url by @tomsun28 in --- ## ⛄ Supported -- Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch -- and more for your custom monitoring. -- Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. 
+* Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server +* Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster +* Linux, Ubuntu, CentOS, Windows +* Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +* Kubernetes, Docker +* Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch +* and more for your custom monitoring. +* Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. ---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** - +**Github: ** +**Gitee: ** diff --git a/home/blog/2023-11-12-hertzbeat-v1.4.2.md b/home/blog/2023-11-12-hertzbeat-v1.4.2.md index ad9b6783518..879ccd288e0 100644 --- a/home/blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/blog/2023-11-12-hertzbeat-v1.4.2.md @@ -24,17 +24,17 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** -### HertzBeat's 1.4.2 version release! 
+### HertzBeat's 1.4.2 version release -- support custom notice template -- support push metrics monitoring(beta) -- support using Huawei Cloud OBS to store monitoring templates yml -- support emqx monitoring and udp port monitoring -- more features , fix multiple bugs and so on +* support custom notice template +* support push metrics monitoring(beta) +* support using Huawei Cloud OBS to store monitoring templates yml +* support emqx monitoring and udp port monitoring +* more features, fix multiple bugs and so on ### Install quickly via docker @@ -54,106 +54,106 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. +* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. +* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. +* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) --- + ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗!
-* bugfix counting wrong tasks num of collector by @tomsun28 in https://github.com/apache/hertzbeat/pull/1265 -* [ospp] add push style collector by @vinci-897 in https://github.com/apache/hertzbeat/pull/1222 -* add 1.4.1 version doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1263 -* support using Huawei Cloud OBS to store custom define yml file by @gcdd1993 in https://github.com/apache/hertzbeat/pull/1266 -* [doc] add more contact channel by @tomsun28 in https://github.com/apache/hertzbeat/pull/1272 -* bugfix app-tomcat memory_pool unit mb by @rbsrcy in https://github.com/apache/hertzbeat/pull/1268 -* add rbsrcy as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1271 -* [doc] update docker.md by @ruanliang-hualun in https://github.com/apache/hertzbeat/pull/1270 -* add ruanliang-hualun as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1274 -* bugfix jmx memory_pool unit and time unit error by @rbsrcy in https://github.com/apache/hertzbeat/pull/1273 -* bugfix old version monitor alert has no monitor name by @tomsun28 in https://github.com/apache/hertzbeat/pull/1278 -* support edit monitor in monitor detail page by @tomsun28 in https://github.com/apache/hertzbeat/pull/1282 -* reset alert converge reduce cache when restored alert trigger by @tomsun28 in https://github.com/apache/hertzbeat/pull/1281 -* [ospp] add push style collector doc by @vinci-897 in https://github.com/apache/hertzbeat/pull/1267 -* bugfix threshold availability automatically carries threshold parameters by @tomsun28 in https://github.com/apache/hertzbeat/pull/1285 -* [ospp] support custom notice template by @Eden4701 in https://github.com/apache/hertzbeat/pull/1233 -* add Eden4701 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1287 -* bugfix AvailableAlertDefineInit - query did not return a unique result by @tomsun28 in https://github.com/apache/hertzbeat/pull/1288 
-* upgrade to version angular 15 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1291 -* support push style for multiple messages by @vinci-897 in https://github.com/apache/hertzbeat/pull/1292 -* update hertzbeat upgrade help doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1294 -* feat alert converge, define, silence support search query by @tomsun28 in https://github.com/apache/hertzbeat/pull/1300 -* feature:support monitoring udp port availability by @tomsun28 in https://github.com/apache/hertzbeat/pull/1295 -* support emqx monitor by @vinci-897 in https://github.com/apache/hertzbeat/pull/1302 -* add an explicit tag filter by @vinci-897 in https://github.com/apache/hertzbeat/pull/1303 -* add hertzbeat icon by @zqr10159 in https://github.com/apache/hertzbeat/pull/1305 -* [doc] update kafka help doc by @XiaTian688 in https://github.com/apache/hertzbeat/pull/1308 -* add XiaTian688 as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1309 -* support webhook custom template by @tomsun28 in https://github.com/apache/hertzbeat/pull/1306 -* set ssh param connect reused default false by @tomsun28 in https://github.com/apache/hertzbeat/pull/1310 -* upgrade greptimedb to v0.4 by @liyin in https://github.com/apache/hertzbeat/pull/1311 -* add liyin as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1313 -* add some emqx monitoring metrics by @tomsun28 in https://github.com/apache/hertzbeat/pull/1312 -* feature: app-mysql.yml by @a-little-fool in https://github.com/apache/hertzbeat/pull/1316 -* modify default IoTDB version config to V_1_0 by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1315 -* bugfix timestamp is null by @qyaaaa in https://github.com/apache/hertzbeat/pull/1246 -* [hertzbeat] release hertzbeat version v1.4.2 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1317 -* bugfix alarm time span match in silence and notice by @tomsun28 in 
https://github.com/apache/hertzbeat/pull/1318 -* update available alert threshold trigger times default 2 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1324 -* bugfix rabbitmq contains duplicated metric by @tomsun28 in https://github.com/apache/hertzbeat/pull/1322 -* [alerter] optimize the encoding of how to add Extern Alarm Manage API(#1320) by @SurryChen in https://github.com/apache/hertzbeat/pull/1325 -* bugfix webhook post body error and alarm recover exception by @tomsun28 in https://github.com/apache/hertzbeat/pull/1327 -* build hertzbeat package with jdk runtime by @tomsun28 in https://github.com/apache/hertzbeat/pull/1328 +* bugfix counting wrong tasks num of collector by @tomsun28 in +* [ospp] add push style collector by @vinci-897 in +* add 1.4.1 version doc by @tomsun28 in +* support using Huawei Cloud OBS to store custom define yml file by @gcdd1993 in +* [doc] add more contact channel by @tomsun28 in +* bugfix app-tomcat memory_pool unit mb by @rbsrcy in +* add rbsrcy as a contributor for code by @allcontributors in +* [doc] update docker.md by @ruanliang-hualun in +* add ruanliang-hualun as a contributor for doc by @allcontributors in +* bugfix jmx memory_pool unit and time unit error by @rbsrcy in +* bugfix old version monitor alert has no monitor name by @tomsun28 in +* support edit monitor in monitor detail page by @tomsun28 in +* reset alert converge reduce cache when restored alert trigger by @tomsun28 in +* [ospp] add push style collector doc by @vinci-897 in +* bugfix threshold availability automatically carries threshold parameters by @tomsun28 in +* [ospp] support custom notice template by @Eden4701 in +* add Eden4701 as a contributor for code by @allcontributors in +* bugfix AvailableAlertDefineInit - query did not return a unique result by @tomsun28 in +* upgrade to version angular 15 by @tomsun28 in +* support push style for multiple messages by @vinci-897 in +* update hertzbeat upgrade help doc by @tomsun28 in +* feat alert 
converge, define, silence support search query by @tomsun28 in +* feature:support monitoring udp port availability by @tomsun28 in +* support emqx monitor by @vinci-897 in +* add an explicit tag filter by @vinci-897 in +* add hertzbeat icon by @zqr10159 in +* [doc] update kafka help doc by @XiaTian688 in +* add XiaTian688 as a contributor for doc by @allcontributors in +* support webhook custom template by @tomsun28 in +* set ssh param connect reused default false by @tomsun28 in +* upgrade greptimedb to v0.4 by @liyin in +* add liyin as a contributor for code by @allcontributors in +* add some emqx monitoring metrics by @tomsun28 in +* feature: app-mysql.yml by @a-little-fool in +* modify default IoTDB version config to V_1_0 by @Ceilzcx in +* bugfix timestamp is null by @qyaaaa in +* [hertzbeat] release hertzbeat version v1.4.2 by @tomsun28 in +* bugfix alarm time span match in silence and notice by @tomsun28 in +* update available alert threshold trigger times default 2 by @tomsun28 in +* bugfix rabbitmq contains duplicated metric by @tomsun28 in +* [alerter] optimize the encoding of how to add Extern Alarm Manage API(#1320) by @SurryChen in +* bugfix webhook post body error and alarm recover exception by @tomsun28 in +* build hertzbeat package with jdk runtime by @tomsun28 in ## New Contributors -* @rbsrcy made their first contribution in https://github.com/apache/hertzbeat/pull/1268 -* @XiaTian688 made their first contribution in https://github.com/apache/hertzbeat/pull/1308 -* @liyin made their first contribution in https://github.com/apache/hertzbeat/pull/1311 +* @rbsrcy made their first contribution in +* @XiaTian688 made their first contribution in +* @liyin made their first contribution in -**Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.1...v1.4.2 +**Full Changelog**: --- ## ⛄ Supported -- Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server -- Mysql, 
PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch -- and more for your custom monitoring. -- Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. +* Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server +* Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster +* Linux, Ubuntu, CentOS, Windows +* Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +* Kubernetes, Docker +* Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch +* and more for your custom monitoring. +* Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. 
---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ### **Download Link** **hertzbeat server** -- ⬇️ [hertzbeat-1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-1.4.2.tar.gz) -- ⬇️ [hertzbeat-1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-1.4.2.zip) -- ⬇️ [hertzbeat-linux_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-linux_amd64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-linux_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-linux_arm64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-macos_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-macos_arm64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-macos_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-macos_amd64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-windows64_1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-windows64_1.4.2.zip) +* ⬇️ [hertzbeat-1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-1.4.2.tar.gz) +* ⬇️ [hertzbeat-1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-1.4.2.zip) +* ⬇️ [hertzbeat-linux_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-linux_amd64_1.4.2.tar.gz) +* ⬇️ [hertzbeat-linux_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-linux_arm64_1.4.2.tar.gz) +* ⬇️ [hertzbeat-macos_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-macos_arm64_1.4.2.tar.gz) +* ⬇️ [hertzbeat-macos_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-macos_amd64_1.4.2.tar.gz) +* ⬇️ 
[hertzbeat-windows64_1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-windows64_1.4.2.zip) **hertzbeat collector** -- ⬇️ [hertzbeat-collector-1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-1.4.2.tar.gz) -- ⬇️ [hertzbeat-collector-1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-1.4.2.zip) -- ⬇️ [hertzbeat-collector-linux_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-linux_amd64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-collector-linux_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-linux_arm64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-collector-macos_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_arm64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-collector-macos_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_amd64_1.4.2.tar.gz) -- ⬇️ [hertzbeat-collector-windows64_1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-windows64_1.4.2.zip) - +* ⬇️ [hertzbeat-collector-1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-1.4.2.tar.gz) +* ⬇️ [hertzbeat-collector-1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-1.4.2.zip) +* ⬇️ [hertzbeat-collector-linux_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-linux_amd64_1.4.2.tar.gz) +* ⬇️ [hertzbeat-collector-linux_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-linux_arm64_1.4.2.tar.gz) +* ⬇️ [hertzbeat-collector-macos_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_arm64_1.4.2.tar.gz) +* ⬇️ 
[hertzbeat-collector-macos_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_amd64_1.4.2.tar.gz) +* ⬇️ [hertzbeat-collector-windows64_1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-windows64_1.4.2.zip) diff --git a/home/blog/2023-12-11-hertzbeat-v1.4.3.md b/home/blog/2023-12-11-hertzbeat-v1.4.3.md index 3d4508b1bff..7a2ed2e01b0 100644 --- a/home/blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/blog/2023-12-11-hertzbeat-v1.4.3.md @@ -25,20 +25,20 @@ keywords: [open source monitoring system, alerting system] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: <https://github.com/apache/hertzbeat>** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: <https://gitee.com/hertzbeat/hertzbeat>** -### HertzBeat's 1.4.3 Version Release! +### HertzBeat's 1.4.3 Version Release -- enhanced reporting of external general alert API -- support mysql api port website mongodb jvm redis monitoring metrics name i18n -- support auto collect metrics by prometheus task -- support victoriametrics as metrics data storage -- support monitoring spring gateway metrics -- add more windows monitoring metrics -- add e2e testing module, support by api-testing -- more feature, document and bugfix +* enhanced reporting of external general alert API +* support mysql api port website mongodb jvm redis monitoring metrics name i18n +* support auto collect metrics by prometheus task +* support victoriametrics as metrics data storage +* support monitoring spring gateway metrics +* add more windows monitoring metrics +* add e2e testing module, support by api-testing +* more features, documentation and bugfixes Compatible with the Prometheus ecosystem, now we can monitor what Prometheus can monitoring with few clicks on webui.
@@ -60,114 +60,114 @@ Compatible with the Prometheus ecosystem, now we can monitor what Prometheus can docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. +* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. +* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. +* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) --- + ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! -* update package deploy doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1330 -* bugfix duplicate collect job when update monitor templates by @tomsun28 in https://github.com/apache/hertzbeat/pull/1332 -* bugfix number variable in freemarker template display error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1334 -* [alerter] Enhanced reporting of external general alert API by @SurryChen in https://github.com/apache/hertzbeat/pull/1326 -* [doc] update hertzbeat-mysql-tdengine readme by @jiashu1024 in https://github.com/apache/hertzbeat/pull/1335 -* add jiashu1024 as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1336 -* app-mysql.yml: Adjust slow query translation by @1036664317 in https://github.com/apache/hertzbeat/pull/1337 -* add 1036664317 as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1338 -* Bump com.google.guava:guava from 31.0.1-jre to 32.0.0-jre by @dependabot in https://github.com/apache/hertzbeat/pull/1339 -* [feature] 
support auto collect metrics by prometheus task by @tomsun28 in https://github.com/apache/hertzbeat/pull/1342 -* [doc] add vinci as new committer by @tomsun28 in https://github.com/apache/hertzbeat/pull/1341 -* [feature] add tag word cloud in dashboard by @tomsun28 in https://github.com/apache/hertzbeat/pull/1345 -* support custom prometheus endpoint path by @tomsun28 in https://github.com/apache/hertzbeat/pull/1346 -* bugfix tdengine query interval history metrics data with instance error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1348 -* unlimit Alert.java content field length by @xiaoguolong in https://github.com/apache/hertzbeat/pull/1351 -* add xiaoguolong as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1353 -* update monitor detail table ui layout by @tomsun28 in https://github.com/apache/hertzbeat/pull/1352 -* [doc]add star history by @zqr10159 in https://github.com/apache/hertzbeat/pull/1356 -* feature: app-mongodb.yml by @a-little-fool in https://github.com/apache/hertzbeat/pull/1359 -* alarm threshold support prometheus task metrics by @tomsun28 in https://github.com/apache/hertzbeat/pull/1354 -* support victoriametrics as metrics data storage by @tomsun28 in https://github.com/apache/hertzbeat/pull/1361 -* Add time type to support query_time of mysql and mariadb by @Clownsw in https://github.com/apache/hertzbeat/pull/1364 -* add Clownsw as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1365 -* Error occured when I followed running steps to start Front-web by @Calvin979 in https://github.com/apache/hertzbeat/pull/1366 -* add Calvin979 as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1367 -* enriches the cncf landscape by @tomsun28 in https://github.com/apache/hertzbeat/pull/1368 -* Fix flaky test in CollectUtilTest by @bbelide2 in https://github.com/apache/hertzbeat/pull/1371 -* add bbelide2 as a contributor for code by 
@allcontributors in https://github.com/apache/hertzbeat/pull/1372 -* Fix flaky test replaceSmilingPlaceholder by @bbelide2 in https://github.com/apache/hertzbeat/pull/1373 -* add docker-compose script hertzbeat+mysql+victoria-metrics all in one by @tomsun28 in https://github.com/apache/hertzbeat/pull/1370 -* Feature: app-jvm.yml support for international name aliases by @Calvin979 in https://github.com/apache/hertzbeat/pull/1376 -* add Calvin979 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1377 -* feature: support monitoring spring gateway metrics by @a-little-fool in https://github.com/apache/hertzbeat/pull/1374 -* update code comment and doc, bugfix concurrent exception by @tomsun28 in https://github.com/apache/hertzbeat/pull/1378 -* update windows define and accept snmp leaf by @jinyaoMa in https://github.com/apache/hertzbeat/pull/1379 -* add jinyaoMa as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1380 -* fix exception when sending email has special chars by @Carpe-Wang in https://github.com/apache/hertzbeat/pull/1383 -* test: add e2e testing for some basic APIs by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1387 -* add LinuxSuRen as a contributor for code, and test by @allcontributors in https://github.com/apache/hertzbeat/pull/1389 -* bugfix auto generate monitor name error when add monitor by @tomsun28 in https://github.com/apache/hertzbeat/pull/1384 -* bugfix CalculateAlarm execAlertExpression NPE by @tomsun28 in https://github.com/apache/hertzbeat/pull/1388 -* Feature: app-redis.yml support for international name aliases by @Calvin979 in https://github.com/apache/hertzbeat/pull/1390 -* test: add more monitor related e2e testing case by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1391 -* chore: update the pr template about the e2e testing by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1392 -* add help header ui when update or add 
monitors by @tomsun28 in https://github.com/apache/hertzbeat/pull/1399 -* [hertzbeat] release hertzbeat version v1.4.3 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1400 +* update package deploy doc by @tomsun28 in +* bugfix duplicate collect job when update monitor templates by @tomsun28 in +* bugfix number variable in freemarker template display error by @tomsun28 in +* [alerter] Enhanced reporting of external general alert API by @SurryChen in +* [doc] update hertzbeat-mysql-tdengine readme by @jiashu1024 in +* add jiashu1024 as a contributor for doc by @allcontributors in +* app-mysql.yml: Adjust slow query translation by @1036664317 in +* add 1036664317 as a contributor for doc by @allcontributors in +* Bump com.google.guava:guava from 31.0.1-jre to 32.0.0-jre by @dependabot in +* [feature] support auto collect metrics by prometheus task by @tomsun28 in +* [doc] add vinci as new committer by @tomsun28 in +* [feature] add tag word cloud in dashboard by @tomsun28 in +* support custom prometheus endpoint path by @tomsun28 in +* bugfix tdengine query interval history metrics data with instance error by @tomsun28 in +* unlimit Alert.java content field length by @xiaoguolong in +* add xiaoguolong as a contributor for code by @allcontributors in +* update monitor detail table ui layout by @tomsun28 in +* [doc]add star history by @zqr10159 in +* feature: app-mongodb.yml by @a-little-fool in +* alarm threshold support prometheus task metrics by @tomsun28 in +* support victoriametrics as metrics data storage by @tomsun28 in +* Add time type to support query_time of mysql and mariadb by @Clownsw in +* add Clownsw as a contributor for code by @allcontributors in +* Error occured when I followed running steps to start Front-web by @Calvin979 in +* add Calvin979 as a contributor for doc by @allcontributors in +* enriches the cncf landscape by @tomsun28 in +* Fix flaky test in CollectUtilTest by @bbelide2 in +* add bbelide2 as a contributor for code by 
@allcontributors in +* Fix flaky test replaceSmilingPlaceholder by @bbelide2 in +* add docker-compose script hertzbeat+mysql+victoria-metrics all in one by @tomsun28 in +* Feature: app-jvm.yml support for international name aliases by @Calvin979 in +* add Calvin979 as a contributor for code by @allcontributors in +* feature: support monitoring spring gateway metrics by @a-little-fool in +* update code comment and doc, bugfix concurrent exception by @tomsun28 in +* update windows define and accept snmp leaf by @jinyaoMa in +* add jinyaoMa as a contributor for code by @allcontributors in +* fix exception when sending email has special chars by @Carpe-Wang in +* test: add e2e testing for some basic APIs by @LinuxSuRen in +* add LinuxSuRen as a contributor for code, and test by @allcontributors in +* bugfix auto generate monitor name error when add monitor by @tomsun28 in +* bugfix CalculateAlarm execAlertExpression NPE by @tomsun28 in +* Feature: app-redis.yml support for international name aliases by @Calvin979 in +* test: add more monitor related e2e testing case by @LinuxSuRen in +* chore: update the pr template about the e2e testing by @LinuxSuRen in +* add help header ui when update or add monitors by @tomsun28 in +* [hertzbeat] release hertzbeat version v1.4.3 by @tomsun28 in ## New Contributors -* @1036664317 made their first contribution in https://github.com/apache/hertzbeat/pull/1337 -* @dependabot made their first contribution in https://github.com/apache/hertzbeat/pull/1339 -* @xiaoguolong made their first contribution in https://github.com/apache/hertzbeat/pull/1351 -* @Clownsw made their first contribution in https://github.com/apache/hertzbeat/pull/1364 -* @Calvin979 made their first contribution in https://github.com/apache/hertzbeat/pull/1366 -* @bbelide2 made their first contribution in https://github.com/apache/hertzbeat/pull/1371 -* @jinyaoMa made their first contribution in https://github.com/apache/hertzbeat/pull/1379 -* @LinuxSuRen made their 
first contribution in https://github.com/apache/hertzbeat/pull/1387 +* @1036664317 made their first contribution in +* @dependabot made their first contribution in +* @xiaoguolong made their first contribution in +* @Clownsw made their first contribution in +* @Calvin979 made their first contribution in +* @bbelide2 made their first contribution in +* @jinyaoMa made their first contribution in +* @LinuxSuRen made their first contribution in -**Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.2...v1.4.3 +**Full Changelog**: --- ## ⛄ Supported -- Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch -- and more for your custom monitoring. -- Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. +* Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server +* Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster +* Linux, Ubuntu, CentOS, Windows +* Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +* Kubernetes, Docker +* Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch +* and more for your custom monitoring. +* Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. 
---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ### **Download Link** **hertzbeat server** -- ⬇️ [hertzbeat-1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-1.4.3.tar.gz) -- ⬇️ [hertzbeat-1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-1.4.3.zip) -- ⬇️ [hertzbeat-linux_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-linux_amd64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-linux_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-linux_arm64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-macos_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-macos_arm64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-macos_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-macos_amd64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-windows64_1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-windows64_1.4.3.zip) +* ⬇️ [hertzbeat-1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-1.4.3.tar.gz) +* ⬇️ [hertzbeat-1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-1.4.3.zip) +* ⬇️ [hertzbeat-linux_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-linux_amd64_1.4.3.tar.gz) +* ⬇️ [hertzbeat-linux_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-linux_arm64_1.4.3.tar.gz) +* ⬇️ [hertzbeat-macos_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-macos_arm64_1.4.3.tar.gz) +* ⬇️ [hertzbeat-macos_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-macos_amd64_1.4.3.tar.gz) +* ⬇️ 
[hertzbeat-windows64_1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-windows64_1.4.3.zip) **hertzbeat collector** -- ⬇️ [hertzbeat-collector-1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-1.4.3.tar.gz) -- ⬇️ [hertzbeat-collector-1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-1.4.3.zip) -- ⬇️ [hertzbeat-collector-linux_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-linux_amd64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-collector-linux_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-linux_arm64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-collector-macos_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_arm64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-collector-macos_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_amd64_1.4.3.tar.gz) -- ⬇️ [hertzbeat-collector-windows64_1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-windows64_1.4.3.zip) - +* ⬇️ [hertzbeat-collector-1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-1.4.3.tar.gz) +* ⬇️ [hertzbeat-collector-1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-1.4.3.zip) +* ⬇️ [hertzbeat-collector-linux_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-linux_amd64_1.4.3.tar.gz) +* ⬇️ [hertzbeat-collector-linux_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-linux_arm64_1.4.3.tar.gz) +* ⬇️ [hertzbeat-collector-macos_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_arm64_1.4.3.tar.gz) +* ⬇️ 
[hertzbeat-collector-macos_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_amd64_1.4.3.tar.gz) +* ⬇️ [hertzbeat-collector-windows64_1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-windows64_1.4.3.zip) diff --git a/home/blog/2024-01-11-new-committer.md b/home/blog/2024-01-11-new-committer.md index a9828d755c5..69dc9a239af 100644 --- a/home/blog/2024-01-11-new-committer.md +++ b/home/blog/2024-01-11-new-committer.md @@ -41,6 +41,7 @@ In the following period, I spent some time reading Hertzbeat's code, and submitt Thanks to Tom and my ospp mentor, Zheng Chenxin, who gave me a lot of help and advice during my exposure to the open source community. Currently I am still in charge of some of the code development in the community, I hope Hertzbeat can be better and better in the future! --- + # New Committer - SongXiao **Name: Zhou Shusheng** @@ -57,13 +58,13 @@ In July this year, after basically completing the study of Java framework develo ## Open source contributions -* Support for Spring Gateway, Apache Spark, Apache Hive and other services metrics collection -* Customize nginx and pop3 protocols to collect metrics for Nginx and POP3 mailbox servers, and add corresponding help files. +- Support for Spring Gateway, Apache Spark, Apache Hive and other services metrics collection +- Customize nginx and pop3 protocols to collect metrics for Nginx and POP3 mailbox servers, and add corresponding help files. ## Harvest -* Exposed to better and more complex large-scale projects, improved programming and problem-solving skills. -* Put the theoretical knowledge into practice, gained JUC, microservice related development experience, and valuable project experience. +- Exposed to better and more complex large-scale projects, improved programming and problem-solving skills. 
+- Put the theoretical knowledge into practice, gained JUC, microservice related development experience, and valuable project experience. ## Thanks to our community partners @@ -73,8 +74,8 @@ Thanks to the other partners in the community, I've gained a lot from communicat ## Some advice for newcomers -* When you first get involved in an open source project, start with simple tasks. Gradually familiarize yourself with the code and process of the project, and gradually take on more complex tasks. -* If you encounter problems that you can't solve by yourself, you can ask for help from the community. +- When you first get involved in an open source project, start with simple tasks. Gradually familiarize yourself with the code and process of the project, and gradually take on more complex tasks. +- If you encounter problems that you can't solve by yourself, you can ask for help from the community. --- @@ -104,7 +105,7 @@ Since July this year, I found hertzbeat's issues and prs are very active, so I w - Gained a deeper understanding of network protocols. - I gained a deeper understanding of network protocols. I gained a preliminary understanding of the contribution process of open source projects. -## Thank you to our community partners. +## Thank you to our community partners Thanks to the authors of hertzbeat for the documentation and help. Thanks to my friends for providing me with the courage to try to enter the open source project to contribute. Thanks to other community members for their issues and prs, which accelerated my understanding of the project. @@ -130,8 +131,8 @@ Thanks to the authors of hertzbeat for the documentation and help. Thanks to my > ``HertzBeat``s powerful customization, multi-type support, high performance, easy to extend, low coupling, hope to help developers and teams quickly build their own monitoring system. 
-**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** Welcome more partners to participate in HertzBeat's open source collaboration, no matter a typo or punctuation we are very welcome, we learn together to make progress, the goal is to do a world-class open source software. diff --git a/home/blog/2024-01-18-hertzbeat-v1.4.4.md b/home/blog/2024-01-18-hertzbeat-v1.4.4.md index 66e8f6a25b3..efeaa2b1db8 100644 --- a/home/blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/blog/2024-01-18-hertzbeat-v1.4.4.md @@ -25,23 +25,23 @@ keywords: [open source monitoring system, alerting system] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** -### HertzBeat's 1.4.4 Version Release! +### HertzBeat's 1.4.4 Version Release -- support snmp v3 monitoring protocol @TJxiaobao -- support monitoring NebulaGraph metrics @ZY945 -- support monitoring pop3 metrics @a-little-fool -- support monitoring memcached metrics @ZY945 -- support monitoring nginx metrics @a-little-fool -- support monitoring hive metrics @a-little-fool -- feature: support for dns monitoring by @Calvin979 -- monitoring the availability of websockets through handshake. by @ZY945 -- add ntp protocol and support ntp monitoring by @ZY945 -- add smtp protocol and support smtp monitoring by @ZY945 -- more feature, document and bugfix +* support snmp v3 monitoring protocol @TJxiaobao +* support monitoring NebulaGraph metrics @ZY945 +* support monitoring pop3 metrics @a-little-fool +* support monitoring memcached metrics @ZY945 +* support monitoring nginx metrics @a-little-fool +* support monitoring hive metrics @a-little-fool +* feature: support for dns monitoring by @Calvin979 +* monitoring the availability of websockets through handshake. 
by @ZY945 +* add ntp protocol and support ntp monitoring by @ZY945 +* add smtp protocol and support smtp monitoring by @ZY945 +* more feature, document and bugfix ### Install Quickly Via Docker @@ -61,131 +61,131 @@ keywords: [open source monitoring system, alerting system] docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. +* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. +* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. +* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) --- + ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! -* bugfix metrics tags value store jpa data-storage error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1403 -* add smtp protocol and support smtp monitoring by @ZY945 in https://github.com/apache/hertzbeat/pull/1407 -* add ZY945 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1409 -* support new parse type 'log' in ssh collect protocol by @tomsun28 in https://github.com/apache/hertzbeat/pull/1410 -* add ntp protocol and support ntp monitoring by @ZY945 in https://github.com/apache/hertzbeat/pull/1411 -* monitoring the availability of websockets through handshake. by @ZY945 in https://github.com/apache/hertzbeat/pull/1413 -* [Task-1386] When adding tags in tag management, random colors are given by default. 
by @prolevel1 in https://github.com/apache/hertzbeat/pull/1412 -* add prolevel1 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1415 -* - -# 1397 feature: support for dns monitoring by @Calvin979 in https://github.com/apache/hertzbeat/pull/1416 - -* Support monitoring hive metrics by @a-little-fool in https://github.com/apache/hertzbeat/pull/1417 -* support legend pageable in history data charts by @tomsun28 in https://github.com/apache/hertzbeat/pull/1414 -* update component tip and help tip doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1418 -* feature: support monitoring nginx metrics and add a help doc by @a-little-fool in https://github.com/apache/hertzbeat/pull/1420 -* update parser to parse from prometheus txt metrics data by @tomsun28 in https://github.com/apache/hertzbeat/pull/1421 -* support monitoring memcached metrics and add a help doc by @ZY945 in https://github.com/apache/hertzbeat/pull/1423 -* support all ssh connect key exchange by @tomsun28 in https://github.com/apache/hertzbeat/pull/1424 -* [doc] add code of conduct by @tomsun28 in https://github.com/apache/hertzbeat/pull/1425 -* update label structure store in victoria metrics, make it prometheus like by @tomsun28 in https://github.com/apache/hertzbeat/pull/1426 -* feature: support monitoring pop3 metrics and add help doc by @a-little-fool in https://github.com/apache/hertzbeat/pull/1427 -* Update sidebars.json by @a-little-fool in https://github.com/apache/hertzbeat/pull/1428 -* Add zh-cn help doc by @a-little-fool in https://github.com/apache/hertzbeat/pull/1429 -* update monitoring state un-manage to unmonitored, update pic by @tomsun28 in https://github.com/apache/hertzbeat/pull/1430 -* Add jpa to date type storage by @Clownsw in https://github.com/apache/hertzbeat/pull/1431 -* bugfix ^o^ token error, protect metrics api auth by @tomsun28 in https://github.com/apache/hertzbeat/pull/1434 -* Add relevant documents for SMTP and NTP by 
@ZY945 in https://github.com/apache/hertzbeat/pull/1437 -* bugfix threshold init error in mysql env by @tomsun28 in https://github.com/apache/hertzbeat/pull/1435 -* app-rabbitmq.yml support for international name aliases by @ZY945 in https://github.com/apache/hertzbeat/pull/1439 -* fix(*): error create lru-cache-timeout-cleaner thread by @Clownsw in https://github.com/apache/hertzbeat/pull/1438 -* app-rabbitmq.yml Modifying Error Fields. by @ZY945 in https://github.com/apache/hertzbeat/pull/1440 -* support monitoring NebulaGraph metrics and add help doc by @ZY945 in https://github.com/apache/hertzbeat/pull/1441 -* Fix Nginx Collect validateParams function NPE by @Clownsw in https://github.com/apache/hertzbeat/pull/1442 -* feature: add metrics i18n for app-springboot3.yml by @liyin in https://github.com/apache/hertzbeat/pull/1445 -* feat: add metrics i18n for app-docker.yml by @liyin in https://github.com/apache/hertzbeat/pull/1446 -* update docker-compose script and fix version by @tomsun28 in https://github.com/apache/hertzbeat/pull/1447 -* bugfix java.lang.IllegalArgumentException: Illegal character in query… by @tomsun28 in https://github.com/apache/hertzbeat/pull/1443 -* bugfix delete monitor error after monitor canceled by @ZhangZixuan1994 in https://github.com/apache/hertzbeat/pull/1451 -* add ZhangZixuan1994 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1454 -* remove sleep, probably busy-waiting by @tomsun28 in https://github.com/apache/hertzbeat/pull/1456 -* [doc] add new committer ZY945 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1453 -* Update app-zookeeper.yml by @hurenjie1 in https://github.com/apache/hertzbeat/pull/1458 -* add hurenjie1 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1459 -* update dashboard ui, remove ssh custom SignatureFactories, update app name by @tomsun28 in https://github.com/apache/hertzbeat/pull/1460 -* [Task] Monitoring 
Template Yml Metrics I18n | 监控模版指标国际化任务认领 #1212 by @tslj1024 in https://github.com/apache/hertzbeat/pull/1461 -* add tslj1024 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1462 -* Add alarm trigger time for alarm restore by @Calvin979 in https://github.com/apache/hertzbeat/pull/1464 -* bugfix history range query not work when victoria-metrics store by @tomsun28 in https://github.com/apache/hertzbeat/pull/1463 -* bugfix springboot3 translation by @liyin in https://github.com/apache/hertzbeat/pull/1467 -* bugfix telegram-notice can not input bot-token by @tomsun28 in https://github.com/apache/hertzbeat/pull/1465 -* feat: support hostname target by @ldysdu in https://github.com/apache/hertzbeat/pull/1455 -* add ldysdu as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1471 -* [feature] support snmp v3 monitoring protocol by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1469 -* bugfix alarm trigger-times not work when alarm and recovered trigger cyclically by @tomsun28 in https://github.com/apache/hertzbeat/pull/1468 -* update switch monitoring metrics i18n by @tomsun28 in https://github.com/apache/hertzbeat/pull/1472 -* fixed: snmpv3 contextName bug by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1473 -* Fix npt of webhook notify by @Calvin979 in https://github.com/apache/hertzbeat/pull/1474 -* [hertzbeat] release hertzbeat version v1.4.4 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1475 -* bugfix nginx collect http deadlock error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1476 -* alarm calculate ignore metrics collect code - TIMEOUT by @tomsun28 in https://github.com/apache/hertzbeat/pull/1478 +* bugfix metrics tags value store jpa data-storage error by @tomsun28 in +* add smtp protocol and support smtp monitoring by @ZY945 in +* add ZY945 as a contributor for code by @allcontributors in +* support new parse type 'log' in ssh collect 
protocol by @tomsun28 in +* add ntp protocol and support ntp monitoring by @ZY945 in +* monitoring the availability of websockets through handshake. by @ZY945 in +* [Task-1386] When adding tags in tag management, random colors are given by default. by @prolevel1 in +* add prolevel1 as a contributor for code by @allcontributors in +* + +# 1397 feature: support for dns monitoring by @Calvin979 in + +* Support monitoring hive metrics by @a-little-fool in +* support legend pageable in history data charts by @tomsun28 in +* update component tip and help tip doc by @tomsun28 in +* feature: support monitoring nginx metrics and add a help doc by @a-little-fool in +* update parser to parse from prometheus txt metrics data by @tomsun28 in +* support monitoring memcached metrics and add a help doc by @ZY945 in +* support all ssh connect key exchange by @tomsun28 in +* [doc] add code of conduct by @tomsun28 in +* update label structure store in victoria metrics, make it prometheus like by @tomsun28 in +* feature: support monitoring pop3 metrics and add help doc by @a-little-fool in +* Update sidebars.json by @a-little-fool in +* Add zh-cn help doc by @a-little-fool in +* update monitoring state un-manage to unmonitored, update pic by @tomsun28 in +* Add jpa to date type storage by @Clownsw in +* bugfix ^o^ token error, protect metrics api auth by @tomsun28 in +* Add relevant documents for SMTP and NTP by @ZY945 in +* bugfix threshold init error in mysql env by @tomsun28 in +* app-rabbitmq.yml support for international name aliases by @ZY945 in +* fix(*): error create lru-cache-timeout-cleaner thread by @Clownsw in +* app-rabbitmq.yml Modifying Error Fields. 
by @ZY945 in +* support monitoring NebulaGraph metrics and add help doc by @ZY945 in +* Fix Nginx Collect validateParams function NPE by @Clownsw in +* feature: add metrics i18n for app-springboot3.yml by @liyin in +* feat: add metrics i18n for app-docker.yml by @liyin in +* update docker-compose script and fix version by @tomsun28 in +* bugfix java.lang.IllegalArgumentException: Illegal character in query… by @tomsun28 in +* bugfix delete monitor error after monitor canceled by @ZhangZixuan1994 in +* add ZhangZixuan1994 as a contributor for code by @allcontributors in +* remove sleep, probably busy-waiting by @tomsun28 in +* [doc] add new committer ZY945 by @tomsun28 in +* Update app-zookeeper.yml by @hurenjie1 in +* add hurenjie1 as a contributor for code by @allcontributors in +* update dashboard ui, remove ssh custom SignatureFactories, update app name by @tomsun28 in +* [Task] Monitoring Template Yml Metrics I18n | 监控模版指标国际化任务认领 #1212 by @tslj1024 in +* add tslj1024 as a contributor for code by @allcontributors in +* Add alarm trigger time for alarm restore by @Calvin979 in +* bugfix history range query not work when victoria-metrics store by @tomsun28 in +* bugfix springboot3 translation by @liyin in +* bugfix telegram-notice can not input bot-token by @tomsun28 in +* feat: support hostname target by @ldysdu in +* add ldysdu as a contributor for code by @allcontributors in +* [feature] support snmp v3 monitoring protocol by @TJxiaobao in +* bugfix alarm trigger-times not work when alarm and recovered trigger cyclically by @tomsun28 in +* update switch monitoring metrics i18n by @tomsun28 in +* fixed: snmpv3 contextName bug by @TJxiaobao in +* Fix npt of webhook notify by @Calvin979 in +* [hertzbeat] release hertzbeat version v1.4.4 by @tomsun28 in +* bugfix nginx collect http deadlock error by @tomsun28 in +* alarm calculate ignore metrics collect code - TIMEOUT by @tomsun28 in ## New Contributors -* @ZY945 made their first contribution in 
https://github.com/apache/hertzbeat/pull/1407 -* @prolevel1 made their first contribution in https://github.com/apache/hertzbeat/pull/1412 -* @ZhangZixuan1994 made their first contribution in https://github.com/apache/hertzbeat/pull/1451 -* @hurenjie1 made their first contribution in https://github.com/apache/hertzbeat/pull/1458 -* @tslj1024 made their first contribution in https://github.com/apache/hertzbeat/pull/1461 -* @ldysdu made their first contribution in https://github.com/apache/hertzbeat/pull/1455 +* @ZY945 made their first contribution in +* @prolevel1 made their first contribution in +* @ZhangZixuan1994 made their first contribution in +* @hurenjie1 made their first contribution in +* @tslj1024 made their first contribution in +* @ldysdu made their first contribution in -**Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.3...v1.4.4 +**Full Changelog**: --- ## ⛄ Supported -- Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch -- and more for your custom monitoring. -- Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. 
+* Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server +* Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, Damon, OpenGauss, ClickHouse, IoTDB, Redis Cluster +* Linux, Ubuntu, CentOS, Windows +* Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +* Kubernetes, Docker +* Huawei Switch, HPE Switch, TP-LINK Switch, Cisco Switch +* and more for your custom monitoring. +* Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. ---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ### **Download Link** **hertzbeat server** -- ⬇️ [hertzbeat-1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-1.4.4.tar.gz) -- ⬇️ [hertzbeat-1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-1.4.4.zip) -- ⬇️ [hertzbeat-linux_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-linux_amd64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-linux_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-linux_arm64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-macos_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-macos_arm64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-macos_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-macos_amd64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-windows64_1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-windows64_1.4.4.zip) +* ⬇️ [hertzbeat-1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-1.4.4.tar.gz) +* ⬇️ [hertzbeat-1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-1.4.4.zip) +* ⬇️ 
[hertzbeat-linux_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-linux_amd64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-linux_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-linux_arm64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-macos_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-macos_arm64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-macos_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-macos_amd64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-windows64_1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-windows64_1.4.4.zip) **hertzbeat collector** -- ⬇️ [hertzbeat-collector-1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-1.4.4.tar.gz) -- ⬇️ [hertzbeat-collector-1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-1.4.4.zip) -- ⬇️ [hertzbeat-collector-linux_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-linux_amd64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-collector-linux_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-linux_arm64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-collector-macos_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-macos_arm64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-collector-macos_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-macos_amd64_1.4.4.tar.gz) -- ⬇️ [hertzbeat-collector-windows64_1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-windows64_1.4.4.zip) +* ⬇️ [hertzbeat-collector-1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-1.4.4.tar.gz) +* ⬇️ 
[hertzbeat-collector-1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-1.4.4.zip) +* ⬇️ [hertzbeat-collector-linux_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-linux_amd64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-collector-linux_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-linux_arm64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-collector-macos_arm64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-macos_arm64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-collector-macos_amd64_1.4.4.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-macos_amd64_1.4.4.tar.gz) +* ⬇️ [hertzbeat-collector-windows64_1.4.4.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/hertzbeat-collector-windows64_1.4.4.zip) **hertzbeat docker compose script** -- ⬇️ [docker-compose.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/docker-compose.zip) - +* ⬇️ [docker-compose.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.4/docker-compose.zip) diff --git a/home/blog/2024-04-17-to-apache.md b/home/blog/2024-04-17-to-apache.md index b06725d522d..ddce09b41d0 100644 --- a/home/blog/2024-04-17-to-apache.md +++ b/home/blog/2024-04-17-to-apache.md @@ -282,16 +282,16 @@ Special thanks to the Champions and Mentors who have kindly offered guidance to **Champion:** -- Yonglun Zhang(zhangyonglun at apache dot org) +* Yonglun Zhang(zhangyonglun at apache dot org) > Thanks to Champion @Yonglun for leading the project from the proposal drafting stage to discussions and initiating the voting process, providing selfless assistance and guidance throughout. 
**Mentors:** -- Yu Xiao [xiaoyu at apache dot org] -- Yonglun Zhang [zhangyonglun at apache dot org] -- Justin Mclean [jmclean at apache dot org] -- Francis Chuang [francischuang at apache dot org] +* Yu Xiao [xiaoyu at apache dot org] +* Yonglun Zhang [zhangyonglun at apache dot org] +* Justin Mclean [jmclean at apache dot org] +* Francis Chuang [francischuang at apache dot org] > Thanks to Mentors @XiaoYu, @Yonglun, @Justin, and @Francis for their selfless assistance, providing professional guidance on compliance and process advancement. From the Dromara open-source community to the Apache Incubator, @XiaoYu has always been the guiding light for HertzBeat's open-source journey. We believe that under the guidance of all mentors, the community will grow even healthier in the future. @@ -301,19 +301,18 @@ Special thanks to the Champions and Mentors who have kindly offered guidance to > > May HertzBeat continue to grow and innovate in its future journey, becoming a shining star in the open-source community. We believe that with the outstanding talent of the team and the extensive support of the community, HertzBeat will achieve even greater achievements, providing high-quality services and experiences to developers and users worldwide. Dromara will continue to fully support and pay attention to the development of HertzBeat, looking forward to it creating more wonderful chapters! 
---- +--- **Repo Url** -**https://github.com/apache/hertzbeat** +**<https://github.com/apache/hertzbeat>** Welcome to star us🐶🐶🐶 **Home Url** -**https://hertzbeat.apache.org/** +**<https://hertzbeat.apache.org/>** **Email List** -**dev@hertzbeat.apache.org** - +**<dev@hertzbeat.apache.org>** diff --git a/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md b/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md index 3e9bf0cb9c5..8595b33a35c 100644 --- a/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md +++ b/home/blog/2024-05-09-hertzbeat-ospp-subject-introduction.md @@ -8,14 +8,14 @@ HertzBeat is a powerful custom monitoring capabilities, high-performance cluster - Set **monitoring + alarm + notification** as a whole, support for application services, applications, database, cache, operating system, big data, middleware, Web server, cloud native, network, custom and other monitoring threshold alarm notification in one step. - Easy to use and friendly, no `Agent`, full `WEB` page operation, a mouse click can monitor alarms, zero hand learning costs. -- Protocol specifications such as Http, Jmx, Ssh, Snmp, Jdbc, Prometheus, etc. can be configured, and the monitoring template YML can be configured in the browser to use these protocols to customize the desired metrics. Do you believe that you can immediately adapt a new monitoring type such as` K8s` or `Docker` just by configuring it? +- Protocol specifications such as Http, Jmx, Ssh, Snmp, Jdbc, Prometheus, etc. can be configured, and the monitoring template YML can be configured in the browser to use these protocols to customize the desired metrics. Do you believe that you can immediately adapt a new monitoring type such as`K8s` or `Docker` just by configuring it? - Compatible with Prometheus` ecosystem and more, only page operations can monitor what Prometheus can monitor. - High-performance, supports horizontal expansion of multiple collector clusters, supports multi-isolated network monitoring, and cloud edge collaboration.
- Free alarm threshold rules, `mail,` `Discord,` `Slack,` `Telegram,` `Dingding,` `wechat,` `Feibook,` `SMS,` `Webhook,` `Server sauce,` and other ways to send messages in a timely manner. -**Github: https://github.com/apache/hertzbeat** +**Github: <https://github.com/apache/hertzbeat>** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: <https://gitee.com/hertzbeat/hertzbeat>** ## What is Open Source Summer? diff --git a/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md b/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md index d421d2eaf16..47dfe69fb79 100644 --- a/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md +++ b/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md @@ -21,7 +21,7 @@ Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resource Due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly use the new `yml` configuration files and then modify them based on your own needs. -#### `application.yml` generally needs to modify the following parts: +#### `application.yml` generally needs to modify the following parts Default is: @@ -65,7 +65,7 @@ If you change to a MySQL database, here is an example: level: SEVERE ``` -#### `sureness.yml` modification is optional, usually when you need to change account passwords: +#### `sureness.yml` modification is optional, usually when you need to change account passwords ```yaml # account info config @@ -109,6 +109,7 @@ Next, run the start-up script as before to experience the latest HertzBeat 1.6.0 ``` docker stop hertzbeat ``` + - Upgrade the database script: - Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), choose the directory of your database and execute the corresponding `V160__update_column.sql` file in MySQL. - Upgrade the configuration files: @@ -125,6 +126,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1.
``` docker stop hertzbeat ``` + - Edit the H2 database files: - Assuming you have mounted the H2 database files in the `data` directory to the local system, or copied the `/opt/hertzbeat/data` directory from the old container manually. - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). @@ -133,6 +135,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1. ``` java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` + - Upgrade the configuration files: - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. - Add the corresponding database drivers: @@ -146,4 +149,3 @@ If you do not want to go through the tedious script upgrade method mentioned abo - Deploy a new environment with the latest version. - Export the monitoring tasks and threshold information from the old environment on the page - diff --git a/home/blog/2024-06-15-hertzbeat-v1.6.0.md b/home/blog/2024-06-15-hertzbeat-v1.6.0.md index 9647d1680e3..c35fcfaee8c 100644 --- a/home/blog/2024-06-15-hertzbeat-v1.6.0.md +++ b/home/blog/2024-06-15-hertzbeat-v1.6.0.md @@ -17,9 +17,9 @@ At the same time, some bugs were fixed and some functions were optimized, and mo **Of course, the most important thing is to give the best thanks to the contributors in the community!** -Download Page: https://hertzbeat.apache.org/docs/download/ +Download Page: <https://hertzbeat.apache.org/docs/download/> -Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-update/ +Upgrade Guide: <https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-update/> ## What is HertzBeat?
@@ -39,332 +39,332 @@ Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-upd ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -## HertzBeat's 1.6.0 Version Release! +## HertzBeat's 1.6.0 Version Release ## Highlights -- HertzBeat is donated to the Apache Incubator. -- migrate repo, clean up code, license, add more help doc and more -- add dependency license doc -- [feature]Hertzbeat custom plugin. by @zqr10159 in #1973 -- [feature] add apache hugegraph monitor by @zhangshenghang in #1972 -- [improve][HIP] HIP-01: Implement refactoring AbstractCollect by @crossoverJie in #1966 -- [feature] Support monitoring of OpenAI accounts by @zuobiao-zhou in #1947 -- [feature] add apache yarn monitor by @zhangshenghang in #1937 -- [featrue]add apache hdfs monitor by @zhangshenghang in #1920 -- [feature] support use ngql query metrics from nebulaGraph by @LiuTianyou in #1917 -- [feature] support random jwt secret when not custom by @tomsun28 in #1897 -- feat Support Time Type to Tengine Data Storage by @Clownsw in #1890 -- [feature] support the VictoriaMetrics cluster by @xuziyang in #1880 -- [feature] support flyway database migration by @tomsun28 in #1875 -- [feature] Support Redfish protocol to monitoring server by @gjjjj0101 in #1867 -- [feature] add influxdb metrics monitoring by @TJxiaobao in #1730 -- [improve] use apache jexl replace of aviator by @tomsun28 in #1859 -- [feature] Add Linux process monitoring by @zhangshenghang in #1857 -- [feature] Add Apache Hbase RegionServer monitoring by @zhangshenghang in #1833 -- [improve] use eclipselink orm replace of hibernate orm by @tomsun28 in #1801 -- [feature]Add monitoring for Hbase Master by @zhangshenghang in #1820 -- [feature] Improve the import checkstyle by @crossoverJie in #1802 -- [Improve]When multiple lines are returned, each alarm is triggered instead of only the first alarm by @15613060203 in #1797 -- [improve]Add external lib 
folder to store mysql and oracle driver. by @zqr10159 in #1783 -- [feature:update-checkstyle] Limit the java file header by @YxYL6125 in #1799 -- monitor center add search type modal by @tomsun28 in #1699 -- mongodb monitoring support custom connection timeout param by @ZY945 in #1697 -- System config theme by @TJxiaobao in #1636 -- [feature] add storm monitor by @starmilkxin in #1673 -- add a online prometheus parser and a prometheus-like push style. by @vinci-897 in #1644 -- and more bugfix, doc, features power by our contributors, thanks to them. +* HertzBeat is donated to the Apache Incubator. +* migrate repo, clean up code, license, add more help doc and more +* add dependency license doc +* [feature]Hertzbeat custom plugin. by @zqr10159 in #1973 +* [feature] add apache hugegraph monitor by @zhangshenghang in #1972 +* [improve][HIP] HIP-01: Implement refactoring AbstractCollect by @crossoverJie in #1966 +* [feature] Support monitoring of OpenAI accounts by @zuobiao-zhou in #1947 +* [feature] add apache yarn monitor by @zhangshenghang in #1937 +* [featrue]add apache hdfs monitor by @zhangshenghang in #1920 +* [feature] support use ngql query metrics from nebulaGraph by @LiuTianyou in #1917 +* [feature] support random jwt secret when not custom by @tomsun28 in #1897 +* feat Support Time Type to Tengine Data Storage by @Clownsw in #1890 +* [feature] support the VictoriaMetrics cluster by @xuziyang in #1880 +* [feature] support flyway database migration by @tomsun28 in #1875 +* [feature] Support Redfish protocol to monitoring server by @gjjjj0101 in #1867 +* [feature] add influxdb metrics monitoring by @TJxiaobao in #1730 +* [improve] use apache jexl replace of aviator by @tomsun28 in #1859 +* [feature] Add Linux process monitoring by @zhangshenghang in #1857 +* [feature] Add Apache Hbase RegionServer monitoring by @zhangshenghang in #1833 +* [improve] use eclipselink orm replace of hibernate orm by @tomsun28 in #1801 +* [feature]Add monitoring for Hbase Master by 
@zhangshenghang in #1820 +* [feature] Improve the import checkstyle by @crossoverJie in #1802 +* [Improve]When multiple lines are returned, each alarm is triggered instead of only the first alarm by @15613060203 in #1797 +* [improve]Add external lib folder to store mysql and oracle driver. by @zqr10159 in #1783 +* [feature:update-checkstyle] Limit the java file header by @YxYL6125 in #1799 +* monitor center add search type modal by @tomsun28 in #1699 +* mongodb monitoring support custom connection timeout param by @ZY945 in #1697 +* System config theme by @TJxiaobao in #1636 +* [feature] add storm monitor by @starmilkxin in #1673 +* add a online prometheus parser and a prometheus-like push style. by @vinci-897 in #1644 +* and more bugfix, doc, features power by our contributors, thanks to them. ## What's Changed -* bugfix collector can not startup alone by @tomsun28 in https://github.com/apache/hertzbeat/pull/1633 -* translate some hertzbeat blog by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1635 -* Check class description by @ZY945 in https://github.com/apache/hertzbeat/pull/1638 -* translate class description to english by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1641 -* support monitor metrics name i18n: ClickHouse by @ZY945 in https://github.com/apache/hertzbeat/pull/1642 -* translate blog 20220601 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1646 -* add a online prometheus parser and a prometheus-like push style. 
by @vinci-897 in https://github.com/apache/hertzbeat/pull/1644 -* translate blog 20220320 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1647 -* support monitor metrics name i18n: DynamicTp by @ZY945 in https://github.com/apache/hertzbeat/pull/1649 -* translate blog 20220228 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1648 -* translate blog 20220310 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1651 -* translate blog 20220904 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1652 -* support monitor metrics name i18n: Airflow by @ZY945 in https://github.com/apache/hertzbeat/pull/1654 -* support monitor metrics name i18n: IoTDB by @ZY945 in https://github.com/apache/hertzbeat/pull/1659 -* Translate 2022-02-11-hertzbeat document by @wang1027-wqh in https://github.com/apache/hertzbeat/pull/1660 -* bugfix The annotation @Transactional specifies rollbackFor. by @handy-git in https://github.com/apache/hertzbeat/pull/1643 -* add handy-git as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1661 -* feature:Translate 2022-02-17-hertzbeat Document by @wang1027-wqh in https://github.com/apache/hertzbeat/pull/1662 -* support monitor metrics name i18n: rocketmq by @ZY945 in https://github.com/apache/hertzbeat/pull/1663 -* [doc] update relate doc and readme by @tomsun28 in https://github.com/apache/hertzbeat/pull/1667 -* bugfix monitoring mongodb not work in springboot3 by @ZY945 in https://github.com/apache/hertzbeat/pull/1668 -* [feature] add storm monitor by @starmilkxin in https://github.com/apache/hertzbeat/pull/1673 -* [bugfix] fixed the issue in http_sd where services were incorrectly reported as available when they were actually unavailable by @starmilkxin in https://github.com/apache/hertzbeat/pull/1678 -* remove mysql-oracle dependency jar from release package lib by @tomsun28 in https://github.com/apache/hertzbeat/pull/1680 -* System config 
theme by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1636 -* update webapp menu layout and doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1682 -* bugfix can not find mysql dependency when startup by @tomsun28 in https://github.com/apache/hertzbeat/pull/1686 -* support config common aes secret by @tomsun28 in https://github.com/apache/hertzbeat/pull/1683 -* [bugfix]fix the issue of add redis cluster node test error report(#1601) by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1684 -* add LiuTianyou as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1687 -* mongodb monitoring support custom connection timeout param by @ZY945 in https://github.com/apache/hertzbeat/pull/1697 -* bugfix old data decode error when use new common-secret by @tomsun28 in https://github.com/apache/hertzbeat/pull/1696 -* [bugfix] fix bug where reopening pop-up window still retained previously edited data after closing. by @starmilkxin in https://github.com/apache/hertzbeat/pull/1698 -* monitor center add search type modal by @tomsun28 in https://github.com/apache/hertzbeat/pull/1699 -* fix status page logo overflow by @tomsun28 in https://github.com/apache/hertzbeat/pull/1700 -* bugfix npe monitor jobid may be null by @tomsun28 in https://github.com/apache/hertzbeat/pull/1701 -* support custom main menus in monitor template by @tomsun28 in https://github.com/apache/hertzbeat/pull/1703 -* update home website doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1712 -* [Improve] change package group to org apache hertzbeat by @vinci-897 in https://github.com/apache/hertzbeat/pull/1724 -* [improve] initial license clean up by @tomsun28 in https://github.com/apache/hertzbeat/pull/1725 -* update manager and collector logback config(#1704) by @handy-git in https://github.com/apache/hertzbeat/pull/1723 -* fix(sec): upgrade com.h2database:h2 to by @WinterKi1ler in https://github.com/apache/hertzbeat/pull/1718 -* add 
WinterKi1ler as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1736 -* update asf branch protected check by @tomsun28 in https://github.com/apache/hertzbeat/pull/1738 -* [doc]Update star chart by @zqr10159 in https://github.com/apache/hertzbeat/pull/1737 -* [fixed] fixed click collector online offline button error by @miki-hmt in https://github.com/apache/hertzbeat/pull/1734 -* [improve] initial doc clean up by @tomsun28 in https://github.com/apache/hertzbeat/pull/1741 -* [Improvement]Support multiple receivers. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1731 -* [improvement]Add lisence. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1746 -* Backend LICENSE Initialize by @wang1027-wqh in https://github.com/apache/hertzbeat/pull/1744 -* Back-end dependency upgrade by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1743 -* [Improve] run hertzbeat in docker compose support dependen service condition by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/1748 -* [bugfix] fix statuspage index exception by @makechoicenow in https://github.com/apache/hertzbeat/pull/1747 -* remove unlicensed dependency 'wolfy87 eventemitter' by @alpha951 in https://github.com/apache/hertzbeat/pull/1745 -* [improve] auto label when pr, update asf config by @tomsun28 in https://github.com/apache/hertzbeat/pull/1749 -* [improve] update asf config set required status checks context by @tomsun28 in https://github.com/apache/hertzbeat/pull/1751 -* [improve] home add apache info by @a-little-fool in https://github.com/apache/hertzbeat/pull/1740 -* [doc] Change e2e path by @crossoverJie in https://github.com/apache/hertzbeat/pull/1758 -* fix : ingress tls inoperative by @PeixyJ in https://github.com/apache/hertzbeat/pull/1760 -* [refactor] method improvement rationale by @dukbong in https://github.com/apache/hertzbeat/pull/1757 -* [improve] create disclaimer file, add incubating in describe by @tomsun28 in 
https://github.com/apache/hertzbeat/pull/1764 -* [improve] update new hertzbeat brand logo, update doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1761 -* Complete the code comment translation of the common module by @Hi-Mr-Wind in https://github.com/apache/hertzbeat/pull/1766 -* Remove unnecessary if-else statement. by @dukbong in https://github.com/apache/hertzbeat/pull/1770 -* [doc] remove and translate chinese to english in warehous by @xuziyang in https://github.com/apache/hertzbeat/pull/1773 -* Replace deprecated methods with builder pattern for RedisURI construction by @dukbong in https://github.com/apache/hertzbeat/pull/1772 -* remove and translate chinese to english in collector,script,push,remoting and manager module by @MananPoojara in https://github.com/apache/hertzbeat/pull/1774 -* Added the function of sending SMS messages through Alibaba Cloud. by @lwqzz in https://github.com/apache/hertzbeat/pull/1768 -* [improve]Add frontend license. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1776 -* [test] Add RedisSingleCollectImplTest by @crossoverJie in https://github.com/apache/hertzbeat/pull/1784 -* [refactor] add override annotation by @handy-git in https://github.com/apache/hertzbeat/pull/1782 -* '[docs]bugfix: display syntax error of ipmi protocol' by @tomorrowshipyltm in https://github.com/apache/hertzbeat/pull/1793 -* [doc] translate alerter moudle code chinese to english by @tomsun28 in https://github.com/apache/hertzbeat/pull/1765 -* [refactor] database-related properties class, type changed to record by @xuziyang in https://github.com/apache/hertzbeat/pull/1786 -* Fix snmp template unit conversion problem by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1796 -* [doc] Add help documentation for clickhouse monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1798 -* [feature:update-checkstyle] Limit the java file header by @YxYL6125 in https://github.com/apache/hertzbeat/pull/1799 -* [improve]Add 
external lib folder to store mysql and oracle driver. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1783 -* [Improve]When multiple lines are returned, each alarm is triggered instead of only the first alarm by @15613060203 in https://github.com/apache/hertzbeat/pull/1797 -* [doc] add team page in website by @alpha951 in https://github.com/apache/hertzbeat/pull/1800 -* [feature] Improve the import checkstyle by @crossoverJie in https://github.com/apache/hertzbeat/pull/1802 -* [doc] Add help document for dns monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1804 -* [improve] preventing NPE by @dukbong in https://github.com/apache/hertzbeat/pull/1808 -* [refactor] change the warehouse properties the type to record by @xuziyang in https://github.com/apache/hertzbeat/pull/1806 -* Refactor: upgrade syntax to jdk17(instanceof & switch) by @Calvin979 in https://github.com/apache/hertzbeat/pull/1807 -* [test] Add NginxCollect test by @crossoverJie in https://github.com/apache/hertzbeat/pull/1809 -* [website] update team page by @tomsun28 in https://github.com/apache/hertzbeat/pull/1803 -* [test] Add RedisClusterCollectImplTest by @crossoverJie in https://github.com/apache/hertzbeat/pull/1789 -* [improve] Fix typo ReqStatusResponse by @crossoverJie in https://github.com/apache/hertzbeat/pull/1811 -* Comparing N objects for null with Assert.noNullElements(). 
by @dukbong in https://github.com/apache/hertzbeat/pull/1814 -* [doc] Add help document for elasticsearch monitoring and ftp monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1815 -* [doc] add help documentation for huawei switch monitoring by @Alanxtl in https://github.com/apache/hertzbeat/pull/1813 -* chore: upgrade the api-testing (e2e) to v0.0.16 by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1817 -* [Remove][Improve]Mail config by @zqr10159 in https://github.com/apache/hertzbeat/pull/1819 -* Remove and translate chinese to english in code by @dukbong in https://github.com/apache/hertzbeat/pull/1816 -* [feature]Add monitoring for Hbase Master by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1820 -* [doc] resolve code conflicts and coverage caused by pr(#1813) merge by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1821 -* [doc] Add help document for tidb and nacos monitoring by @Alanxtl in https://github.com/apache/hertzbeat/pull/1823 -* [improve] use eclipselink orm replace of hibernate orm by @tomsun28 in https://github.com/apache/hertzbeat/pull/1801 -* [improve] Add whitespace checkstyle by @crossoverJie in https://github.com/apache/hertzbeat/pull/1824 -* [bugfix] dns monitoring template add query class parameter by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1825 -* [Refactor] Preventing Unnecessary Object Creation and Using Utility Methods by @dukbong in https://github.com/apache/hertzbeat/pull/1818 -* [doc]Add and modify Doris FE Chinese and English documentation by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1828 -* [docs] Optimize: add help docs for UDP port & Springboot3 help doc by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1832 -* Code Simplification, Structure Changes, and Translation Work, Along with a Question by @dukbong in https://github.com/apache/hertzbeat/pull/1827 -* [doc] add help document for mongodb monitoring by @LiuTianyou in 
https://github.com/apache/hertzbeat/pull/1834 -* [collector] fix: inverts the compareTo sort of MetricsCollect run queue by @Pzz-2021 in https://github.com/apache/hertzbeat/pull/1837 -* [doc]Doc add debian system by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1842 -* [feature] Add Apache Hbase RegionServer monitoring by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1833 -* [improve] Optimize websocket monitor by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1838 -* [refactor] Split the WarehouseProperties class by @xuziyang in https://github.com/apache/hertzbeat/pull/1830 -* [test] Add test for HttpsdImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1840 -* [fix] Fix the wrong comment by @xuziyang in https://github.com/apache/hertzbeat/pull/1843 -* [refactor] trans and use assert by @dukbong in https://github.com/apache/hertzbeat/pull/1841 -* [bugfix] modify the command in the mongodb monitoring template by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1844 -* [bigfix]Fix Debian system Top10 monitoring bug by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1846 -* [cleanup]Delete the corresponding Chinese comments by @hudongdong129 in https://github.com/apache/hertzbeat/pull/1847 -* [doc] translates chinese comment to english. 
by @dukbong in https://github.com/apache/hertzbeat/pull/1853 -* [doc] fix error and add help document for prometheus task by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1852 -* [feature] Add Linux process monitoring by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1857 -* [test] Add test for FtpCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1856 -* [improve] use apache jexl replace of aviator by @tomsun28 in https://github.com/apache/hertzbeat/pull/1859 -* [bugfix] jpa data save logic repair by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1863 -* [feature] add influxdb metrics monitoring by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1730 -* [doc] add help document for rocketmq by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1874 -* [improve] Imporve checkstyle of test code. by @crossoverJie in https://github.com/apache/hertzbeat/pull/1864 -* [feature] Support Redfish protocol to monitoring server by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/1867 -* Fix debian monitoring template issue about process monitoring by @LLP2333 in https://github.com/apache/hertzbeat/pull/1868 -* [bugfix] centos Top10 shows missing one by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1870 -* [improve] add website apache incubator footer by @tomsun28 in https://github.com/apache/hertzbeat/pull/1860 -* [doc] update help document by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1861 -* [featurn] support flyway database migration by @tomsun28 in https://github.com/apache/hertzbeat/pull/1875 -* [improve] Delete the timestamp field in the class MetricFamily.Metric by @xuziyang in https://github.com/apache/hertzbeat/pull/1878 -* [improve] Use java.lang.AutoCloseable instead of CacheCloseable by @crossoverJie in https://github.com/apache/hertzbeat/pull/1879 -* [bugfix]Fix top10 process command. 
by @zqr10159 in https://github.com/apache/hertzbeat/pull/1876 -* [feature] support the VictoriaMetrics cluster by @xuziyang in https://github.com/apache/hertzbeat/pull/1880 -* [improve] Refactor common cache code by @crossoverJie in https://github.com/apache/hertzbeat/pull/1881 -* Eliminate Unnecessary Unboxing and Generics by @handy-git in https://github.com/apache/hertzbeat/pull/1882 -* [bugfix][doc]Add kafka sidebar. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1883 -* [doc] I18n for monitoring template yml metrics by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1888 -* [refactor] StoreProperties is no longer useful, delete it by @xuziyang in https://github.com/apache/hertzbeat/pull/1887 -* bugfix statistical metrics data matching fails by @tomsun28 in https://github.com/apache/hertzbeat/pull/1884 -* [doc] add help doc for flink monitoring by @HeartLinked in https://github.com/apache/hertzbeat/pull/1893 -* [doc] add almalinux documentation by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1892 -* [improve] Missing a generic by @crossoverJie in https://github.com/apache/hertzbeat/pull/1889 -* [bugfix] Fixed some metrics of Jexlespression not matching in Elasticsearch by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1894 -* feat(*): Support Time Type to Tengine Data Storage by @Clownsw in https://github.com/apache/hertzbeat/pull/1890 -* [feature] support random jwt secret when not custom by @tomsun28 in https://github.com/apache/hertzbeat/pull/1897 -* [doc] add opensuse doc by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1902 -* fix when manager restart, collect register error by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1896 -* [bugfix] fix can not use empty collection as query params in eclipselink by @tomsun28 in https://github.com/apache/hertzbeat/pull/1900 -* [doc] update doc add download page and pic by @tomsun28 in https://github.com/apache/hertzbeat/pull/1904 -* [test] Add test 
for UdpCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1906 -* fix license by @yqxxgh in https://github.com/apache/hertzbeat/pull/1907 -* [improve] refactor code by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1901 -* [type:bugfix] fix customized menu invalid bug #1898 by @Aias00 in https://github.com/apache/hertzbeat/pull/1908 -* [type:bugfix] fix HTTP API bug #1895 by @Aias00 in https://github.com/apache/hertzbeat/pull/1909 -* [test] Add test for WebsocketCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1912 -* [doc] translates chinese comment to english. by @westboy in https://github.com/apache/hertzbeat/pull/1914 -* [doc] Add HIP document and template by @crossoverJie in https://github.com/apache/hertzbeat/pull/1913 -* [improve] clean up home webapp unused code by @tomsun28 in https://github.com/apache/hertzbeat/pull/1915 -* [feature] support use ngql query metrics from nebulaGraph by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1917 -* [doc] Improve the Contribution Documentation. 
by @crossoverJie in https://github.com/apache/hertzbeat/pull/1918 -* [featrue]add apache hdfs monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1920 -* [doc] update hbase documentation description by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1921 -* [doc] Add documentation for nebulaGraph cluster monitoring and custom monitoring using NGQL, and clean up useless parameters by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1923 -* [test] Add test for TelnetCollectImplTest by @crossoverJie in https://github.com/apache/hertzbeat/pull/1924 -* fix(*): fix TdEngine Init not found Database by @Clownsw in https://github.com/apache/hertzbeat/pull/1891 -* [doc] update contribution and add run-build guide by @tomsun28 in https://github.com/apache/hertzbeat/pull/1919 -* bugfix collector startup error can not find JdbcClient by @tomsun28 in https://github.com/apache/hertzbeat/pull/1925 -* [doc] add help document for freebsd monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1928 -* [refactoring] Split AbstractHistoryDataStorage class by @xuziyang in https://github.com/apache/hertzbeat/pull/1926 -* [fix] fixed name error in monitoring template and improve NGQL protocol by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1931 -* [refactoring] Split AbstractRealTimeDataStorage class by @xuziyang in https://github.com/apache/hertzbeat/pull/1935 -* [bugfix] fix ssl-cert days_remaining and npe by @tomsun28 in https://github.com/apache/hertzbeat/pull/1934 -* [feature] add apache yarn monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1937 -* [doc] add help document for redhat monitoring and rocky linux monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1939 -* [test] Add test for NtpCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1940 -* [bugfix] fix alarm center tags display error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1938 -* 
[improve] prepare for release hertzbeat v1.6.0 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1929 -* add:Updated the Open Source Summer Project blog. by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1943 -* [feature] Support monitoring of OpenAI accounts by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1947 -* [refactoring] Inject a single instance of the data store by @xuziyang in https://github.com/apache/hertzbeat/pull/1944 -* [refactoring] AbstractHistoryDataStorage implement the DisposableBean by @xuziyang in https://github.com/apache/hertzbeat/pull/1946 -* [doc] update iotdb init document by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1948 -* [improve] update build script by @tomsun28 in https://github.com/apache/hertzbeat/pull/1949 -* [test] add test for NgqlCollectImpl by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1953 -* [bugfix]Replace monitors to alert. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1954 -* [improve] add llm, server menu and update doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1955 -* [improve][HIP] HIP-01: Refactoring AbstractCollect by @crossoverJie in https://github.com/apache/hertzbeat/pull/1930 -* [bugfix] fix ConnectionCommonCache possible npe by @crossoverJie in https://github.com/apache/hertzbeat/pull/1959 -* [doc] add help document for eulerOS monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1960 -* [fixbug] Fix the problem of no data for springboot3 monitoring by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1961 -* commit:fix the front-end popup cannot exit by @Yanshuming1 in https://github.com/apache/hertzbeat/pull/1957 -* [fixbug] expression rule adaptation by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1963 -* [doc] add help doc for influxdb-promql and kafka-promql monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1965 -* [doc]: update readme-cn docs by @yuluo-yx in 
https://github.com/apache/hertzbeat/pull/1964 -* [improve][HIP] HIP-01: Implement refactoring AbstractCollect by @crossoverJie in https://github.com/apache/hertzbeat/pull/1966 -* [chore] update .gitignore to save .idea/icon.png by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1971 -* [improve][bugfix]: fix AlertTemplateUtilTest test exception and update code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1969 -* [feature] add apache hugegraph monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1972 -* [improve] Implement cascading parameter list for SNMP protocol by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1976 -* [improve] optimize DateUtil and add test case by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1974 -* [feature]Hertzbeat custom plugin. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1973 -* update login page and status page color by @lwjxy in https://github.com/apache/hertzbeat/pull/1977 -* [chore] update code style and add some comment by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1975 -* [doc]Hertzbeat plugin doc. 
by @zqr10159 in https://github.com/apache/hertzbeat/pull/1980 -* [doc] update contributors and update status page style by @tomsun28 in https://github.com/apache/hertzbeat/pull/1981 -* [feature] Implement cascading parameter list by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1978 -* [doc]update threshold alarm doc by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1983 -* [chore] optimize code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1984 -* [fix] Compatible with MongoDB versions earlier than 3.6 by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/1988 -* [chore] optimize manager code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1993 -* [doc] Translate part of documentation development.md under `zh-cn` directory from `en` to `zh-cn` by @Thespica in https://github.com/apache/hertzbeat/pull/1995 -* [improve] http protocol prometheus parsing optimization by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1996 -* [feature] add at function for wechat by @Yanshuming1 in https://github.com/apache/hertzbeat/pull/1994 -* [improve] add common util test by @yuluo-yx in https://github.com/apache/hertzbeat/pull/2001 -* [improve] update release license notice and package by @tomsun28 in https://github.com/apache/hertzbeat/pull/2003 -* [bugfix] fix collector startup error classpath by @tomsun28 in https://github.com/apache/hertzbeat/pull/2004 -* [chore] optimize code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/2000 -* [improve] Bump up `eslint-plugin-jsdoc` to 48.2.5 to support node 20+ by @Thespica in https://github.com/apache/hertzbeat/pull/2005 -* [doc] fix doc highlighting by @boatrainlsz in https://github.com/apache/hertzbeat/pull/2006 -* [web-app]feature: case insensitive search by @JavaProgrammerLB in https://github.com/apache/hertzbeat/pull/2007 -* [feature] Support time calculation expressions. 
by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2009 -* [doc] add document for time expression by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2012 -* [feature] Add Apache Pulsar monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/2013 -* [doc] home verify release doc update by @tomsun28 in https://github.com/apache/hertzbeat/pull/2014 -* [Improve] Improve clickhouse monitor And Improve Pulsar monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/2015 -* [doc] translate help document for memcached monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2019 -* [improve] optimize collector httpsd discovery by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1991 -* [optimize] optimize code style and logic, add unit test by @yuluo-yx in https://github.com/apache/hertzbeat/pull/2010 -* [fix] Fix possible potential thread safe bugs by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/2021 -* [improve] add ci for home by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2024 -* [bugfix]Tag with empty value Shouldn't transform to Tag: by @JavaProgrammerLB in https://github.com/apache/hertzbeat/pull/2025 -* [bugfix] modify popup confirm to clear cache and cancel popup save by @Yanshuming1 in https://github.com/apache/hertzbeat/pull/2026 -* [improve] update monitor state desc by @tomsun28 in https://github.com/apache/hertzbeat/pull/2028 -* bugfix: fix overflow of integers by @Calvin979 in https://github.com/apache/hertzbeat/pull/2029 -* [improve] tips need update initial default password by @tomsun28 in https://github.com/apache/hertzbeat/pull/2030 -* [improve] deprecate support iotdb 0.* version by @Ceilzcx in https://github.com/apache/hertzbeat/pull/2032 -* [fixbug] required field check by @zhangshenghang in https://github.com/apache/hertzbeat/pull/2022 -* [improve] add IcmpCollectImplTest by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2033 -* [improve] fix code style by 
@zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2034 -* [improve] increase the length limit of the username field by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2035 -* [improve] Checkstyle include testSource by @crossoverJie in https://github.com/apache/hertzbeat/pull/2036 -* [bugfix] fix collector and frontend dependent license error by @tomsun28 in https://github.com/apache/hertzbeat/pull/2037 -* [improve] Add test for MemcachedCollectImpl by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2044 -* [imprve] Remove duplicate indices by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2045 -* [docs]: fix several typos in docs by @lw-yang in https://github.com/apache/hertzbeat/pull/2047 -* Add the missing parts of docs, fix layout, sync the English version with the Chinese version by @xfl12345 in https://github.com/apache/hertzbeat/pull/2048 -* [improve] add filename check in home ci by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2049 -* [improve] update dependency licenses and remove the aliyun sms depend by @tomsun28 in https://github.com/apache/hertzbeat/pull/2058 +* bugfix collector can not startup alone by @tomsun28 in +* translate some hertzbeat blog by @TJxiaobao in +* Check class description by @ZY945 in +* translate class description to english by @TJxiaobao in +* support monitor metrics name i18n: ClickHouse by @ZY945 in +* translate blog 20220601 to English by @vinci-897 in +* add a online prometheus parser and a prometheus-like push style. 
by @vinci-897 in +* translate blog 20220320 to English by @vinci-897 in +* support monitor metrics name i18n: DynamicTp by @ZY945 in +* translate blog 20220228 to English by @vinci-897 in +* translate blog 20220310 to English by @vinci-897 in +* translate blog 20220904 to English by @vinci-897 in +* support monitor metrics name i18n: Airflow by @ZY945 in +* support monitor metrics name i18n: IoTDB by @ZY945 in +* Translate 2022-02-11-hertzbeat document by @wang1027-wqh in +* bugfix The annotation @Transactional specifies rollbackFor. by @handy-git in +* add handy-git as a contributor for code by @allcontributors in +* feature:Translate 2022-02-17-hertzbeat Document by @wang1027-wqh in +* support monitor metrics name i18n: rocketmq by @ZY945 in +* [doc] update relate doc and readme by @tomsun28 in +* bugfix monitoring mongodb not work in springboot3 by @ZY945 in +* [feature] add storm monitor by @starmilkxin in +* [bugfix] fixed the issue in http_sd where services were incorrectly reported as available when they were actually unavailable by @starmilkxin in +* remove mysql-oracle dependency jar from release package lib by @tomsun28 in +* System config theme by @TJxiaobao in +* update webapp menu layout and doc by @tomsun28 in +* bugfix can not find mysql dependency when startup by @tomsun28 in +* support config common aes secret by @tomsun28 in +* [bugfix]fix the issue of add redis cluster node test error report(#1601) by @LiuTianyou in +* add LiuTianyou as a contributor for code by @allcontributors in +* mongodb monitoring support custom connection timeout param by @ZY945 in +* bugfix old data decode error when use new common-secret by @tomsun28 in +* [bugfix] fix bug where reopening pop-up window still retained previously edited data after closing. 
by @starmilkxin in +* monitor center add search type modal by @tomsun28 in +* fix status page logo overflow by @tomsun28 in +* bugfix npe monitor jobid may be null by @tomsun28 in +* support custom main menus in monitor template by @tomsun28 in +* update home website doc by @tomsun28 in +* [Improve] change package group to org apache hertzbeat by @vinci-897 in +* [improve] initial license clean up by @tomsun28 in +* update manager and collector logback config(#1704) by @handy-git in +* fix(sec): upgrade com.h2database:h2 to by @WinterKi1ler in +* add WinterKi1ler as a contributor for code by @allcontributors in +* update asf branch protected check by @tomsun28 in +* [doc]Update star chart by @zqr10159 in +* [fixed] fixed click collector online offline button error by @miki-hmt in +* [improve] initial doc clean up by @tomsun28 in +* [Improvement]Support multiple receivers. by @zqr10159 in +* [improvement]Add lisence. by @zqr10159 in +* Backend LICENSE Initialize by @wang1027-wqh in +* Back-end dependency upgrade by @TJxiaobao in +* [Improve] run hertzbeat in docker compose support dependen service condition by @gjjjj0101 in +* [bugfix] fix statuspage index exception by @makechoicenow in +* remove unlicensed dependency 'wolfy87 eventemitter' by @alpha951 in +* [improve] auto label when pr, update asf config by @tomsun28 in +* [improve] update asf config set required status checks context by @tomsun28 in +* [improve] home add apache info by @a-little-fool in +* [doc] Change e2e path by @crossoverJie in +* fix : ingress tls inoperative by @PeixyJ in +* [refactor] method improvement rationale by @dukbong in +* [improve] create disclaimer file, add incubating in describe by @tomsun28 in +* [improve] update new hertzbeat brand logo, update doc by @tomsun28 in +* Complete the code comment translation of the common module by @Hi-Mr-Wind in +* Remove unnecessary if-else statement. 
by @dukbong in +* [doc] remove and translate chinese to english in warehous by @xuziyang in +* Replace deprecated methods with builder pattern for RedisURI construction by @dukbong in +* remove and translate chinese to english in collector,script,push,remoting and manager module by @MananPoojara in +* Added the function of sending SMS messages through Alibaba Cloud. by @lwqzz in +* [improve]Add frontend license. by @zqr10159 in +* [test] Add RedisSingleCollectImplTest by @crossoverJie in +* [refactor] add override annotation by @handy-git in +* '[docs]bugfix: display syntax error of ipmi protocol' by @tomorrowshipyltm in +* [doc] translate alerter moudle code chinese to english by @tomsun28 in +* [refactor] database-related properties class, type changed to record by @xuziyang in +* Fix snmp template unit conversion problem by @TJxiaobao in +* [doc] Add help documentation for clickhouse monitoring by @LiuTianyou in +* [feature:update-checkstyle] Limit the java file header by @YxYL6125 in +* [improve]Add external lib folder to store mysql and oracle driver. by @zqr10159 in +* [Improve]When multiple lines are returned, each alarm is triggered instead of only the first alarm by @15613060203 in +* [doc] add team page in website by @alpha951 in +* [feature] Improve the import checkstyle by @crossoverJie in +* [doc] Add help document for dns monitoring by @LiuTianyou in +* [improve] preventing NPE by @dukbong in +* [refactor] change the warehouse properties the type to record by @xuziyang in +* Refactor: upgrade syntax to jdk17(instanceof & switch) by @Calvin979 in +* [test] Add NginxCollect test by @crossoverJie in +* [website] update team page by @tomsun28 in +* [test] Add RedisClusterCollectImplTest by @crossoverJie in +* [improve] Fix typo ReqStatusResponse by @crossoverJie in +* Comparing N objects for null with Assert.noNullElements(). 
by @dukbong in +* [doc] Add help document for elasticsearch monitoring and ftp monitoring by @LiuTianyou in +* [doc] add help documentation for huawei switch monitoring by @Alanxtl in +* chore: upgrade the api-testing (e2e) to v0.0.16 by @LinuxSuRen in +* [Remove][Improve]Mail config by @zqr10159 in +* Remove and translate chinese to english in code by @dukbong in +* [feature]Add monitoring for Hbase Master by @zhangshenghang in +* [doc] resolve code conflicts and coverage caused by pr(#1813) merge by @LiuTianyou in +* [doc] Add help document for tidb and nacos monitoring by @Alanxtl in +* [improve] use eclipselink orm replace of hibernate orm by @tomsun28 in +* [improve] Add whitespace checkstyle by @crossoverJie in +* [bugfix] dns monitoring template add query class parameter by @LiuTianyou in +* [Refactor] Preventing Unnecessary Object Creation and Using Utility Methods by @dukbong in +* [doc]Add and modify Doris FE Chinese and English documentation by @zhangshenghang in +* [docs] Optimize: add help docs for UDP port & Springboot3 help doc by @zuobiao-zhou in +* Code Simplification, Structure Changes, and Translation Work, Along with a Question by @dukbong in +* [doc] add help document for mongodb monitoring by @LiuTianyou in +* [collector] fix: inverts the compareTo sort of MetricsCollect run queue by @Pzz-2021 in +* [doc]Doc add debian system by @zhangshenghang in +* [feature] Add Apache Hbase RegionServer monitoring by @zhangshenghang in +* [improve] Optimize websocket monitor by @LiuTianyou in +* [refactor] Split the WarehouseProperties class by @xuziyang in +* [test] Add test for HttpsdImpl by @crossoverJie in +* [fix] Fix the wrong comment by @xuziyang in +* [refactor] trans and use assert by @dukbong in +* [bugfix] modify the command in the mongodb monitoring template by @LiuTianyou in +* [bigfix]Fix Debian system Top10 monitoring bug by @zhangshenghang in +* [cleanup]Delete the corresponding Chinese comments by @hudongdong129 in +* [doc] translates 
chinese comment to english. by @dukbong in +* [doc] fix error and add help document for prometheus task by @LiuTianyou in +* [feature] Add Linux process monitoring by @zhangshenghang in +* [test] Add test for FtpCollectImpl by @crossoverJie in +* [improve] use apache jexl replace of aviator by @tomsun28 in +* [bugfix] jpa data save logic repair by @zhangshenghang in +* [feature] add influxdb metrics monitoring by @TJxiaobao in +* [doc] add help document for rocketmq by @LiuTianyou in +* [improve] Improve checkstyle of test code. by @crossoverJie in +* [feature] Support Redfish protocol to monitoring server by @gjjjj0101 in +* Fix debian monitoring template issue about process monitoring by @LLP2333 in +* [bugfix] centos Top10 shows missing one by @zhangshenghang in +* [improve] add website apache incubator footer by @tomsun28 in +* [doc] update help document by @LiuTianyou in +* [feature] support flyway database migration by @tomsun28 in +* [improve] Delete the timestamp field in the class MetricFamily.Metric by @xuziyang in +* [improve] Use java.lang.AutoCloseable instead of CacheCloseable by @crossoverJie in +* [bugfix]Fix top10 process command. by @zqr10159 in +* [feature] support the VictoriaMetrics cluster by @xuziyang in +* [improve] Refactor common cache code by @crossoverJie in +* Eliminate Unnecessary Unboxing and Generics by @handy-git in +* [bugfix][doc]Add kafka sidebar. 
by @zqr10159 in +* [doc] I18n for monitoring template yml metrics by @zuobiao-zhou in +* [refactor] StoreProperties is no longer useful, delete it by @xuziyang in +* bugfix statistical metrics data matching fails by @tomsun28 in +* [doc] add help doc for flink monitoring by @HeartLinked in +* [doc] add almalinux documentation by @zhangshenghang in +* [improve] Missing a generic by @crossoverJie in +* [bugfix] Fixed some metrics of Jexlespression not matching in Elasticsearch by @zhangshenghang in +* feat(*): Support Time Type to Tengine Data Storage by @Clownsw in +* [feature] support random jwt secret when not custom by @tomsun28 in +* [doc] add opensuse doc by @zhangshenghang in +* fix when manager restart, collect register error by @Ceilzcx in +* [bugfix] fix can not use empty collection as query params in eclipselink by @tomsun28 in +* [doc] update doc add download page and pic by @tomsun28 in +* [test] Add test for UdpCollectImpl by @crossoverJie in +* fix license by @yqxxgh in +* [improve] refactor code by @Ceilzcx in +* [type:bugfix] fix customized menu invalid bug #1898 by @Aias00 in +* [type:bugfix] fix HTTP API bug #1895 by @Aias00 in +* [test] Add test for WebsocketCollectImpl by @crossoverJie in +* [doc] translates chinese comment to english. by @westboy in +* [doc] Add HIP document and template by @crossoverJie in +* [improve] clean up home webapp unused code by @tomsun28 in +* [feature] support use ngql query metrics from nebulaGraph by @LiuTianyou in +* [doc] Improve the Contribution Documentation. 
by @crossoverJie in +* [feature] add apache hdfs monitor by @zhangshenghang in +* [doc] update hbase documentation description by @zhangshenghang in +* [doc] Add documentation for nebulaGraph cluster monitoring and custom monitoring using NGQL, and clean up useless parameters by @LiuTianyou in +* [test] Add test for TelnetCollectImplTest by @crossoverJie in +* fix(*): fix TdEngine Init not found Database by @Clownsw in +* [doc] update contribution and add run-build guide by @tomsun28 in +* bugfix collector startup error can not find JdbcClient by @tomsun28 in +* [doc] add help document for freebsd monitoring by @LiuTianyou in +* [refactoring] Split AbstractHistoryDataStorage class by @xuziyang in +* [fix] fixed name error in monitoring template and improve NGQL protocol by @LiuTianyou in +* [refactoring] Split AbstractRealTimeDataStorage class by @xuziyang in +* [bugfix] fix ssl-cert days_remaining and npe by @tomsun28 in +* [feature] add apache yarn monitor by @zhangshenghang in +* [doc] add help document for redhat monitoring and rocky linux monitoring by @LiuTianyou in +* [test] Add test for NtpCollectImpl by @crossoverJie in +* [bugfix] fix alarm center tags display error by @tomsun28 in +* [improve] prepare for release hertzbeat v1.6.0 by @tomsun28 in +* add:Updated the Open Source Summer Project blog. by @TJxiaobao in +* [feature] Support monitoring of OpenAI accounts by @zuobiao-zhou in +* [refactoring] Inject a single instance of the data store by @xuziyang in +* [refactoring] AbstractHistoryDataStorage implement the DisposableBean by @xuziyang in +* [doc] update iotdb init document by @zhangshenghang in +* [improve] update build script by @tomsun28 in +* [test] add test for NgqlCollectImpl by @LiuTianyou in +* [bugfix]Replace monitors to alert. 
by @zqr10159 in +* [improve] add llm, server menu and update doc by @tomsun28 in +* [improve][HIP] HIP-01: Refactoring AbstractCollect by @crossoverJie in +* [bugfix] fix ConnectionCommonCache possible npe by @crossoverJie in +* [doc] add help document for eulerOS monitoring by @LiuTianyou in +* [fixbug] Fix the problem of no data for springboot3 monitoring by @zhangshenghang in +* commit:fix the front-end popup cannot exit by @Yanshuming1 in +* [fixbug] expression rule adaptation by @zhangshenghang in +* [doc] add help doc for influxdb-promql and kafka-promql monitoring by @LiuTianyou in +* [doc]: update readme-cn docs by @yuluo-yx in +* [improve][HIP] HIP-01: Implement refactoring AbstractCollect by @crossoverJie in +* [chore] update .gitignore to save .idea/icon.png by @yuluo-yx in +* [improve][bugfix]: fix AlertTemplateUtilTest test exception and update code style by @yuluo-yx in +* [feature] add apache hugegraph monitor by @zhangshenghang in +* [improve] Implement cascading parameter list for SNMP protocol by @zuobiao-zhou in +* [improve] optimize DateUtil and add test case by @yuluo-yx in +* [feature]Hertzbeat custom plugin. by @zqr10159 in +* update login page and status page color by @lwjxy in +* [chore] update code style and add some comment by @yuluo-yx in +* [doc]Hertzbeat plugin doc. 
by @zqr10159 in +* [doc] update contributors and update status page style by @tomsun28 in +* [feature] Implement cascading parameter list by @zuobiao-zhou in +* [doc]update threshold alarm doc by @zhangshenghang in +* [chore] optimize code style by @yuluo-yx in +* [fix] Compatible with MongoDB versions earlier than 3.6 by @gjjjj0101 in +* [chore] optimize manager code style by @yuluo-yx in +* [doc] Translate part of documentation development.md under `zh-cn` directory from `en` to `zh-cn` by @Thespica in +* [improve] http protocol prometheus parsing optimization by @zhangshenghang in +* [feature] add at function for wechat by @Yanshuming1 in +* [improve] add common util test by @yuluo-yx in +* [improve] update release license notice and package by @tomsun28 in +* [bugfix] fix collector startup error classpath by @tomsun28 in +* [chore] optimize code style by @yuluo-yx in +* [improve] Bump up `eslint-plugin-jsdoc` to 48.2.5 to support node 20+ by @Thespica in +* [doc] fix doc highlighting by @boatrainlsz in +* [web-app]feature: case insensitive search by @JavaProgrammerLB in +* [feature] Support time calculation expressions. 
by @LiuTianyou in +* [doc] add document for time expression by @LiuTianyou in +* [feature] Add Apache Pulsar monitor by @zhangshenghang in +* [doc] home verify release doc update by @tomsun28 in +* [Improve] Improve clickhouse monitor And Improve Pulsar monitor by @zhangshenghang in +* [doc] translate help document for memcached monitoring by @LiuTianyou in +* [improve] optimize collector httpsd discovery by @yuluo-yx in +* [optimize] optimize code style and logic, add unit test by @yuluo-yx in +* [fix] Fix possible potential thread safe bugs by @gjjjj0101 in +* [improve] add ci for home by @LiuTianyou in +* [bugfix]Tag with empty value Shouldn't transform to Tag: by @JavaProgrammerLB in +* [bugfix] modify popup confirm to clear cache and cancel popup save by @Yanshuming1 in +* [improve] update monitor state desc by @tomsun28 in +* bugfix: fix overflow of integers by @Calvin979 in +* [improve] tips need update initial default password by @tomsun28 in +* [improve] deprecate support iotdb 0.* version by @Ceilzcx in +* [fixbug] required field check by @zhangshenghang in +* [improve] add IcmpCollectImplTest by @zuobiao-zhou in +* [improve] fix code style by @zuobiao-zhou in +* [improve] increase the length limit of the username field by @zuobiao-zhou in +* [improve] Checkstyle include testSource by @crossoverJie in +* [bugfix] fix collector and frontend dependent license error by @tomsun28 in +* [improve] Add test for MemcachedCollectImpl by @zuobiao-zhou in +* [imprve] Remove duplicate indices by @zuobiao-zhou in +* [docs]: fix several typos in docs by @lw-yang in +* Add the missing parts of docs, fix layout, sync the English version with the Chinese version by @xfl12345 in +* [improve] add filename check in home ci by @LiuTianyou in +* [improve] update dependency licenses and remove the aliyun sms depend by @tomsun28 in ## New Contributors -* @handy-git made their first contribution in https://github.com/apache/hertzbeat/pull/1643 -* @LiuTianyou made their first 
contribution in https://github.com/apache/hertzbeat/pull/1684 -* @WinterKi1ler made their first contribution in https://github.com/apache/hertzbeat/pull/1718 -* @miki-hmt made their first contribution in https://github.com/apache/hertzbeat/pull/1734 -* @gjjjj0101 made their first contribution in https://github.com/apache/hertzbeat/pull/1748 -* @makechoicenow made their first contribution in https://github.com/apache/hertzbeat/pull/1747 -* @alpha951 made their first contribution in https://github.com/apache/hertzbeat/pull/1745 -* @crossoverJie made their first contribution in https://github.com/apache/hertzbeat/pull/1758 -* @PeixyJ made their first contribution in https://github.com/apache/hertzbeat/pull/1760 -* @dukbong made their first contribution in https://github.com/apache/hertzbeat/pull/1757 -* @xuziyang made their first contribution in https://github.com/apache/hertzbeat/pull/1773 -* @MananPoojara made their first contribution in https://github.com/apache/hertzbeat/pull/1774 -* @lwqzz made their first contribution in https://github.com/apache/hertzbeat/pull/1768 -* @tomorrowshipyltm made their first contribution in https://github.com/apache/hertzbeat/pull/1793 -* @YxYL6125 made their first contribution in https://github.com/apache/hertzbeat/pull/1799 -* @15613060203 made their first contribution in https://github.com/apache/hertzbeat/pull/1797 -* @Alanxtl made their first contribution in https://github.com/apache/hertzbeat/pull/1813 -* @zhangshenghang made their first contribution in https://github.com/apache/hertzbeat/pull/1820 -* @zuobiao-zhou made their first contribution in https://github.com/apache/hertzbeat/pull/1832 -* @Pzz-2021 made their first contribution in https://github.com/apache/hertzbeat/pull/1837 -* @LLP2333 made their first contribution in https://github.com/apache/hertzbeat/pull/1868 -* @HeartLinked made their first contribution in https://github.com/apache/hertzbeat/pull/1893 -* @Aias00 made their first contribution in 
https://github.com/apache/hertzbeat/pull/1908 -* @westboy made their first contribution in https://github.com/apache/hertzbeat/pull/1914 -* @Yanshuming1 made their first contribution in https://github.com/apache/hertzbeat/pull/1957 -* @yuluo-yx made their first contribution in https://github.com/apache/hertzbeat/pull/1964 -* @lwjxy made their first contribution in https://github.com/apache/hertzbeat/pull/1977 -* @Thespica made their first contribution in https://github.com/apache/hertzbeat/pull/1995 -* @boatrainlsz made their first contribution in https://github.com/apache/hertzbeat/pull/2006 -* @JavaProgrammerLB made their first contribution in https://github.com/apache/hertzbeat/pull/2007 -* @lw-yang made their first contribution in https://github.com/apache/hertzbeat/pull/2047 -* @xfl12345 made their first contribution in https://github.com/apache/hertzbeat/pull/2048 +* @handy-git made their first contribution in +* @LiuTianyou made their first contribution in +* @WinterKi1ler made their first contribution in +* @miki-hmt made their first contribution in +* @gjjjj0101 made their first contribution in +* @makechoicenow made their first contribution in +* @alpha951 made their first contribution in +* @crossoverJie made their first contribution in +* @PeixyJ made their first contribution in +* @dukbong made their first contribution in +* @xuziyang made their first contribution in +* @MananPoojara made their first contribution in +* @lwqzz made their first contribution in +* @tomorrowshipyltm made their first contribution in +* @YxYL6125 made their first contribution in +* @15613060203 made their first contribution in +* @Alanxtl made their first contribution in +* @zhangshenghang made their first contribution in +* @zuobiao-zhou made their first contribution in +* @Pzz-2021 made their first contribution in +* @LLP2333 made their first contribution in +* @HeartLinked made their first contribution in +* @Aias00 made their first contribution in +* @westboy made their 
first contribution in +* @Yanshuming1 made their first contribution in +* @yuluo-yx made their first contribution in +* @lwjxy made their first contribution in +* @Thespica made their first contribution in +* @boatrainlsz made their first contribution in +* @JavaProgrammerLB made their first contribution in +* @lw-yang made their first contribution in +* @xfl12345 made their first contribution in ## Just one command to get started @@ -374,14 +374,14 @@ Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-upd ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` -Detailed refer to HertzBeat Document https://hertzbeat.com/docs +Detailed refer to HertzBeat Document --- -**Github: https://github.com/apache/hertzbeat** +**Github: ** -Download Page: https://hertzbeat.apache.org/docs/download/ +Download Page: -Upgrade Guide: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-update/ +Upgrade Guide: Have Fun! diff --git a/home/blog/2024-07-07-new-committer.md b/home/blog/2024-07-07-new-committer.md index fbb007bfed3..041d6b771ee 100644 --- a/home/blog/2024-07-07-new-committer.md +++ b/home/blog/2024-07-07-new-committer.md @@ -39,4 +39,3 @@ Similarly, the community's response was swift 🏎, which greatly encouraged my During this process, I also incorporated some excellent experiences from other communities (Pulsar, OpenTelemetry) into HertzBeat. Everyone learned from each other, which is undoubtedly the charm of open source. Lastly, I want to thank the community's logicz for inviting me to become a Committer and tom for reviewing my PRs. I wish HertzBeat a successful graduation from the incubator and becoming a star project 🎊. 
- diff --git a/home/blog/2024-07-08-new-committer.md b/home/blog/2024-07-08-new-committer.md index b46cfec79f9..b955e2055b0 100644 --- a/home/blog/2024-07-08-new-committer.md +++ b/home/blog/2024-07-08-new-committer.md @@ -47,4 +47,3 @@ This process made me understand the importance of cooperation and made me feel t ### Conclusion Becoming a Committer of the Apache Hertzbeat project is a challenging and rewarding journey. Through continuous learning and contribution, I have not only improved my technical ability, but also found a sense of belonging and accomplishment in the community. I hope that my experience can inspire more people to participate in the open source community and jointly promote the progress and development of technology. To borrow the words of Tom: Participating in open source should not affect everyone's work and life, otherwise it will go against the original intention. Everyone should participate in the free time after get off work. - diff --git a/home/blog/2024-07-28-new-committer.md b/home/blog/2024-07-28-new-committer.md index c04ab4716ef..efae483b1ed 100644 --- a/home/blog/2024-07-28-new-committer.md +++ b/home/blog/2024-07-28-new-committer.md @@ -12,7 +12,7 @@ keywords: [open source monitoring system, alerting system] > It's an honor for me to become a Committer of Apache Hertzbeat > - ## Personal introduction +## Personal introduction I graduated in 2023 and am currently working as a Java developer in an Internet company. 
diff --git a/home/blog/2024-08-18-new-committer.md b/home/blog/2024-08-18-new-committer.md index 2376fd17466..b1175b5534b 100644 --- a/home/blog/2024-08-18-new-committer.md +++ b/home/blog/2024-08-18-new-committer.md @@ -60,7 +60,7 @@ Open source is often pure, and the Apache Foundation exists to protect projects #### Apache Community Identity -Before contributing to the community, it is important to understand the community's definition of identity, where a project's Committers are located, and how to become a Committer. The Apache community has a clear definition of [Contributor Identity](https://community.apache.org/contributor-ladder. html): [Contributor Identity](https://community.apache.org/contributor-ladder. html). The Apache community has a very clear definition of [contributor status](. html): +Before contributing to the community, it is important to understand the community's definition of identity, where a project's Committers are located, and how to become a Committer. The Apache community has a clear definition of [Contributor Identity](. html): [Contributor Identity](. html). The Apache community has a very clear definition of [contributor status](. html): ! [Apache contributor label](/img/blog/committer/yuluo-yx/6.jpg) @@ -84,7 +84,7 @@ I think the best way to get involved in open source and get nominated is the **O The next step is to **Improve your Coding ability**, find the missing unit tests and code bugs in the project, and then submit PR to fix them. This is often difficult for students who are just starting out, and the Coding ability is often built up after optimizing the project code and understanding every change. -The ecosystem around a good project** is often crucial**. A good documentation allows users/developers to quickly get started and participate in contributing. Multi-language extensions allow the project to be used in projects built in other languages, expanding the project's audience. 
A good Example repository allows users to discover more ways to use the project. Therefore, participating in the construction of the Example repository and writing and translating documentation is one of the ways to familiarize yourself with the project and get nominated. +The ecosystem around a good project**is often crucial**. A good documentation allows users/developers to quickly get started and participate in contributing. Multi-language extensions allow the project to be used in projects built in other languages, expanding the project's audience. A good Example repository allows users to discover more ways to use the project. Therefore, participating in the construction of the Example repository and writing and translating documentation is one of the ways to familiarize yourself with the project and get nominated. Finally, I would like to say that getting nominated is something that just happens. You should not participate in open source just for the sake of getting nominated, and you should never forget your original intention. diff --git a/home/docs/advanced/extend-http-default.md b/home/docs/advanced/extend-http-default.md index 1875a9cff77..c3b21f12070 100644 --- a/home/docs/advanced/extend-http-default.md +++ b/home/docs/advanced/extend-http-default.md @@ -283,4 +283,3 @@ metrics: parseType: jsonPath parseScript: '$' ``` - diff --git a/home/docs/advanced/extend-http-example-hertzbeat.md b/home/docs/advanced/extend-http-example-hertzbeat.md index 7f9fc7c93a5..5dabc107865 100644 --- a/home/docs/advanced/extend-http-example-hertzbeat.md +++ b/home/docs/advanced/extend-http-example-hertzbeat.md @@ -55,7 +55,7 @@ As above, usually our background API interface will design such a general return } ``` -**This time we get the metric data such as `category`, `app`, `status`, `size`, `availableSize` under the app. 
** +**This time we get the metric data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** ### Add custom monitoring template `hertzbeat` @@ -189,7 +189,7 @@ metrics: ``` -**The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type. ** +**The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type.** ![](/img/docs/advanced/extend-http-example-1.png) @@ -211,10 +211,10 @@ metrics: ---- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. -**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/docs/advanced/extend-http-example-token.md b/home/docs/advanced/extend-http-example-token.md index d4d7a6bed02..a89e1f1a26b 100644 --- a/home/docs/advanced/extend-http-example-token.md +++ b/home/docs/advanced/extend-http-example-token.md @@ -10,7 +10,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz ### Request process -【**Authentication information metrics (highest priority)**】【**HTTP interface carries account password call**】->【**Response data analysis**】->【**Analysis and issuance of TOKEN-accessToken as an metric **] -> [**Assign accessToken as a variable parameter to other collection index groups**] +【**Authentication information metrics (highest priority)**】【**HTTP interface carries account password call**】->【**Response data analysis**】->【**Analysis and issuance of TOKEN-accessToken as an metric**] -> [**Assign accessToken as a variable parameter to other collection index groups**] > Here we still use the hertzbeat monitoring example of Tutorial 1! 
The hertzbeat background interface not only supports the basic direct account password authentication used in Tutorial 1, but also supports token authentication. @@ -220,7 +220,7 @@ metrics: --- ``` -**At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows: ** +**At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows:** ```json { @@ -231,7 +231,7 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) -** After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page. ** +**After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page.** ![](/img/docs/advanced/extend-http-example-6.png) @@ -241,7 +241,7 @@ metrics: **Add an index group definition `summary` in `app-hertzbeat_token.yml`, which is the same as `summary` in Tutorial 1, and set the collection priority to 1** -**Set the authentication method in the HTTP protocol configuration of this index group to `Bearer Token`, assign the index `token` collected by the previous index group `auth` as a parameter, and use `^o^` as the internal replacement symbol, that is ` ^o^token^o^`. as follows:** +**Set the authentication method in the HTTP protocol configuration of this index group to `Bearer Token`, assign the index `token` collected by the previous index group `auth` as a parameter, and use `^o^` as the internal replacement symbol, that is `^o^token^o^`. as follows:** ```yaml - name: summary @@ -399,10 +399,10 @@ metrics: --- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. 
The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. -**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/docs/advanced/extend-http-jsonpath.md b/home/docs/advanced/extend-http-jsonpath.md index 86a49c06756..4e12fe86b57 100644 --- a/home/docs/advanced/extend-http-jsonpath.md +++ b/home/docs/advanced/extend-http-jsonpath.md @@ -61,7 +61,7 @@ Multilayer format:Set key value in the array #### Example -Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. +Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. The raw data returned by the interface is as follows: ```json @@ -172,4 +172,3 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` - diff --git a/home/docs/advanced/extend-http.md b/home/docs/advanced/extend-http.md index 10e5bdf9623..d9ad3ba59dc 100644 --- a/home/docs/advanced/extend-http.md +++ b/home/docs/advanced/extend-http.md @@ -13,14 +13,14 @@ sidebar_label: HTTP Protocol Custom Monitoring It can be seen from the process that we define a monitoring type of HTTP protocol. We need to configure HTTP request parameters, configure which Metrics to obtain, and configure the parsing method and parsing script for response data. HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. -**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. 
For details, refer to [**System Default Parsing**](extend-http-default) +**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. For details, refer to [**System Default Parsing**](extend-http-default) **JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) ### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. @@ -279,4 +279,3 @@ metrics: parseType: default ``` - diff --git a/home/docs/advanced/extend-jdbc.md b/home/docs/advanced/extend-jdbc.md index ec42f84f642..4a92d94c74f 100644 --- a/home/docs/advanced/extend-jdbc.md +++ b/home/docs/advanced/extend-jdbc.md @@ -21,7 +21,7 @@ We can obtain the corresponding Metric data through the data fields queried by S > Query a row of data, return the column name of the result set through query and map them to the queried field. -eg: +eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book limit 1; Here the Metric field and the response data can be mapped into a row of collected data one by one. @@ -30,7 +30,7 @@ Here the Metric field and the response data can be mapped into a row of collecte > Query multiple rows of data, return the column names of the result set and map them to the queried fields. 
-eg: +eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book; Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. @@ -39,9 +39,9 @@ Here the Metric field and the response data can be mapped into multiple rows of > Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. -eg: -queried fields:one two three four -query SQL:select key, value from book; +eg: +queried fields:one two three four +query SQL:select key, value from book; SQL response data: | key | value | @@ -57,7 +57,7 @@ Here by mapping the Metric field with the key of the response data, we can obta **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -241,4 +241,3 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` - diff --git a/home/docs/advanced/extend-jmx.md b/home/docs/advanced/extend-jmx.md index 2f9ba992f63..2110e98dca8 100644 --- a/home/docs/advanced/extend-jmx.md +++ b/home/docs/advanced/extend-jmx.md @@ -23,7 +23,7 @@ By configuring the monitoring template YML metrics `field`, `aliasFields`, `obje ![](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. 
@@ -192,4 +192,3 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` - diff --git a/home/docs/advanced/extend-ngql.md b/home/docs/advanced/extend-ngql.md index 2047e1d1cf5..65c5fb0c69b 100644 --- a/home/docs/advanced/extend-ngql.md +++ b/home/docs/advanced/extend-ngql.md @@ -21,6 +21,7 @@ Mapping the fields returned by NGQL queries to the metrics we need allows us to > `filterValue`: filter attribute value (optional) For example: + - online_meta_count#SHOW HOSTS META#Status#ONLINE Counts the number of rows returned by `SHOW HOSTS META` where Status equals ONLINE. - online_meta_count#SHOW HOSTS META## @@ -31,12 +32,14 @@ Counts the number of rows returned by `SHOW HOSTS META`. > Queries a single row of data by mapping the column names of the query result set to the queried fields. For example: + - Metrics fields: a, b - NGQL query: match (v:metrics) return v.metrics.a as a, v.metrics.b as b; Here, the metric fields can be mapped to the response data row by row. Notes: + - When using the `oneRow` method, if a single query statement returns multiple rows of results, only the first row of results will be mapped to the metric fields. - When the `commands` field contains two or more query statements and the returned fields of multiple query statements are the same, the fields returned by the subsequent statement will overwrite those returned by the previous statement. - It is recommended to use the limit statement to limit the number of rows returned in the result set when defining `commands`. @@ -46,11 +49,13 @@ Notes: > Queries multiple rows of data by mapping the column names of the query result set to the queried fields. For example: + - Metrics fields: a, b - NGQL query: match (v:metrics) return v.metrics.a as a, v.metrics.b as b; Here, the metric fields can be mapped to the response data row by row. Notes: + - When using the `multiRow` method, the `commands` field can only contain one query statement. 
#### **columns** @@ -58,6 +63,7 @@ Notes: > Collects a single row of metric data by mapping two columns of data (key-value), where the key matches the queried fields and the value is the value of the queried field. Notes: + - When using the `columns` method, the first two columns of the result set are mapped to collect data by default, where the first column corresponds to the metric name and the second column corresponds to the metric value. - When the `commands` field contains two or more query statements and the first column of data returned by multiple query statements is duplicated, the result of the last statement will be retained. @@ -67,7 +73,7 @@ Notes: ![HertzBeat Page](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -166,4 +172,3 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` - diff --git a/home/docs/advanced/extend-point.md b/home/docs/advanced/extend-point.md index e4d92d8d875..018e67deb30 100644 --- a/home/docs/advanced/extend-point.md +++ b/home/docs/advanced/extend-point.md @@ -11,7 +11,7 @@ sidebar_label: Custom Monitoring **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -138,4 +138,3 @@ metrics: parseType: website ``` - diff --git a/home/docs/advanced/extend-snmp.md b/home/docs/advanced/extend-snmp.md index b3bb9173c87..3dae2b8b6dd 100644 --- a/home/docs/advanced/extend-snmp.md +++ b/home/docs/advanced/extend-snmp.md @@ -23,7 +23,7 @@ By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` p ![](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. 
@@ -169,4 +169,3 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` - diff --git a/home/docs/advanced/extend-ssh.md b/home/docs/advanced/extend-ssh.md index bf960376179..3a5486c394b 100644 --- a/home/docs/advanced/extend-ssh.md +++ b/home/docs/advanced/extend-ssh.md @@ -21,12 +21,12 @@ We can obtain the corresponding Metric data through the data fields queried by t > Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. -eg: -Metrics of Linux to be queried hostname-host name,uptime-start time -Host name original query command:`hostname` -Start time original query command:`uptime | awk -F "," '{print $1}'` -Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): -`hostname; uptime | awk -F "," '{print $1}'` +eg: +Metrics of Linux to be queried hostname-host name,uptime-start time +Host name original query command:`hostname` +Start time original query command:`uptime | awk -F "," '{print $1}'` +Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): +`hostname; uptime | awk -F "," '{print $1}'` The data responded by the terminal is: ``` @@ -34,8 +34,8 @@ tombook 14:00:15 up 72 days ``` -At last collected Metric data is mapped one by one as: -hostname is `tombook` +At last collected Metric data is mapped one by one as: +hostname is `tombook` uptime is `14:00:15 up 72 days` Here the Metric field and the response data can be mapped into a row of collected data one by one @@ -44,8 +44,8 @@ Here the Metric field and the response data can be mapped into a row of collecte > Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. 
-eg: -Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory +eg: +Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory Memory metrics original query command:`free -m`, Console response: ```shell @@ -55,7 +55,7 @@ Swap: 8191 33 8158 ``` In hertzbeat multiRow format parsing requires a one-to-one mapping between the column name of the response data and the indicaotr value, so the corresponding query SHELL script is: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` +`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` Console response is: ```shell @@ -69,7 +69,7 @@ Here the Metric field and the response data can be mapped into collected data on **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -212,4 +212,3 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` - diff --git a/home/docs/advanced/extend-telnet.md b/home/docs/advanced/extend-telnet.md index 7b45c5aab32..052d4b93201 100644 --- a/home/docs/advanced/extend-telnet.md +++ b/home/docs/advanced/extend-telnet.md @@ -23,7 +23,7 @@ By configuring the metrics `field`, `aliasFields` the `Telnet` protocol of the m ![](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. 
@@ -297,4 +297,3 @@ metrics: ``` - diff --git a/home/docs/advanced/extend-tutorial.md b/home/docs/advanced/extend-tutorial.md index ab25bed88f7..8db349d96c1 100644 --- a/home/docs/advanced/extend-tutorial.md +++ b/home/docs/advanced/extend-tutorial.md @@ -55,7 +55,7 @@ As above, usually our background API interface will design such a general return } ``` -**This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app. ** +**This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** ### Add Monitoring Template Yml @@ -201,7 +201,7 @@ metrics: parseScript: '$.data.apps.*' ``` -**The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type. ** +**The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type.** ![](/img/docs/advanced/extend-http-example-1.png) @@ -223,10 +223,10 @@ metrics: ---- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. Thanks for the old iron support. Refill! -**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/docs/community/become_committer.md b/home/docs/community/become_committer.md index 382e69fb9bf..11d0c660b13 100644 --- a/home/docs/community/become_committer.md +++ b/home/docs/community/become_committer.md @@ -44,7 +44,7 @@ you must commit code with your commit privilege to the codebase; it means you are committed to the HertzBeat project and are productively contributing to our community's success. 
-## Committer requirements: +## Committer requirements There are no strict rules for becoming a committer or PPMC member. Candidates for new committers are typically people that are active @@ -83,4 +83,3 @@ of the main website or HertzBeat's GitHub repositories. - Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events - diff --git a/home/docs/community/become_pmc_member.md b/home/docs/community/become_pmc_member.md index cd9dff4e02a..70aa00575dd 100644 --- a/home/docs/community/become_pmc_member.md +++ b/home/docs/community/become_pmc_member.md @@ -44,7 +44,7 @@ you must commit code with your commit privilege to the codebase; it means you are committed to the HertzBeat project and are productively contributing to our community's success. -## PMC member requirements: +## PMC member requirements There are no strict rules for becoming a committer or PPMC member. Candidates for new PMC member are typically people that are active @@ -83,4 +83,3 @@ of the main website or HertzBeat's GitHub repositories. - Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events - diff --git a/home/docs/community/code-style-and-quality-guide.md b/home/docs/community/code-style-and-quality-guide.md index 70ddc5d518e..8be58aa2d84 100644 --- a/home/docs/community/code-style-and-quality-guide.md +++ b/home/docs/community/code-style-and-quality-guide.md @@ -63,8 +63,64 @@ limitations under the License. ### 2.2 Document style check -1. Run `mvn spotless:check` in the project to automatically detect the Markdown file format. -2. Run `mvn spotless:apply` in the project to automatically format the Markdown file format to ensure that all documents meet the specifications. +1. 
Install `markdownlint-cli2` and run `npm install markdownlint-cli2 --global` +2. Run `markdownlint "home/**/*.md"` in the project to automatically detect the Markdown file format. +3. Run `markdownlint --fix "home/**/*.md"` in the project to automatically format the Markdown file format to ensure that all documents meet the specifications. + +Error code description: + +| **Error code** | **description** | +|--------------------------------------------| ------------------------------------------------------------ | +| **MD001 heading-increment** | Heading levels should only increment by one level at a time | +| **MD003 heading-style** | Heading style | +| **MD004 ul-style** | Unordered list style | +| **MD005 list-indent** | Inconsistent indentation for list items at the same level | +| **MD007 ul-indent** | Unordered list indentation | +| **MD009 no-trailing-spaces** | Trailing spaces | +| **MD010 no-hard-tabs** | Hard tabs | +| **MD011 no-reversed-links** | Reversed link syntax | +| **MD012 no-multiple-blanks** | Multiple consecutive blank lines | +| **MD013 line-length** | Line length | +| **MD014 commands-show-output** | Dollar signs used before commands without showing output | +| **MD018 no-missing-space-atx** | No space after hash on atx style heading | +| **MD019 no-multiple-space-atx** | Multiple spaces after hash on atx style heading | +| **MD020 no-missing-space-closed-atx** | No space inside hashes on closed atx style heading | +| **MD021 no-multiple-space-closed-atx** | Multiple spaces inside hashes on closed atx style heading | +| **MD022 blanks-around-headings** | Headings should be surrounded by blank lines | +| **MD023 heading-start-left** | Headings must start at the beginning of the line | +| **MD024 no-duplicate-heading** | Multiple headings with the same content | +| **MD025 single-title/single-h1** | Multiple top-level headings in the same document | +| **MD026 no-trailing-punctuation** | Trailing punctuation in heading | +| **MD027 
no-multiple-space-blockquote** | Multiple spaces after blockquote symbol | +| **MD028 no-blanks-blockquote** | Blank line inside blockquote | +| **MD029 ol-prefix** | Ordered list item prefix | +| **MD030 list-marker-space** | Spaces after list markers | +| **MD031 blanks-around-fences** | Fenced code blocks should be surrounded by blank lines | +| **MD032 blanks-around-lists** | Lists should be surrounded by blank lines | +| **MD033 no-inline-html** | Inline HTML | +| **MD034 no-bare-urls** | Bare URL used | +| **MD035 hr-style** | Horizontal rule style | +| **MD036 no-emphasis-as-heading** | Emphasis used instead of a heading | +| **MD037 no-space-in-emphasis** | Spaces inside emphasis markers | +| **MD038 no-space-in-code** | Spaces inside code span elements | +| **MD039 no-space-in-links** | Spaces inside link text | +| **MD040 fenced-code-language** | Fenced code blocks should have a language specified | +| **MD041 first-line-heading/first-line-h1** | First line in a file should be a top-level heading | +| **MD042 no-empty-links** | No empty links | +| **MD043 required-headings** | Required heading structure | +| **MD044 proper-names** | Proper names should have the correct capitalization | +| **MD045 no-alt-text** | Images should have alternate text (alt text) | +| **MD046 code-block-style** | Code block style | +| **MD047 single-trailing-newline** | Files should end with a single newline character | +| **MD048 code-fence-style** | Code fence style | +| **MD049 emphasis-style** | Emphasis style | +| **MD050 strong-style** | Strong style | +| **MD051 link-fragments** | Link fragments should be valid | +| **MD052 reference-links-images** | Reference links and images should use a label that is defined | +| **MD053 link-image-reference-definitions** | Link and image reference definitions should be needed | +| **MD054 link-image-style** | Link and image style | +| **MD055 table-pipe-style** | Table pipe style | +| **MD056 table-column-count** | Table column count 
| ## 3 Programming Specification @@ -75,6 +131,7 @@ limitations under the License. ```java Cache publicKeyCache; ``` + 2. Pinyin abbreviations are prohibited for variables (excluding nouns such as place names), such as chengdu. 3. It is recommended to end variable names with a `type`. For variables of type `Collection/List`, take `xxxx` (plural representing multiple elements) or end with `xxxList` (specific type). @@ -84,6 +141,7 @@ limitations under the License. Map idUserMap; Map userIdNameMap; ``` + 4. That can intuitively know the type and meaning of the variable through its name. Method names should start with a verb first as follows: @@ -119,6 +177,7 @@ limitations under the License. return resp; } ``` + - Positive demo: > Strings are extracted as constant references. @@ -144,6 +203,7 @@ limitations under the License. return resp; } ``` + 2. Ensure code readability and intuitiveness - The string in the `annotation` symbol doesn't need to be extracted as constant. @@ -203,6 +263,7 @@ public CurrentHashMap funName(); return; } ``` + - Positive demo: ```java @@ -226,11 +287,13 @@ public CurrentHashMap funName(); - Redundant lines Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to continuous nested `if... else..`, it should be considered to try + - `merging branches`, - `inverting branch conditions` - `extracting private methods` to reduce code line depth and improve readability like follows: + - Union or merge the logic into the next level calling - Negative demo: @@ -267,6 +330,7 @@ if(expression2) { ...... } ``` + - Reverse the condition - Negative demo: @@ -281,6 +345,7 @@ if(expression2) { } } ``` + - Positive demo: ```java @@ -294,6 +359,7 @@ if(expression2) { // ... } ``` + - Using a single variable or method to reduce the complex conditional expression - Negative demo: @@ -302,6 +368,7 @@ if(expression2) { ... } ``` + - Positive demo: ```java @@ -346,6 +413,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. 
```java map.computeIfAbsent(key, x -> key.toLowerCase()) ``` + - Positive demo: ```java @@ -359,6 +427,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ```java map.computeIfAbsent(key, k-> Loader.load(k)); ``` + - Positive demo: ```java @@ -388,6 +457,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. return; } ``` + - Positive demo: ```java @@ -405,6 +475,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. return; } ``` + - Positive demo: ```java @@ -422,6 +493,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. return; } ``` + - Positive demo: ```java @@ -441,6 +513,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. return; } ``` + - Positive demo: ```java @@ -458,6 +531,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ... } ``` + - Positive demo: ```java @@ -473,6 +547,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ```java System.out.println(JobStatus.RUNNING.toString()); ``` + - Positive demo: ```java @@ -488,6 +563,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ... } ``` + - Positive demo: ```java @@ -525,6 +601,7 @@ public void process(String input) { ```java log.info("Deploy cluster request " + deployRequest); ``` + - Positive demo ```java @@ -544,6 +621,7 @@ When printing the log content, if the actual parameters of the log placeholder a List userList = getUsersByBatch(1000); LOG.debug("All users: {}", getAllUserIds(userList)); ``` + - Positive demo: In this case, we should determine the log level in advance before making actual log calls as follows: @@ -552,7 +630,7 @@ When printing the log content, if the actual parameters of the log placeholder a // ignored declaration lines. 
List userList = getUsersByBatch(1000); if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); } ``` @@ -564,13 +642,12 @@ When printing the log content, if the actual parameters of the log placeholder a ## References -- https://site.mockito.org/ -- https://alibaba.github.io/p3c/ -- https://rules.sonarsource.com/java/ -- https://junit.org/junit5/ -- https://streampark.apache.org/ +- +- +- +- +- ``` ``` - diff --git a/home/docs/community/contribution.md b/home/docs/community/contribution.md index f7d932bcd2c..9a634ffffed 100644 --- a/home/docs/community/contribution.md +++ b/home/docs/community/contribution.md @@ -47,7 +47,7 @@ Even small corrections to typos are very welcome :) ### Getting HertzBeat up and running -> To get HertzBeat code running on your development tools, and able to debug with breakpoints. +> To get HertzBeat code running on your development tools, and able to debug with breakpoints. > This is a front-end and back-end separation project. To start the local code, the back-end manager and the front-end web-app must be started separately. #### Backend start @@ -162,6 +162,7 @@ Add WeChat account `ahertzbeat` to pull you into the WeChat group. - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** Provide monitoring management, system management basic services. > Provides monitoring management, monitoring configuration management, system user management, etc. +> > - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. > Use common protocols to remotely collect and obtain peer-to-peer metrics data. > - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services. 
diff --git a/home/docs/community/development.md b/home/docs/community/development.md index 549084ce516..3e17d01385b 100644 --- a/home/docs/community/development.md +++ b/home/docs/community/development.md @@ -6,8 +6,8 @@ sidebar_label: Development ## Getting HertzBeat code up and running -> To get HertzBeat code running on your development tools, and able to debug with breakpoints. -> This is a front-end and back-end separation project. +> To get HertzBeat code running on your development tools, and able to debug with breakpoints. +> This is a front-end and back-end separation project. > To start the local code, the back-end [manager](https://github.com/apache/hertzbeat/tree/master/manager) and the front-end [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) must be started separately. ### Backend start diff --git a/home/docs/community/document.md b/home/docs/community/document.md index e561b3f1b2c..b7f3af828d1 100644 --- a/home/docs/community/document.md +++ b/home/docs/community/document.md @@ -40,8 +40,8 @@ This website is compiled using node, using Docusaurus framework components 1. Download and install nodejs (version 18.8.0) 2. Clone the code to the local `git clone git@github.com:apache/hertzbeat.git` 3. In `home` directory run `npm install` to install the required dependent libraries. -4. In `home` directory run `npm run start`, you can visit http://localhost:3000 to view the English mode preview of the site -5. In `home` directory run `npm run start-zh-cn`, you can visit http://localhost:3000 to view the Chinese mode preview of the site +4. In `home` directory run `npm run start`, you can visit to view the English mode preview of the site +5. In `home` directory run `npm run start-zh-cn`, you can visit to view the Chinese mode preview of the site 6. To generate static website resource files, run `npm run build`. The static resources of the build are in the build directory. 
## Directory structure @@ -93,4 +93,3 @@ css and other style files are placed in the `src/css` directory ### Page content modification > All pages doc can be directly jumped to the corresponding github resource modification page through the 'Edit this page' button at the bottom - diff --git a/home/docs/community/how-to-release.md b/home/docs/community/how-to-release.md index 12338c1d3a6..3e87aa1b4b5 100644 --- a/home/docs/community/how-to-release.md +++ b/home/docs/community/how-to-release.md @@ -22,6 +22,7 @@ This release process is operated in the UbuntuOS(Windows,Mac), and the following ## 2. Preparing for release > First summarize the account information to better understand the operation process, will be used many times later. +> > - apache id: `muchunjin (APACHE LDAP UserName)` > - apache passphrase: `APACHE LDAP Passphrase` > - apache email: `muchunjin@apache.org` @@ -128,12 +129,12 @@ gpg: Total number processed: 1 gpg: unchanged: 1 ``` -Or enter https://keyserver.ubuntu.com/ address in the browser, enter the name of the key and click 'Search key' to search if existed. +Or enter address in the browser, enter the name of the key and click 'Search key' to search if existed. #### 2.4 Add the gpg public key to the KEYS file of the Apache SVN project repo -- Apache HertzBeat Branch Dev https://dist.apache.org/repos/dist/dev/incubator/hertzbeat -- Apache HertzBeat Branch Release https://dist.apache.org/repos/dist/release/incubator/hertzbeat +- Apache HertzBeat Branch Dev +- Apache HertzBeat Branch Release ##### 2.4.1 Add public key to KEYS in dev branch @@ -167,7 +168,7 @@ $ svn ci -m "add gpg key for muchunjin" ## 3. Prepare material package & release -#### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release. 
+#### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release ```shell git checkout master @@ -328,7 +329,7 @@ svn commit -m "release for HertzBeat 1.6.0" - Check Apache SVN Commit Results -> Visit the address https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1/ in the browser, check if existed the new material package +> Visit the address in the browser, check if existed the new material package ## 4. Enter the community voting stage @@ -336,7 +337,7 @@ svn commit -m "release for HertzBeat 1.6.0" Send a voting email in the community requires at least three `+1` and no `-1`. -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`:
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: @@ -392,7 +393,7 @@ Thanks! After 72 hours, the voting results will be counted, and the voting result email will be sent, as follows. -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`:
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -421,14 +422,14 @@ Best, ChunJin Mu ``` -One item of the email content is `Vote thread`, and the link is obtained here: https://lists.apache.org/list.html?dev@hertzbeat.apache.org +One item of the email content is `Vote thread`, and the link is obtained here: #### 3.2 Send Incubator Community voting mail Send a voting email in the incubator community requires at least three `+1` and no `-1`. -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org、private@hertzbeat.apache.org
+> `Send to`:
+> `cc`:
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -482,7 +483,7 @@ ChunJin Mu If there is no -1 after 72 hours, reply to the email as follows -> `Send to`: general@incubator.apache.org
+> `Send to`:
> `Body`: ``` @@ -494,7 +495,7 @@ Chunjin Mu Then the voting results will be counted, and the voting result email will be sent, as follows. -> `Send to`: general@incubator.apache.org
+> `Send to`:
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -520,7 +521,7 @@ Best, ChunJin Mu ``` -One item of the email content is `Vote thread`, and the link is obtained here: https://lists.apache.org/list.html?general@incubator.apache.org +One item of the email content is `Vote thread`, and the link is obtained here: Wait a day to see if the tutor has any other comments, if not, send the following announcement email @@ -534,10 +535,10 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http #### 4.2 Add the new version download address to the official website -https://github.com/apache/hertzbeat/blob/master/home/docs/download.md -https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md + + -Open the official website address https://hertzbeat.apache.org/docs/download/ to see if there is a new version of the download +Open the official website address to see if there is a new version of the download > It should be noted that the download link may take effect after an hour, so please pay attention to it. @@ -550,6 +551,7 @@ You can modify it on the original RC Release without creating a new Release. ::: Then enter Release Title and Describe + - Release Title: ``` @@ -569,8 +571,8 @@ The rename the release-1.6.0-rc1 branch to release-1.6.0. #### 4.5 Send new version announcement email -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org
+> `Send to`:
+> `cc`:
> `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: diff --git a/home/docs/community/how-to-verify.md b/home/docs/community/how-to-verify.md index 38b507149b0..77e53ee444c 100644 --- a/home/docs/community/how-to-verify.md +++ b/home/docs/community/how-to-verify.md @@ -8,7 +8,7 @@ sidebar_position: 4 For detailed check list, please refer to the official [check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) -Version content accessible in browser https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/ +Version content accessible in browser ## 1. Download the candidate version @@ -47,8 +47,8 @@ First import the publisher's public key. Import KEYS from the svn repository to #### 2.2.1 Import public key ```shell -$ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS -$ gpg --import KEYS # Import KEYS to local +curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS +gpg --import KEYS # Import KEYS to local ``` #### 2.2.2 Trust the public key @@ -83,7 +83,7 @@ gpg> #### 2.2.3 Check the gpg signature ```shell -$ for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i; done +for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i; done ``` check result @@ -100,7 +100,7 @@ gpg: Good signature from "xxx @apache.org>" ### 2.3 Check sha512 hash ```shell -$ for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done +for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` ### 2.4 Check the binary package @@ -155,13 +155,13 @@ If you initiate a posting vote, you can refer to this response example to reply When replying to the email, you must bring the information that you have checked by yourself. Simply replying to `+1 approve` is invalid. -When PPMC votes in the dev@hertzbeat.apache.org hertzbeat community, Please bring the binding suffix to indicate that it has a binding vote for the vote in the hertzbeat community, and it is convenient to count the voting results. 
+When PPMC votes in the hertzbeat community, Please bring the binding suffix to indicate that it has a binding vote for the vote in the hertzbeat community, and it is convenient to count the voting results. -When IPMC votes in the general@incubator.apache.org incubator community. Please bring the binding suffix to indicate that the voting in the incubator community has a binding vote, which is convenient for counting the voting results. +When IPMC votes in the incubator community. Please bring the binding suffix to indicate that the voting in the incubator community has a binding vote, which is convenient for counting the voting results. :::caution -If you have already voted on dev@hertzbeat.apache.org, you can take it directly to the incubator community when you reply to the vote, such as: +If you have already voted on , you can take it directly to the incubator community when you reply to the vote, such as: ```html //Incubator community voting, only IPMC members have binding binding,PPMC needs to be aware of binding changes @@ -196,6 +196,6 @@ I checked: 5. 
``` ---- +--- This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/docs/community/mailing_lists.md b/home/docs/community/mailing_lists.md index c5ab8df7604..ef1a0a20329 100644 --- a/home/docs/community/mailing_lists.md +++ b/home/docs/community/mailing_lists.md @@ -34,7 +34,7 @@ Before you post anything to the mailing lists, be sure that you already **subscr | List Name | Address | Subscribe | Unsubscribe | Archive | |--------------------|--------------------------|--------------------------------------------------------|------------------------------------------------------------|------------------------------------------------------------------------| -| **Developer List** | dev@hertzbeat.apache.org | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | +| **Developer List** | | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### Notification List @@ -42,16 +42,16 @@ Before you post anything to the mailing lists, be sure that you already **subscr | List Name | Address | Subscribe | Unsubscribe | Archive | |-----------------------|------------------------------------|------------------------------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------| -| **Notification List** | notifications@hertzbeat.apache.org | [subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| **Notification List** | | 
[subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## Steps for Subscription Sending a subscription email is also very simple. The steps are as follows: - 1、**Subscribe**: Click the **subscribe** button in the above table, and it redirects to your mail client. The subject and content are arbitrary. - After that, you will receive a confirmation email from dev-help@hertzbeat.apache.org (if not received, please confirm whether the email is automatically classified as SPAM, promotion email, subscription email, etc.). + After that, you will receive a confirmation email from (if not received, please confirm whether the email is automatically classified as SPAM, promotion email, subscription email, etc.). - 2、**Confirm**: Reply directly to the confirmation email, or click on the link in the email to reply quickly. The subject and content are arbitrary. -- 3、**Welcome**: After completing the above steps, you will receive a welcome email with the subject WELCOME to dev@hertzbeat.apache.org, and you have successfully subscribed to the Apache HertzBeat mailing list. +- 3、**Welcome**: After completing the above steps, you will receive a welcome email with the subject WELCOME to , and you have successfully subscribed to the Apache HertzBeat mailing list. ## Post Plain Text Mails diff --git a/home/docs/community/new_committer_process.md b/home/docs/community/new_committer_process.md index 0d72df109d7..e62d6d317e2 100644 --- a/home/docs/community/new_committer_process.md +++ b/home/docs/community/new_committer_process.md @@ -110,7 +110,7 @@ ttt ``` Note that, Voting ends one week from today, i.e. 
-[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache Voting Guidelines](https://community.apache.org/newcommitter.html) ### Close Vote Template @@ -283,4 +283,3 @@ Thanks for your contributions. Best Wishes! ``` - diff --git a/home/docs/community/new_pmc_member_process.md b/home/docs/community/new_pmc_member_process.md index 414dad94a56..9397d6dc034 100644 --- a/home/docs/community/new_pmc_member_process.md +++ b/home/docs/community/new_pmc_member_process.md @@ -78,7 +78,7 @@ ${Work list}[1] [1] https://github.com/apache/hertzbeat/commits?author=${NEW_PMC_NAME} ``` -Note that, Voting ends one week from today, i.e. [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +Note that, Voting ends one week from today, i.e. [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache Voting Guidelines](https://community.apache.org/newcommitter.html) ### Close Vote Template @@ -282,4 +282,3 @@ A PPMC member helps manage and guide the direction of the project. Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` - diff --git a/home/docs/community/submit-code.md b/home/docs/community/submit-code.md index 15bfeba545a..4b5f5dacf42 100644 --- a/home/docs/community/submit-code.md +++ b/home/docs/community/submit-code.md @@ -49,12 +49,14 @@ git remote -v ```shell git fetch upstream ``` + * Synchronize remote repository code to local repository ```shell git checkout origin/dev git merge --no-ff upstream/dev ``` + * **⚠️Note that you must create a new branch to develop features `git checkout -b feature-xxx`. 
It is not recommended to use the master branch for direct development** * After modifying the code locally, submit it to your own repository: **Note that the submission information does not contain special characters** @@ -63,8 +65,8 @@ git remote -v git commit -m 'commit content' git push ``` + * Submit changes to the remote repository, you can see a green button "Compare & pull request" on your repository page, click it. * Select the modified local branch and the branch you want to merge with the past, you need input the message carefully, describe doc is important as code, click "Create pull request". * Then the community Committers will do CodeReview, and then he will discuss some details (design, implementation, performance, etc.) with you, afterward you can directly update the code in this branch according to the suggestions (no need to create a new PR). When this pr is approved, the commit will be merged into the master branch * Finally, congratulations, you have become an official contributor to HertzBeat ! You will be added to the contributor wall, you can contact the community to obtain a contributor certificate. - diff --git a/home/docs/download.md b/home/docs/download.md index 4bccd984b70..037e891e63d 100644 --- a/home/docs/download.md +++ b/home/docs/download.md @@ -4,12 +4,14 @@ title: Download Apache HertzBeat (incubating) sidebar_label: Download --- -> **Here is the Apache HertzBeat (incubating) official download page.** +> **Here is the Apache HertzBeat (incubating) official download page.** > **Please choose version to download from the following tables. It is recommended use the latest.** :::tip + - Please verify the release with corresponding hashes(sha512), signatures and [project release KEYS](https://downloads.apache.org/incubator/hertzbeat/KEYS). - Refer to [How to Verify](https://www.apache.org/dyn/closer.cgi#verify) for how to check the hashes and signatures. 
+ ::: ## The Latest Release @@ -26,8 +28,8 @@ Previous releases of HertzBeat may be affected by security issues, please use th > Apache HertzBeat provides a docker image for each release. You can pull the image from the [Docker Hub](https://hub.docker.com/r/apache/hertzbeat). -- HertzBeat https://hub.docker.com/r/apache/hertzbeat -- HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector +- HertzBeat +- HertzBeat Collector ## All Archived Releases diff --git a/home/docs/help/activemq.md b/home/docs/help/activemq.md index f24bc37fbbb..ef3cc911969 100644 --- a/home/docs/help/activemq.md +++ b/home/docs/help/activemq.md @@ -143,4 +143,3 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/ai_config.md b/home/docs/help/ai_config.md index 5c5b56962af..f6c704646e7 100644 --- a/home/docs/help/ai_config.md +++ b/home/docs/help/ai_config.md @@ -23,15 +23,15 @@ keywords: [AI] |-----------------------|-----------------------------------------------------|-----------------------------------------------------------------| | type | zhiPu (must be exactly the same as example) | | | model | glm-4-0520、glm-4 、glm-4-air、glm-4-airx、 glm-4-flash | | -| api-key | xxxxx.xxxxxx | https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys | +| api-key | xxxxx.xxxxxx | | #### Alibaba AI | Name of the parameter | Example | Link | |-----------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------| | type | alibabaAi (must be exactly the same as example) | | -| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | 
https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction?spm=a2c4g.11186623.0.0.4e0246c1RQFKMH | -| api-key | xxxxxxxxxxx | https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.i10 | +| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | | +| api-key | xxxxxxxxxxx | | #### Kimi AI @@ -39,18 +39,18 @@ keywords: [AI] |-----------------------|-------------------------------------------------|-----------------------------------------------| | type | kimiAi (must be exactly the same as example) | | | model | moonshot-v1-8k、moonshot-v1-32k、moonshot-v1-128k | | -| api-key | xxxxxxxxxxx | https://platform.moonshot.cn/console/api-keys | +| api-key | xxxxxxxxxxx | | #### sparkDesk AI -QuickStart: https://www.xfyun.cn/doc/platform/quickguide.html +QuickStart: | Name of the parameter | Example | Link | |-----------------------|--------------------------------------------------|---------------------------------------| | type | sparkDesk (must be exactly the same as example) | | | model | general、generalv2、generalv3、generalv3.5、4.0Ultra | | -| api-key | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | -| api-secret | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | +| api-key | xxxxxxxxxxx | | +| api-secret | xxxxxxxxxxx | | | sparkDesk version | model | |-------------------|-------------| @@ -59,4 +59,3 @@ QuickStart: https://www.xfyun.cn/doc/platform/quickguide.html | Spark Pro | generalv3 | | Spark V2.0 | generalv2 | | Spark Lite(free) | general | - diff --git a/home/docs/help/airflow.md b/home/docs/help/airflow.md index 52367155d89..a7f77f7f5b6 100644 --- a/home/docs/help/airflow.md +++ b/home/docs/help/airflow.md @@ -36,4 +36,3 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] |-------------|------|---------------| | value | 无 | Airflow版本 | | git_version | 无 | Airflow git版本 | - diff --git a/home/docs/help/alert_dingtalk.md 
b/home/docs/help/alert_dingtalk.md index b86ed662940..36e332d9b21 100644 --- a/home/docs/help/alert_dingtalk.md +++ b/home/docs/help/alert_dingtalk.md @@ -17,7 +17,7 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin 2. **【Save access_token value of the WebHook address of the robot】** -> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` +> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` > Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【Alarm notification】->【Add new recipient】 ->【Select DingDing robot notification method】->【Set DingDing robot ACCESS_TOKEN】-> 【Confirm】** diff --git a/home/docs/help/alert_discord.md b/home/docs/help/alert_discord.md index 7aa565c0acf..68296148f22 100644 --- a/home/docs/help/alert_discord.md +++ b/home/docs/help/alert_discord.md @@ -61,8 +61,8 @@ keywords: [open source monitoring tool, open source alerter, open source Discord 1. Discord doesn't receive bot alert notifications -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured +> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured > Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/docs/help/alert_email.md b/home/docs/help/alert_email.md index 3beddfbf4e8..1fc45db80f4 100644 --- a/home/docs/help/alert_email.md +++ b/home/docs/help/alert_email.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source alerter, open source email n ![email](/img/docs/help/alert-notice-1.png) -2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** +2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** ![email](/img/docs/help/alert-notice-2.png) ![email](/img/docs/help/alert-notice-3.png) diff --git a/home/docs/help/alert_feishu.md b/home/docs/help/alert_feishu.md index 8f7e9391001..38f7c72cf03 100644 --- a/home/docs/help/alert_feishu.md +++ b/home/docs/help/alert_feishu.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** @@ -28,7 +28,7 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 1. FeiShu group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/docs/help/alert_slack.md b/home/docs/help/alert_slack.md index 5148432fe8b..26bde4ed2e5 100644 --- a/home/docs/help/alert_slack.md +++ b/home/docs/help/alert_slack.md @@ -29,7 +29,7 @@ Refer to the official website document [Sending messages using Incoming Webhooks 1. Slack did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the alarm information has been triggered in the alarm center > Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_smn.md b/home/docs/help/alert_smn.md index b4013b9f902..53774315561 100644 --- a/home/docs/help/alert_smn.md +++ b/home/docs/help/alert_smn.md @@ -37,7 +37,7 @@ keywords: [ open source monitoring tool, open source alerter, open source Huawei 1. Huawei Cloud SMN did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the alarm information has been triggered in the alarm center > Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_telegram.md b/home/docs/help/alert_telegram.md index cb60f266778..1689788f0f4 100644 --- a/home/docs/help/alert_telegram.md +++ b/home/docs/help/alert_telegram.md @@ -58,8 +58,8 @@ Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token 1. 
Telegram did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured +> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured > UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_threshold_expr.md b/home/docs/help/alert_threshold_expr.md index 6b8772388e6..c999d887982 100644 --- a/home/docs/help/alert_threshold_expr.md +++ b/home/docs/help/alert_threshold_expr.md @@ -58,7 +58,7 @@ This variable is mainly used for calculations involving multiple instances. For `responseTime>=400` 2. API Monitoring -> Alert when response time is greater than 3000ms `responseTime>3000` -3. Overall Monitoring -> Alert when response time for URL (instance) path 'https://baidu.com/book/3' is greater than 200ms +3. Overall Monitoring -> Alert when response time for URL (instance) path '' is greater than 200ms `equals(instance,"https://baidu.com/book/3")&&responseTime>200` 4. MYSQL Monitoring -> Alert when 'threads_running' metric under 'status' exceeds 7 `threads_running>7` diff --git a/home/docs/help/alert_wework.md b/home/docs/help/alert_wework.md index ca14d5615fa..ce344200301 100644 --- a/home/docs/help/alert_wework.md +++ b/home/docs/help/alert_wework.md @@ -15,7 +15,7 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 2. 
**【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** @@ -32,7 +32,7 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 1. The enterprise wechat group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/almalinux.md b/home/docs/help/almalinux.md index 40a07028c96..695a8be57b3 100644 --- a/home/docs/help/almalinux.md +++ b/home/docs/help/almalinux.md @@ -107,4 +107,3 @@ Statistics for the top 10 processes using memory. 
Statistics include: process ID | command | None | Executed command | --- - diff --git a/home/docs/help/api.md b/home/docs/help/api.md index 98763e0eefe..7e068a390b3 100644 --- a/home/docs/help/api.md +++ b/home/docs/help/api.md @@ -34,4 +34,3 @@ keywords: [open source monitoring tool, monitoring http api] | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/docs/help/centos.md b/home/docs/help/centos.md index 60b770ebf96..858a1d2bb94 100644 --- a/home/docs/help/centos.md +++ b/home/docs/help/centos.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source os monitoring tool, monitori | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/docs/help/clickhouse.md b/home/docs/help/clickhouse.md index efd873d1f32..d9994148bcd 100644 --- a/home/docs/help/clickhouse.md +++ b/home/docs/help/clickhouse.md @@ -93,4 +93,3 @@ keywords: [open source monitoring system, open source database monitoring, Click | MarkCacheBytes | N/A | Size of marks cache in StorageMergeTree | | MarkCacheFiles | N/A | Number of files in marks cache for StorageMergeTree | | MaxPartCountForPartition | N/A | Maximum active data blocks in partitions | - diff --git a/home/docs/help/debian.md b/home/docs/help/debian.md index 14cee060aaf..47487573f36 100644 --- a/home/docs/help/debian.md +++ b/home/docs/help/debian.md @@ -95,4 +95,3 @@ Metric Unit: - Memory Usage Rate: % - CPU Usage Rate: % - diff --git a/home/docs/help/dm.md b/home/docs/help/dm.md index 82159bf2408..f8e031bfe20 100644 --- a/home/docs/help/dm.md +++ b/home/docs/help/dm.md @@ -46,4 +46,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | dm_sql_thd | None | Thread for writing dmsql dmserver | | dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | | 
dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | - diff --git a/home/docs/help/dns.md b/home/docs/help/dns.md index d8dbd8d0921..3d6a5fe4b8e 100644 --- a/home/docs/help/dns.md +++ b/home/docs/help/dns.md @@ -68,4 +68,3 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito | Section0 | none | Additional information for DNS queries. | > The metric set collects up to 10 records, with metric names from Section0 to Section9. - diff --git a/home/docs/help/docker.md b/home/docs/help/docker.md index 0e3a1f0b428..63fe3b03a19 100644 --- a/home/docs/help/docker.md +++ b/home/docs/help/docker.md @@ -32,7 +32,7 @@ systemctl daemon-reload systemctl restart docker ``` -**Note: Remember to open the `2375` port number in the server console. ** +**Note: Remember to open the `2375` port number in the server console.** **3. If the above method does not work:** @@ -99,4 +99,3 @@ firewall-cmd --reload | cpu_delta | None | The number of CPUs already used by the Docker container | | number_cpus | None | The number of CPUs that the Docker container can use | | cpu_usage | None | Docker container CPU usage | - diff --git a/home/docs/help/doris_be.md b/home/docs/help/doris_be.md index 8dcde7b549b..3e6fd37de03 100644 --- a/home/docs/help/doris_be.md +++ b/home/docs/help/doris_be.md @@ -168,4 +168,3 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库BE监控] | 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|------------------------------------------| | value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | - diff --git a/home/docs/help/doris_fe.md b/home/docs/help/doris_fe.md index b478b2eaadb..ecfad855ba6 100644 --- a/home/docs/help/doris_fe.md +++ b/home/docs/help/doris_fe.md @@ -130,4 +130,3 @@ Can observe the number of import transactions in various states to determine if | committed | None | Committed | | visible | None | Visible | | aborted | None | Aborted / Revoked | - diff --git a/home/docs/help/dynamic_tp.md b/home/docs/help/dynamic_tp.md index 
fd36206bc6e..332767b2a39 100644 --- a/home/docs/help/dynamic_tp.md +++ b/home/docs/help/dynamic_tp.md @@ -99,4 +99,3 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo | dynamic | None | Dynamic thread pool or not | | run_timeout_count | None | Number of running timeout tasks | | queue_timeout_count | None | Number of tasks waiting for timeout | - diff --git a/home/docs/help/elasticsearch.md b/home/docs/help/elasticsearch.md index 3ac3d62a7e1..f0c29a8fd9f 100644 --- a/home/docs/help/elasticsearch.md +++ b/home/docs/help/elasticsearch.md @@ -61,4 +61,3 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] | disk_free | GB | Disk Free | | disk_total | GB | Disk Total | | disk_used_percent | % | Disk Used Percent | - diff --git a/home/docs/help/euleros.md b/home/docs/help/euleros.md index 786dab30afc..5fad0c856ae 100644 --- a/home/docs/help/euleros.md +++ b/home/docs/help/euleros.md @@ -105,4 +105,3 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/docs/help/flink.md b/home/docs/help/flink.md index bd731a6dee6..9fb7c7cb9e1 100644 --- a/home/docs/help/flink.md +++ b/home/docs/help/flink.md @@ -33,4 +33,3 @@ keywords: [open source monitoring tool, open source flink monitoring tool] | task_total | Units | Total number of tasks. | | jobs_running | Units | Number of jobs running. | | jobs_failed | Units | Number of jobs failed. 
| - diff --git a/home/docs/help/flink_on_yarn.md b/home/docs/help/flink_on_yarn.md index 1ae8e5ebd51..9340b7681aa 100644 --- a/home/docs/help/flink_on_yarn.md +++ b/home/docs/help/flink_on_yarn.md @@ -135,4 +135,3 @@ | Status.JVM.Memory.Heap.Max | MB | Maximum JVM heap memory | | Status.Flink.Memory.Managed.Total | MB | Total managed memory by Flink | | Status.Shuffle.Netty.UsedMemory | MB | Used memory by Netty Shuffle | - diff --git a/home/docs/help/freebsd.md b/home/docs/help/freebsd.md index 51d0ed9ab0b..d6505d83dd1 100644 --- a/home/docs/help/freebsd.md +++ b/home/docs/help/freebsd.md @@ -85,4 +85,3 @@ Statistics of the top 10 processes using memory. Statistics include: Process ID, | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/docs/help/ftp.md b/home/docs/help/ftp.md index 50a571eb7a7..72d55e8c1e2 100644 --- a/home/docs/help/ftp.md +++ b/home/docs/help/ftp.md @@ -32,4 +32,3 @@ keywords: [ open source monitoring tool, open source ftp server monitoring tool, |---------------|-------------|----------------------------------------------------------| | Is Active | none | Check if the directory exists and has access permission. | | Response Time | ms | Response Time | - diff --git a/home/docs/help/fullsite.md b/home/docs/help/fullsite.md index 6145f238bdc..bad94c4b751 100644 --- a/home/docs/help/fullsite.md +++ b/home/docs/help/fullsite.md @@ -5,8 +5,8 @@ sidebar_label: Full site Monitor keywords: [open source monitoring tool, open source website monitoring tool, monitoring sitemap metrics] --- -> Available or not to monitor all pages of the website. -> A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. +> Available or not to monitor all pages of the website. +> A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. 
> Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. ### Configuration parameter @@ -32,4 +32,3 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon | statusCode | none | Response HTTP status code for requesting the website | | responseTime | ms | Website response time | | errorMsg | none | Error message feedback after requesting the website | - diff --git a/home/docs/help/guide.md b/home/docs/help/guide.md index ce182746ffa..8728b7f8e37 100644 --- a/home/docs/help/guide.md +++ b/home/docs/help/guide.md @@ -9,7 +9,7 @@ sidebar_label: Help Center ## 🔬 Monitoring services -> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. +> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. > Planned monitoring type:application service, database, operating system, cloud native, open source middleware. ### Application service monitoring @@ -115,8 +115,8 @@ More details see 👉 [Threshold alarm](alert_threshold)
### Alarm notification -> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) -> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. +> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) +> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. > After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver.  👉 [Configure Email Notification](alert_email)
diff --git a/home/docs/help/hadoop.md b/home/docs/help/hadoop.md index 56f19472277..e12a44807ea 100644 --- a/home/docs/help/hadoop.md +++ b/home/docs/help/hadoop.md @@ -87,4 +87,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/hbase_master.md b/home/docs/help/hbase_master.md index 1e3efe84af7..d30c8d4bdf7 100644 --- a/home/docs/help/hbase_master.md +++ b/home/docs/help/hbase_master.md @@ -57,4 +57,3 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po | receivedBytes | MB | Cluster received data volume | | sentBytes | MB | Cluster sent data volume (MB) | | clusterRequests | none | Total number of cluster requests | - diff --git a/home/docs/help/hbase_regionserver.md b/home/docs/help/hbase_regionserver.md index 0a77eb5441b..a2940b9048c 100644 --- a/home/docs/help/hbase_regionserver.md +++ b/home/docs/help/hbase_regionserver.md @@ -91,4 +91,3 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. | MemHeapMaxM | None | Cluster balance load times | | MemMaxM | None | RPC handle count | | GcCount | MB | Cluster data reception volume | - diff --git a/home/docs/help/hdfs_datanode.md b/home/docs/help/hdfs_datanode.md index 164adc7d6df..68e1c4a38de 100644 --- a/home/docs/help/hdfs_datanode.md +++ b/home/docs/help/hdfs_datanode.md @@ -54,4 +54,3 @@ Retrieve the HTTP monitoring port for the Apache HDFS DataNode. 
Value: `dfs.data | Metric Name | Metric Unit | Metric Description | |-------------|-------------|--------------------| | StartTime | | Startup time | - diff --git a/home/docs/help/hdfs_namenode.md b/home/docs/help/hdfs_namenode.md index 978daac3613..975c2e5d935 100644 --- a/home/docs/help/hdfs_namenode.md +++ b/home/docs/help/hdfs_namenode.md @@ -90,4 +90,3 @@ Ensure that you have obtained the JMX monitoring port for the HDFS NameNode. | ThreadsBlocked | Count | Number of threads in BLOCKED state | | ThreadsWaiting | Count | Number of threads in WAITING state | | ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | - diff --git a/home/docs/help/hive.md b/home/docs/help/hive.md index 3dfbf0bfb78..34e8b53ca33 100644 --- a/home/docs/help/hive.md +++ b/home/docs/help/hive.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | init | MB | The initial amount of memory requested for the memory pool. | | max | MB | The maximum amount of memory that can be allocated for the memory pool. | | used | MB | The amount of memory currently being used by the memory pool. | - diff --git a/home/docs/help/http_sd.md b/home/docs/help/http_sd.md index 6b8de487555..122b159f41b 100644 --- a/home/docs/help/http_sd.md +++ b/home/docs/help/http_sd.md @@ -49,4 +49,3 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito | Address | | | | Port | | | | Health Status | | Current health status of service | - diff --git a/home/docs/help/huawei_switch.md b/home/docs/help/huawei_switch.md index 902c0596965..6bc99169bce 100644 --- a/home/docs/help/huawei_switch.md +++ b/home/docs/help/huawei_switch.md @@ -51,4 +51,3 @@ This document only introduces the monitoring indicators queried in the monitor t | ifOutErrors | none | For packet-oriented interfaces, the number of outbound packets that could not be transmitted because of errors. 
For character-oriented or fixed-length interfaces, the number of outbound transmission units that could not be transmitted because of errors. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | | ifAdminStatus | none | The desired state of the interface. The testing(3) state indicates that no operational packets can be passed. When a managed system initializes, all interfaces start with ifAdminStatus in the down(2) state. As a result of either explicit management action or per configuration information retained by the managed system, ifAdminStatus is then changed to either the up(1) or testing(3) states (or remains in the down(2) state). | | ifOperStatus | none | The current operational state of the interface. The testing(3) state indicates that no operational packets can be passed. If ifAdminStatus is down(2) then ifOperStatus should be down(2). If ifAdminStatus is changed to up(1) then ifOperStatus should change to up(1) if the interface is ready to transmit and receive network traffic; it should change to dormant(5) if the interface is waiting for external actions (such as a serial line waiting for an incoming connection); it should remain in the down(2) state if and only if there is a fault that prevents it from going to the up(1) state; it should remain in the notPresent(6) state if the interface has missing (typically, hardware) components. 
| - diff --git a/home/docs/help/hugegraph.md b/home/docs/help/hugegraph.md index 66b0574aab7..90334a4bc03 100644 --- a/home/docs/help/hugegraph.md +++ b/home/docs/help/hugegraph.md @@ -138,4 +138,3 @@ Check the `rest-server.properties` file to obtain the value of the `restserver_p | garbage_collector_g1_old_generation_count | NONE | Indicates the number of old generation garbage collections by G1 garbage collector | | garbage_collector_g1_old_generation_time | NONE | Indicates the total time spent in old generation garbage collections by G1 garbage collector | | garbage_collector_time_unit | NONE | Indicates the unit of garbage collection time (such as milliseconds, seconds, etc.) | - diff --git a/home/docs/help/imap.md b/home/docs/help/imap.md index a6cf82e577b..88e4e08a5e7 100644 --- a/home/docs/help/imap.md +++ b/home/docs/help/imap.md @@ -45,4 +45,3 @@ Collect information on each folder in the email (custom folders can be configure | Total message count | None | The total number of emails in this folder | | Recent message count | None | The number of recently received emails in this folder | | Unseen message count | None | The number of unread emails in this folder | - diff --git a/home/docs/help/influxdb.md b/home/docs/help/influxdb.md index 92c5da380ef..31fb3efec6c 100644 --- a/home/docs/help/influxdb.md +++ b/home/docs/help/influxdb.md @@ -63,4 +63,3 @@ keywords: [open source monitoring system, open source database monitoring, Influ |-------------|-------------|-------------------------| | result | N/A | Result | | org | N/A | Organization identifier | - diff --git a/home/docs/help/influxdb_promql.md b/home/docs/help/influxdb_promql.md index afed14cad7a..c8b55c9ae76 100644 --- a/home/docs/help/influxdb_promql.md +++ b/home/docs/help/influxdb_promql.md @@ -59,4 +59,3 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | 
| value | None | Metric value | - diff --git a/home/docs/help/iotdb.md b/home/docs/help/iotdb.md index bec827feb73..011b9cbec12 100644 --- a/home/docs/help/iotdb.md +++ b/home/docs/help/iotdb.md @@ -118,4 +118,3 @@ predefinedMetrics: |-------------|-------------|----------------------------------| | name | None | name | | connection | none | thrift current connection number | - diff --git a/home/docs/help/issue.md b/home/docs/help/issue.md index a48e84cfd4e..9904fab6551 100644 --- a/home/docs/help/issue.md +++ b/home/docs/help/issue.md @@ -17,42 +17,41 @@ sidebar_label: Common issues 3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. +> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. +> See ### Docker Deployment common issues -1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** +1. 
**MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. +> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` -2. **According to the process deploy,visit http://ip:1157/ no interface** +2. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. +> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. > two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. > >> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. +> one:Check whether database account and password configured is correct, the database is created. 
> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. ### Package Deployment common issues -1. **According to the process deploy,visit http://ip:1157/ no interface** +1. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. +> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. +> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. +> one:Check whether database account and password configured is correct, the database is created. > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 
- diff --git a/home/docs/help/jetty.md b/home/docs/help/jetty.md index 6e069553dba..ccec65b5559 100644 --- a/home/docs/help/jetty.md +++ b/home/docs/help/jetty.md @@ -92,4 +92,3 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/jvm.md b/home/docs/help/jvm.md index 3b47e0e7a8a..477d9fbece1 100644 --- a/home/docs/help/jvm.md +++ b/home/docs/help/jvm.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source java jvm monitoring tool, mo 1. Add JVM `VM options` When Start Server ⚠️ customIP -Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#remote +Refer: ```shell -Djava.rmi.server.hostname=customIP @@ -74,4 +74,3 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/kafka.md b/home/docs/help/kafka.md index f86913733b1..48d06b2037b 100644 --- a/home/docs/help/kafka.md +++ b/home/docs/help/kafka.md @@ -87,4 +87,3 @@ exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" | FiveMinuteRate | % | Five Minute Rate | | MeanRate | % | Mean Rate | | FifteenMinuteRate | % | Fifteen Minute Rate | - diff --git a/home/docs/help/kafka_promql.md b/home/docs/help/kafka_promql.md index ea358d0de8d..203ef2bef4b 100644 --- a/home/docs/help/kafka_promql.md +++ b/home/docs/help/kafka_promql.md @@ -64,4 +64,3 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka 1. If Kafka is enabled with JMX monitoring, you can use [Kafka](kafka) Monitoring. 2. 
If Kafka cluster deploys kafka_exporter to expose monitoring metrics, you can refer to [Prometheus task](prometheus) to configure the Prometheus collection task to monitor kafka. - diff --git a/home/docs/help/kubernetes.md b/home/docs/help/kubernetes.md index 45adda576fc..3cb2336e768 100644 --- a/home/docs/help/kubernetes.md +++ b/home/docs/help/kubernetes.md @@ -13,7 +13,7 @@ If you want to monitor the information in 'Kubernetes', you need to obtain an au Refer to the steps to obtain token -#### method one: +#### method one 1. Create a service account and bind the default cluster-admin administrator cluster role @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### method two: +#### method two ```shell kubectl create serviceaccount cluster-admin @@ -93,4 +93,3 @@ kubectl create token --duration=1000h cluster-admin | cluster_ip | None | cluster ip | | selector | None | tag selector matches | | creation_time | None | Created | - diff --git a/home/docs/help/linux.md b/home/docs/help/linux.md index 6c22028114c..f5c77a72ca6 100644 --- a/home/docs/help/linux.md +++ b/home/docs/help/linux.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source linux monitoring tool, monit | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/docs/help/mariadb.md b/home/docs/help/mariadb.md index f5ef40cbfbc..0f8d585449c 100644 --- a/home/docs/help/mariadb.md +++ b/home/docs/help/mariadb.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Attention, Need Add MYSQL jdbc driver jar -- Download the MYSQL jdbc driver jar package, such as mysql-connector-java-8.1.0.jar. https://mvnrepository.com/artifact/com.mysql/mysql-connector-j/8.1.0 +- Download the MYSQL jdbc driver jar package, such as mysql-connector-java-8.1.0.jar. 
- Copy the jar package to the `hertzbeat/ext-lib` directory. - Restart the HertzBeat service. @@ -57,4 +57,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | innodb_data_writes | none | innodb average number of writes from file per second | | innodb_data_read | KB | innodb average amount of data read per second | | innodb_data_written | KB | innodb average amount of data written per second | - diff --git a/home/docs/help/memcached.md b/home/docs/help/memcached.md index 920da021e6b..f3c1ddfab55 100644 --- a/home/docs/help/memcached.md +++ b/home/docs/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance with 1.4 You need to use the stats command to view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. @@ -32,7 +32,7 @@ STAT version 1.4.15 ... ``` -**There is help_doc: https://www.runoob.com/memcached/memcached-stats.html** +**There is help_doc: ** ### Configuration parameter @@ -67,4 +67,3 @@ STAT version 1.4.15 | cmd_flush | | Flush command request count | | get_misses | | Get command misses | | delete_misses | | Delete command misses | - diff --git a/home/docs/help/mongodb.md b/home/docs/help/mongodb.md index 9c536e73d7a..52582b47097 100644 --- a/home/docs/help/mongodb.md +++ b/home/docs/help/mongodb.md @@ -93,4 +93,3 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m | pageSize | none | Size of a memory page in bytes. | | numPages | none | Total number of memory pages. | | maxOpenFiles | none | Maximum number of open files allowed. 
| - diff --git a/home/docs/help/mongodb_atlas.md b/home/docs/help/mongodb_atlas.md index b295a517adb..66a895bd2fa 100644 --- a/home/docs/help/mongodb_atlas.md +++ b/home/docs/help/mongodb_atlas.md @@ -75,4 +75,3 @@ keywords: [open-source monitoring system, open-source database monitoring, Mongo | Storage Size | Bytes | Size of storage used | | Indexes | None | Number of indexes | | Index Size | Bytes | Total size of indexes | - diff --git a/home/docs/help/mysql.md b/home/docs/help/mysql.md index 6d689f68765..5da22ef2ddc 100644 --- a/home/docs/help/mysql.md +++ b/home/docs/help/mysql.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo ### Attention, Need Add MYSQL jdbc driver jar -- Download the MYSQL jdbc driver jar package, such as mysql-connector-java-8.1.0.jar. https://mvnrepository.com/artifact/com.mysql/mysql-connector-j/8.1.0 +- Download the MYSQL jdbc driver jar package, such as mysql-connector-java-8.1.0.jar. - Copy the jar package to the `hertzbeat/ext-lib` directory. - Restart the HertzBeat service. 
@@ -57,4 +57,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | innodb_data_writes | none | innodb average number of writes from file per second | | innodb_data_read | KB | innodb average amount of data read per second | | innodb_data_written | KB | innodb average amount of data written per second | - diff --git a/home/docs/help/nacos.md b/home/docs/help/nacos.md index 4de3661c17b..f7c8815521f 100644 --- a/home/docs/help/nacos.md +++ b/home/docs/help/nacos.md @@ -92,4 +92,3 @@ More information see [Nacos monitor guide](https://nacos.io/en-us/docs/monitor-g | nacos_monitor{name='configListenSize'} | none | listened configuration file count | | nacos_client_request_seconds_count | none | request count | | nacos_client_request_seconds_sum | second | request time | - diff --git a/home/docs/help/nebulagraph.md b/home/docs/help/nebulagraph.md index c23e39c14fe..60ac139f827 100644 --- a/home/docs/help/nebulagraph.md +++ b/home/docs/help/nebulagraph.md @@ -14,13 +14,13 @@ The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. ``` -### +### **1、Obtain available parameters through the stats and rocksdb stats interfaces.** 1.1、 If you only need to get nebulaGraph_stats, you need to ensure that you have access to stats, or you'll get errors. -The default port is 19669 and the access address is http://ip:19669/stats +The default port is 19669 and the access address is 1.2、If you need to obtain additional parameters for rocksdb stats, you need to ensure that you have access to rocksdb stats, otherwise an error will be reported. @@ -28,11 +28,11 @@ stats, otherwise an error will be reported. Once you connect to NebulaGraph for the first time, you must first register your Storage service in order to properly query your data. 
-**There is help_doc: https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** +**There is help_doc: ** -**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** +**** -The default port is 19779 and the access address is:http://ip:19779/rocksdb_stats +The default port is 19779 and the access address is: ### Configuration parameter @@ -53,7 +53,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat #### Metrics Set:nebulaGraph_stats Too many indicators, related links are as follows -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** +**** | Metric name | Metric unit | Metric help description | |---------------------------------------|-------------|--------------------------------------------------------------| @@ -65,11 +65,10 @@ Too many indicators, related links are as follows #### Metrics Set:rocksdb_stats Too many indicators, related links are as follows -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** +**** | Metric name | Metric unit | Metric help description | |----------------------------|-------------|-------------------------------------------------------------| | rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. | | rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | | ... | | ... 
| - diff --git a/home/docs/help/nebulagraph_cluster.md b/home/docs/help/nebulagraph_cluster.md index c39195f427e..1a4291dd5f6 100644 --- a/home/docs/help/nebulagraph_cluster.md +++ b/home/docs/help/nebulagraph_cluster.md @@ -89,4 +89,3 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open | version | None | Version | > If you need to customize monitoring templates to collect data from NebulaGraph clusters, please refer to: [NGQL Custom Monitoring](../advanced/extend-ngql.md) - diff --git a/home/docs/help/nginx.md b/home/docs/help/nginx.md index f630e4d4d24..a5662be985f 100644 --- a/home/docs/help/nginx.md +++ b/home/docs/help/nginx.md @@ -46,8 +46,8 @@ server { location /nginx-status { stub_status on; access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } ``` @@ -93,8 +93,8 @@ http { server { location /req-status { req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } } @@ -109,7 +109,7 @@ nginx -s reload 4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. 
-**Refer Doc: https://github.com/zls0424/ngx_req_status** +**Refer Doc: ** **⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** @@ -152,4 +152,3 @@ nginx -s reload | requests | | Total requests | | active | | Current concurrent connections | | bandwidth | kb | Current bandwidth | - diff --git a/home/docs/help/ntp.md b/home/docs/help/ntp.md index 666f2a6b39a..fc7f7925ca6 100644 --- a/home/docs/help/ntp.md +++ b/home/docs/help/ntp.md @@ -35,4 +35,3 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito | stratum | | The stratumevel of the NTP server, indicating its distance from a reference clock). | | referenceId | | An identifier that indicates the reference clock or time source used by the NTP server). | | precision | | The precision of the NTP server's clock, indicating its accuracy). | - diff --git a/home/docs/help/openai.md b/home/docs/help/openai.md index 7165925372f..a7a10de2b19 100644 --- a/home/docs/help/openai.md +++ b/home/docs/help/openai.md @@ -12,8 +12,8 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI > 1. Open Chrome browser's network request interface > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` -> 2. Visit https://platform.openai.com/usage -> 3. Find the request to https://api.openai.com/dashboard/billing/usage +> 2. Visit +> 3. Find the request to > 4. Find the Authorization field in the request headers, and copy the content after `Bearer`. 
For example: `sess-123456` ### Notes @@ -81,4 +81,3 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI | Tax IDs | None | Tax IDs | | Billing Address | None | Billing address | | Business Address | None | Business address | - diff --git a/home/docs/help/opengauss.md b/home/docs/help/opengauss.md index 28171658951..3490bb8b003 100644 --- a/home/docs/help/opengauss.md +++ b/home/docs/help/opengauss.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/docs/help/opensuse.md b/home/docs/help/opensuse.md index acaf86632d4..a4c1fc873de 100644 --- a/home/docs/help/opensuse.md +++ b/home/docs/help/opensuse.md @@ -105,4 +105,3 @@ Statistics for the top 10 processes using memory. Statistics include: Process ID | mem_usage | % | Memory usage rate | | cpu_usage | % | CPU usage rate | | command | None | Executed command | - diff --git a/home/docs/help/oracle.md b/home/docs/help/oracle.md index e8d5ddab704..051ddea95da 100644 --- a/home/docs/help/oracle.md +++ b/home/docs/help/oracle.md @@ -67,4 +67,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | qps | QPS | I/O Requests per second | | tps | TPS | User transaction per second | | mbps | MBPS | I/O Megabytes per second | - diff --git a/home/docs/help/ping.md b/home/docs/help/ping.md index 7c894f488ff..bed89d53dcf 100644 --- a/home/docs/help/ping.md +++ b/home/docs/help/ping.md @@ -31,7 +31,6 @@ keywords: [open source monitoring tool, open source network monitoring tool, mon 1. Ping connectivity monitoring exception when installing hertzbeat for package deployment. 
The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - +> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. +> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. +> See diff --git a/home/docs/help/plugin.md b/home/docs/help/plugin.md index ccca94fdc45..0e2f12a65e2 100644 --- a/home/docs/help/plugin.md +++ b/home/docs/help/plugin.md @@ -18,7 +18,7 @@ Currently, `HertzBeat` only set up the trigger `alert` method after alarm, if yo 1. Pull the master branch code `git clone https://github.com/apache/hertzbeat.git` and locate the `plugin` module's `Plugin` interface. ![plugin-1.png](/img/docs/help/plugin-1.png) -2. In the `org.apache.hertzbeat.plugin.impl` directory, create a new interface implementation class, such as `org.apache.hertzbeat.plugin.impl.DemoPluginImpl`, and receive the `Alert` class as a parameter, implement the `alert ` method, the logic is customized by the user, here we simply print the object. +2. In the `org.apache.hertzbeat.plugin.impl` directory, create a new interface implementation class, such as `org.apache.hertzbeat.plugin.impl.DemoPluginImpl`, and receive the `Alert` class as a parameter, implement the `alert` method, the logic is customized by the user, here we simply print the object. 
![plugin-2.png](/img/docs/help/plugin-2.png) 3. Add the fully qualified names of the interface implementation classes to the `META-INF/services/org.apache.hertzbeat.plugin.Plugin` file, with each implementation class name on a separate line. 4. Package the `hertzbeat-plugin` module. @@ -29,4 +29,3 @@ Currently, `HertzBeat` only set up the trigger `alert` method after alarm, if yo ![plugin-4.png](/img/docs/help/plugin-4.png) 6. Then restart `HertzBeat` to enable the customized post-alert handling policy. - diff --git a/home/docs/help/pop3.md b/home/docs/help/pop3.md index fffff2a494f..c73884a0afe 100644 --- a/home/docs/help/pop3.md +++ b/home/docs/help/pop3.md @@ -45,4 +45,3 @@ If you want to monitor information in 'POP3' with this monitoring type, you just |--------------|-------------|-----------------------------------------| | email_count | | Number of emails | | mailbox_size | kb | The total size of emails in the mailbox | - diff --git a/home/docs/help/port.md b/home/docs/help/port.md index 7f420fd1375..6ae4a6bda2d 100644 --- a/home/docs/help/port.md +++ b/home/docs/help/port.md @@ -26,4 +26,3 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/docs/help/postgresql.md b/home/docs/help/postgresql.md index 57834a713bd..5191f7d325d 100644 --- a/home/docs/help/postgresql.md +++ b/home/docs/help/postgresql.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/docs/help/prestodb.md b/home/docs/help/prestodb.md index 7c438e96cef..3fb1cee3d78 100644 --- a/home/docs/help/prestodb.md +++ 
b/home/docs/help/prestodb.md @@ -72,4 +72,3 @@ keywords: [ open source monitoring system, open source database monitoring, pres | state | None | State | | self | None | Self | | lastHeartbeat | None | Last Heartbeat | - diff --git a/home/docs/help/process.md b/home/docs/help/process.md index 825a20ac43b..61dacc52ba6 100644 --- a/home/docs/help/process.md +++ b/home/docs/help/process.md @@ -85,4 +85,3 @@ Includes metrics for: - read_bytes (Actual number of bytes read by the process from disk) - write_bytes (Actual number of bytes written by the process to disk) - cancelled_write_bytes (Actual number of bytes cancelled by the process while writing to disk) - diff --git a/home/docs/help/prometheus.md b/home/docs/help/prometheus.md index 571a2e9b51b..39af4dff7e4 100755 --- a/home/docs/help/prometheus.md +++ b/home/docs/help/prometheus.md @@ -39,4 +39,3 @@ You can use the following configuration: - Endpoint Path: `/actuator/prometheus` Keep the rest of the settings default. - diff --git a/home/docs/help/rabbitmq.md b/home/docs/help/rabbitmq.md index 917ca63c3d3..e49d572ee72 100644 --- a/home/docs/help/rabbitmq.md +++ b/home/docs/help/rabbitmq.md @@ -18,7 +18,7 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo rabbitmq-plugins enable rabbitmq_management ``` -2. Access http://ip:15672/ with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. +2. Access with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. 3. Just add the corresponding RabbitMQ monitoring in HertzBeat, the parameters use the IP port of Management, and the default account password. 
@@ -123,4 +123,3 @@ rabbitmq-plugins enable rabbitmq_management | message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | - diff --git a/home/docs/help/redhat.md b/home/docs/help/redhat.md index 2a8472e00d6..28b076f129d 100644 --- a/home/docs/help/redhat.md +++ b/home/docs/help/redhat.md @@ -105,4 +105,3 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/docs/help/redis.md b/home/docs/help/redis.md index bdb78ce3584..dca24d20781 100644 --- a/home/docs/help/redis.md +++ b/home/docs/help/redis.md @@ -237,4 +237,3 @@ keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] | cmdstat_lpop | 无 | lpop命令的统计信息 | | cmdstat_rpop | 无 | rpop命令的统计信息 | | cmdstat_llen | 无 | llen命令的统计信息 | - diff --git a/home/docs/help/redis_cluster.md b/home/docs/help/redis_cluster.md index 3aa41136f88..b10d3ff684e 100644 --- a/home/docs/help/redis_cluster.md +++ b/home/docs/help/redis_cluster.md @@ -85,6 +85,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to external: name: hertzbeat-redis-cluster ``` + 2. View the IP addresses of all containers from the network, which is required when building a cluster. ```bash @@ -132,6 +133,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to } }, ``` + 3. Go inside the container to build a Redis cluster. ```bash @@ -148,6 +150,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to 192.168.117.2:6379 \ --cluster-replicas 1 ``` + 4. Specific operations. Add a redis monitor center, fill require parameters. 
@@ -158,7 +161,6 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to ![](/img/docs/help/redis-cluster-view.png) - ### Configuration Parameters +### Configuration Parameters Please see [REDIS](https://hertzbeat.apache.org/docs/help/redis) doc. - diff --git a/home/docs/help/rocketmq.md b/home/docs/help/rocketmq.md index f31dea47d9b..4fbe5e195a4 100644 --- a/home/docs/help/rocketmq.md +++ b/home/docs/help/rocketmq.md @@ -46,4 +46,3 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] | Consume_type | none | Consume type | | Consume_tps | none | Consume tps | | Delay | none | Delay | - diff --git a/home/docs/help/rockylinux.md b/home/docs/help/rockylinux.md index b1e093bc210..43a5f78d7ee 100644 --- a/home/docs/help/rockylinux.md +++ b/home/docs/help/rockylinux.md @@ -105,4 +105,3 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/docs/help/shenyu.md b/home/docs/help/shenyu.md index c7f12bbfaf0..aa4a43a8d5c 100644 --- a/home/docs/help/shenyu.md +++ b/home/docs/help/shenyu.md @@ -127,4 +127,3 @@ Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond |-------------|-------------|---------------------------------------------------------| | state | none | thread state | | value | None | The number of threads corresponding to the thread state | - diff --git a/home/docs/help/smtp.md b/home/docs/help/smtp.md index fedb17e0040..4be044bc090 100644 --- a/home/docs/help/smtp.md +++ b/home/docs/help/smtp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit Determine whether the server is available through the hello command in SMTP ``` -> see https://datatracker.ietf.org/doc/html/rfc821#page-13 +> see **Protocol Use:SMTP** @@ -38,4 +38,3 @@ Determine whether the server is available through the hello 
command in SMTP | response | | Response Status. | | smtpBanner | | Banner of SMTP server. | | heloInfo | | Response information returned by helo. | - diff --git a/home/docs/help/spark.md b/home/docs/help/spark.md index 41865300024..8bc045fc9a1 100644 --- a/home/docs/help/spark.md +++ b/home/docs/help/spark.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source java spark monitoring tool, 1. Add Spark `VM options` When Start Server ⚠️ customIP -Refer: https://spark.apache.org/docs/latest/spark-standalone.html +Refer: **监控配置spark的监控主要分为Master、Worker、driver、executor监控。Master和Worker的监控在spark集群运行时即可监控,Driver和Excutor的监控需要针对某一个app来进行监控。** **如果都要监控,需要根据以下步骤来配置** @@ -112,4 +112,3 @@ gement.jmxremote.port=8711 | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/docs/help/spring_gateway.md b/home/docs/help/spring_gateway.md index 66c5f0b4f29..7f27b7fe8ef 100644 --- a/home/docs/help/spring_gateway.md +++ b/home/docs/help/spring_gateway.md @@ -87,4 +87,3 @@ management: | predicate | None | This is a routing matching rule | | uri | None | This is a service resource identifier | | order | None | The priority of this route | - diff --git a/home/docs/help/springboot2.md b/home/docs/help/springboot2.md index 6452aff270e..08029dc23b5 100644 --- a/home/docs/help/springboot2.md +++ b/home/docs/help/springboot2.md @@ -93,4 +93,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ |-------------|-------------|--------------------------------------| | space | None | Memory space name | | mem_used | MB | This space occupies a memory size of | - diff --git a/home/docs/help/springboot3.md b/home/docs/help/springboot3.md index 47b3db10b5c..77d7032e436 100644 --- a/home/docs/help/springboot3.md +++ b/home/docs/help/springboot3.md @@ -89,4 +89,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ | Metric Name | Unit 
| Metric Description | |-------------|------|---------------------------------| | status | None | Service health status: UP, Down | - diff --git a/home/docs/help/sqlserver.md b/home/docs/help/sqlserver.md index 71bd8ebdc83..06e19252ede 100644 --- a/home/docs/help/sqlserver.md +++ b/home/docs/help/sqlserver.md @@ -57,20 +57,20 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo 1. SSL connection problem fixed -jdk version: jdk11 -Description of the problem: SQL Server 2019 uses the SA user connection to report an error +jdk version: jdk11 +Description of the problem: SQL Server 2019 uses the SA user connection to report an error Error message: ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". ClientConnectionId:xxxxxxxxxxxxxxxxxxxx ``` -Screenshot of the problem: +Screenshot of the problem: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) -solution: +solution: Use advanced settings when adding `SqlServer` monitoring, customize JDBC URL, add parameter configuration after the spliced jdbc url, ```;encrypt=true;trustServerCertificate=true;```This parameter true means unconditionally trust the server returned any root certificate. 
Example: ```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` -Reference document: [microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building- failed-unable-to-find-valid-certification/ba-p/2591304) +Reference document: [microsoft pkix-path-building-failed-unable-to-find-valid-certification]( failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/docs/help/ssl_cert.md b/home/docs/help/ssl_cert.md index e7b60fc8a89..253485f8b1a 100644 --- a/home/docs/help/ssl_cert.md +++ b/home/docs/help/ssl_cert.md @@ -31,4 +31,3 @@ keywords: [open source monitoring tool, open source ssl cert monitoring tool, mo | start_timestamp | ms millisecond | Validity start timestamp | | end_time | None | Expiration time | | end_timestamp | ms milliseconds | expiration timestamp | - diff --git a/home/docs/help/status.md b/home/docs/help/status.md index 0d9ce6ff28b..ab969150e1e 100644 --- a/home/docs/help/status.md +++ b/home/docs/help/status.md @@ -1,6 +1,7 @@ Here is the English translation of the provided text: --- + id: status title: Status Page sidebar_label: Status Page @@ -19,9 +20,9 @@ The fields that need to be filled in are as follows: |--------------------------|----------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Organization Name | Name of the organization | HertzBeat | | Organization Description | Detailed description of the organization | Apache HertzBeat (incubating) is an easy-to-use and user-friendly open-source real-time monitoring and alerting system, no agent required, high-performance cluster, compatible with 
Prometheus, providing powerful custom monitoring and status page building capabilities. | -| Website Link | URL of the organization's website for more information | https://hertzbeat.apache.org/ | -| Logo Image | Path or URL of the organization's official logo image, preferably in .svg format | https://hertzbeat.apache.org/zh-cn/img/hertzbeat-logo.svg | -| Feedback Address | Address to receive feedback | https://github.com/apache/hertzbeat/issues | +| Website Link | URL of the organization's website for more information | | +| Logo Image | Path or URL of the organization's official logo image, preferably in .svg format | | +| Feedback Address | Address to receive feedback | | | Theme Color | Main color tone of the status page | Click to select on the page | After filling in the organization information, click `Confirm`. diff --git a/home/docs/help/tidb.md b/home/docs/help/tidb.md index 83128c527c8..2378b224110 100644 --- a/home/docs/help/tidb.md +++ b/home/docs/help/tidb.md @@ -52,4 +52,3 @@ Due to the large number of metrics that can be monitored, only the metrics queri | max_connections | none | The maximum number of concurrent connections permitted for a single TiDB instance. This variable can be used for resources control. The default value 0 means no limit. When the value of this variable is larger than 0, and the number of connections reaches the value, the TiDB server rejects new connections from clients. | | datadir | none | The location where data is stored. This location can be a local path /tmp/tidb, or point to a PD server if the data is stored on TiKV. A value in the format of ${pd-ip}:${pd-port} indicates the PD server that TiDB connects to on startup. | | port | none | The port that the tidb-server is listening on when speaking the MySQL protocol. 
| - diff --git a/home/docs/help/time_expression.md b/home/docs/help/time_expression.md index 482fc04cc05..2f0711c4cf9 100644 --- a/home/docs/help/time_expression.md +++ b/home/docs/help/time_expression.md @@ -62,4 +62,3 @@ ${FORMATTER [{ + | - } ]} - `${time+1h+15s+30s}` calculates the time one hour, 15 minutes, and 30 seconds from now and formats it as `HH:mm:ss` 2. Complex expression template (if the built-in formatter does not meet your needs, you can combine multiple expressions) - `${@year}年${@month}月${@day}日` returns the current date formatted as yyyy年MM月dd日 - diff --git a/home/docs/help/tomcat.md b/home/docs/help/tomcat.md index 60591f85579..9f103dfe5be 100644 --- a/home/docs/help/tomcat.md +++ b/home/docs/help/tomcat.md @@ -71,4 +71,3 @@ keywords: [open source monitoring tool, open source tomcat monitoring tool, moni ```aidl CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` - diff --git a/home/docs/help/ubuntu.md b/home/docs/help/ubuntu.md index 8d3b65ce195..9de28efe095 100644 --- a/home/docs/help/ubuntu.md +++ b/home/docs/help/ubuntu.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source linux ubuntu monitoring tool | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/docs/help/udp_port.md b/home/docs/help/udp_port.md index 51c3098dc9a..85d4fcb4383 100644 --- a/home/docs/help/udp_port.md +++ b/home/docs/help/udp_port.md @@ -29,4 +29,3 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito | Metric name | Metric unit | Metric help description | |---------------|-------------------|-------------------------| | Response Time | Milliseconds (ms) | Website response time | - diff --git a/home/docs/help/website.md b/home/docs/help/website.md index 
afe86397c9e..1041755f156 100644 --- a/home/docs/help/website.md +++ b/home/docs/help/website.md @@ -27,4 +27,3 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/docs/help/websocket.md b/home/docs/help/websocket.md index 13d1f6eed31..1523a145bd6 100644 --- a/home/docs/help/websocket.md +++ b/home/docs/help/websocket.md @@ -31,4 +31,3 @@ keywords: [ open source monitoring tool, Websocket监控 ] | statusMessage | none | Status messages | | connection | none | Connect type | | upgrade | none | Upgraded protocols | - diff --git a/home/docs/help/windows.md b/home/docs/help/windows.md index e4be2bd6d96..99d305cbce5 100644 --- a/home/docs/help/windows.md +++ b/home/docs/help/windows.md @@ -8,10 +8,10 @@ keywords: [open source monitoring tool, open source windows monitoring tool, mon > Collect and monitor the general performance Metrics of Windows operating system through SNMP protocol. > Note⚠️ You need to start SNMP service for Windows server. 
-References: -[What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) +References: +[What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) +[What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) +[Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) [Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### Configuration parameter @@ -41,4 +41,3 @@ References: | services | number | Current number of services | | processes | number | Current number of processes | | responseTime | ms | Collection response time | - diff --git a/home/docs/help/yarn.md b/home/docs/help/yarn.md index d7f304ff910..cea4079abf7 100644 --- a/home/docs/help/yarn.md +++ b/home/docs/help/yarn.md @@ -81,4 +81,3 @@ Retrieve the HTTP monitoring port of Apache Yarn. Value: `yarn.resourcemanager.w | Metric Name | Unit | Metric Description | |-------------|------|--------------------| | StartTime | | Startup timestamp | - diff --git a/home/docs/help/zookeeper.md b/home/docs/help/zookeeper.md index ca7e026a4c4..00ecb487107 100644 --- a/home/docs/help/zookeeper.md +++ b/home/docs/help/zookeeper.md @@ -120,4 +120,3 @@ Complete! 
| user_name | none | Username | | user_home | none | User home directory | | user_dir | none | User current directory | - diff --git a/home/docs/introduce.md b/home/docs/introduce.md index 95b493c2cc5..6f6769ca55c 100644 --- a/home/docs/introduce.md +++ b/home/docs/introduce.md @@ -35,11 +35,12 @@ slug: / > HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. --- + ### Powerful Monitoring Templates > Before we discuss the customizable monitoring capabilities of HertzBeat, which we mentioned at the beginning, let's introduce the different monitoring templates of HertzBeat. And it is because of this monitoring template design that the advanced features come later. -HertzBeat itself did not create a data collection protocol for the monitoring client to adapt to. Instead, HertzBeat makes full use of the existing ecosystem, `SNMP protocol` to collect information from network switches and routers, `JMX specification` to collect information from Java applications, `JDBC specification` to collect information from datasets, `SSH` to directly connect to scripts to get the display information, `HTTP+ (JsonPath | prometheus, etc.) ` to parse the information from API interfaces, `IPMI protocol` to collect server information, and so on. +HertzBeat itself did not create a data collection protocol for the monitoring client to adapt to. Instead, HertzBeat makes full use of the existing ecosystem, `SNMP protocol` to collect information from network switches and routers, `JMX specification` to collect information from Java applications, `JDBC specification` to collect information from datasets, `SSH` to directly connect to scripts to get the display information, `HTTP+ (JsonPath | prometheus, etc.)` to parse the information from API interfaces, `IPMI protocol` to collect server information, and so on. 
HertzBeat uses these existing standard protocols or specifications, makes them abstractly configurable, and finally makes them all available in the form of YML format monitoring templates that can be written to create templates that use these protocols to collect any desired metrics data. ![hertzbeat](/img/blog/multi-protocol.png) @@ -51,7 +52,7 @@ Do you believe that users can just write a monitoring template on the UI page, c **There are a lot of built-in monitoring templates for users to add directly on the page, one monitoring type corresponds to one YML monitoring template**. -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), +* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), @@ -60,7 +61,7 @@ Do you believe that users can just write a monitoring template on the UI page, c 
[Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), +* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), @@ -69,13 +70,13 @@ Do you believe that users can just write a monitoring template on the UI page, c [Redis 
Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), [NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), +* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), [Red 
Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), +* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), @@ -83,31 +84,31 @@ Do you believe that users can just write a monitoring template on the UI page, c [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX 
MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), +* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) +* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) -- And More Your Custom 
Template. -- Notified Support `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. +* And More Your Custom Template. +* Notified Support `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. ### Powerful Customization -> From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. -> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. +> From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. +> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. > The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. ![hertzbeat](/img/docs/custom-arch.png) ### No Agent Required -> For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. -> You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. +> For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. +> You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. 
> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. -- For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. -- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. +* For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. +* For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. **Password and other sensitive information is encrypted on all links**. @@ -115,9 +116,9 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > When the number of monitors rises exponentially, the collection performance drops or the environment is unstable and prone to single point of failure of the collectors, then our collector clusters come into play. -- HertzBeat supports the deployment of collector clusters and the horizontal expansion of multiple collector clusters to exponentially increase the number of monitorable tasks and collection performance. -- Monitoring tasks are self-scheduled in the collector cluster, single collector hangs without sensing the failure to migrate the collection tasks, and the newly added collector nodes are automatically scheduled to share the collection pressure. -- It is very easy to switch between stand-alone mode and cluster mode without additional component deployment. 
+* HertzBeat supports the deployment of collector clusters and the horizontal expansion of multiple collector clusters to exponentially increase the number of monitorable tasks and collection performance. +* Monitoring tasks are self-scheduled in the collector cluster, single collector hangs without sensing the failure to migrate the collection tasks, and the newly added collector nodes are automatically scheduled to share the collection pressure. +* It is very easy to switch between stand-alone mode and cluster mode without additional component deployment. ![hertzbeat](/img/docs/cluster-arch.png) @@ -125,25 +126,25 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > Two locations, three centers, multi-cloud environments, multi-isolated networks, you may have heard of these scenarios. When there is a need for a unified monitoring system to monitor the IT resources of different isolated networks, this is where our Cloud Edge Collaboration comes in. -In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. +In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. `HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. ![hertzbeat](/img/docs/cluster-arch.png) ### Easy to Use -- Set **Monitoring+Alarm+Notification** All in one, no need to deploy multiple component services separately. 
-- Full UI interface operation, no matter adding new monitor, modifying monitor template, or alarm threshold notification, all can be done in WEB interface, no need to modify files or scripts or reboot. -- No Agent is needed, we only need to fill in the required IP, port, account, password and other parameters in the WEB interface. -- Customization friendly, only need a monitoring template YML, automatically generate monitoring management page, data chart page, threshold configuration for corresponding monitoring types. -- Threshold alarm notification friendly, based on the expression threshold configuration, a variety of alarm notification channels, support alarm silence, time label alarm level filtering and so on. +* Set **Monitoring+Alarm+Notification** All in one, no need to deploy multiple component services separately. +* Full UI interface operation, no matter adding new monitor, modifying monitor template, or alarm threshold notification, all can be done in WEB interface, no need to modify files or scripts or reboot. +* No Agent is needed, we only need to fill in the required IP, port, account, password and other parameters in the WEB interface. +* Customization friendly, only need a monitoring template YML, automatically generate monitoring management page, data chart page, threshold configuration for corresponding monitoring types. +* Threshold alarm notification friendly, based on the expression threshold configuration, a variety of alarm notification channels, support alarm silence, time label alarm level filtering and so on. ### Completely Open Source -- An open source collaboration product using the `Apache2` protocol, maintained by a free and open source community. -- No monitoring number `License`, monitoring type and other pseudo-open source restrictions . -- Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . 
-- Open source is not the same as free, dev based on HertzBeat must retain copyright, etc. +* An open source collaboration product using the `Apache2` protocol, maintained by a free and open source community. +* No monitoring number `License`, monitoring type and other pseudo-open source restrictions. +* Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack, convenient secondary development. +* Open source is not the same as free, dev based on HertzBeat must retain copyright, etc. **HertzBeat has been included in the [CNCF Observability And Analysis - Monitoring Landscape](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category)** @@ -153,36 +154,36 @@ In an isolated network where multiple networks are not connected, we need to dep **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** ------ +----- ## Quickly Start -Just run a single command in a Docker environment: `docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` +Just run a single command in a Docker environment: `docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` Browser access `http://localhost:1157` default account password `admin/hertzbeat` ### Landing Page -- HertzBeat's user management is unified by the configuration file `sureness.yml`, which allows users to add, delete, and modify user information, user role permissions, and so on. Default password admin/hertzbeat +* HertzBeat's user management is unified by the configuration file `sureness.yml`, which allows users to add, delete, and modify user information, user role permissions, and so on.
Default password admin/hertzbeat ![hertzbeat](/img/home/0.png) ### Overview Page -- The global overview page shows the distribution of current monitoring categories, users can visualize the current monitoring types and quantities and click to jump to the corresponding monitoring types for maintenance and management. -- Show the status of currently registered collector clusters, including collector on-line status, monitoring tasks, startup time, IP address, name and so on. -- Show the list of recent alarm messages, alarm level distribution and alarm processing rate. +* The global overview page shows the distribution of current monitoring categories, users can visualize the current monitoring types and quantities and click to jump to the corresponding monitoring types for maintenance and management. +* Show the status of currently registered collector clusters, including collector on-line status, monitoring tasks, startup time, IP address, name and so on. +* Show the list of recent alarm messages, alarm level distribution and alarm processing rate. ![hertzbeat](/img/home/1.png) ### Monitoring Center -- The monitoring portal supports the management of monitoring of application services, database, operating system, middleware, network, customization, etc. It displays the currently added monitors in the form of a list. -- It displays the currently added monitors in the form of a list and supports adding, modifying, deleting, canceling, importing, exporting and batch management of monitors. -- Support tag grouping, query filtering, view monitoring details portal. +* The monitoring portal supports the management of monitoring of application services, database, operating system, middleware, network, customization, etc. It displays the currently added monitors in the form of a list. +* It displays the currently added monitors in the form of a list and supports adding, modifying, deleting, canceling, importing, exporting and batch management of monitors. 
+* Support tag grouping, query filtering, view monitoring details portal. Built-in support for monitoring types include: -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), +* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), @@ -191,7 +192,7 @@ Built-in support for monitoring types include: [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- 
[Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), +* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), @@ -200,13 +201,13 @@ Built-in support for monitoring types include: [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), 
[NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), +* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), 
[Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), +* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), @@ -214,8 +215,8 @@ Built-in support for monitoring types include: [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), 
[Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), +* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) +* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) @@ -223,18 +224,18 @@ Built-in support for monitoring types include: ### Add and Modify Surveillance -- You can add or modify monitoring instances of a specific monitoring type, configure the IP, port and other parameters of the monitoring on the other end, set the collection period, collection task scheduling method, support detecting availability in advance, etc. The monitoring instances on the page are defined by the corresponding monitoring templates. -- The monitoring parameters configured on the page are defined by the monitoring template of the corresponding monitoring type, and users can modify the configuration parameters on the page by modifying the monitoring template. 
-- Support associated tags to manage monitoring grouping, alarm matching, and so on. +* You can add or modify monitoring instances of a specific monitoring type, configure the IP, port and other parameters of the monitoring on the other end, set the collection period, collection task scheduling method, support detecting availability in advance, etc. The monitoring instances on the page are defined by the corresponding monitoring templates. +* The monitoring parameters configured on the page are defined by the monitoring template of the corresponding monitoring type, and users can modify the configuration parameters on the page by modifying the monitoring template. +* Support associated tags to manage monitoring grouping, alarm matching, and so on. ![hertzbeat](/img/home/10.png) ### Monitor Details -- The monitoring data detail page shows the basic parameter information of the current monitoring, and the monitoring indicator data information. -- Monitor Real-time Data Report displays the real-time values of all the currently monitored indicators in the form of a list of small cards, and users can configure alarm threshold rules based on the real-time values for reference. -- Monitor Historical Data Report displays the historical values of the currently monitored metrics in the form of trend charts, supports querying hourly, daily and monthly historical data, and supports configuring the page refresh time. -- ⚠️ Note that the monitoring history charts need to be configured with an external timing database in order to get the full functionality, timing database support: IOTDB, TDengine, InfluxDB, GreptimeDB +* The monitoring data detail page shows the basic parameter information of the current monitoring, and the monitoring indicator data information. 
+* Monitor Real-time Data Report displays the real-time values of all the currently monitored indicators in the form of a list of small cards, and users can configure alarm threshold rules based on the real-time values for reference. +* Monitor Historical Data Report displays the historical values of the currently monitored metrics in the form of trend charts, supports querying hourly, daily and monthly historical data, and supports configuring the page refresh time. +* ⚠️ Note that the monitoring history charts need to be configured with an external timing database in order to get the full functionality, timing database support: IOTDB, TDengine, InfluxDB, GreptimeDB ![hertzbeat](/img/home/3.png) @@ -242,17 +243,17 @@ Built-in support for monitoring types include: ### Alarm Center -- The management display page of triggered alarm messages enables users to visualize the current alarm situation. -- Support alarm processing, alarm marking unprocessed, alarm deletion, clearing and other batch operations. +* The management display page of triggered alarm messages enables users to visualize the current alarm situation. +* Support alarm processing, alarm marking unprocessed, alarm deletion, clearing and other batch operations. ![hertzbeat](/img/home/7.png) ### Threshold Rules -- Threshold rules can be configured for monitoring the availability status, and alerts can be issued when the value of a particular metric exceeds the expected range. -- There are three levels of alerts: notification alerts, critical alerts, and emergency alerts. -- Threshold rules support visual page configuration or expression rule configuration for more flexibility. -- It supports configuring the number of triggers, alarm levels, notification templates, associated with a specific monitor and so on. +* Threshold rules can be configured for monitoring the availability status, and alerts can be issued when the value of a particular metric exceeds the expected range. 
+* There are three levels of alerts: notification alerts, critical alerts, and emergency alerts. +* Threshold rules support visual page configuration or expression rule configuration for more flexibility. +* It supports configuring the number of triggers, alarm levels, notification templates, associated with a specific monitor and so on. ![hertzbeat](/img/home/6.png) @@ -260,8 +261,8 @@ Built-in support for monitoring types include: ### Alarm Convergence -- When the alarm is triggered by the threshold rule, it will enter into the alarm convergence, the alarm convergence will be based on the rules of the specific time period of the duplicate alarm message de-emphasis convergence, to avoid a large number of repetitive alarms lead to the receiver alarm numbness. -- Alarm convergence rules support duplicate alarm effective time period, label matching and alarm level matching filter. +* When the alarm is triggered by the threshold rule, it will enter into the alarm convergence, the alarm convergence will be based on the rules of the specific time period of the duplicate alarm message de-emphasis convergence, to avoid a large number of repetitive alarms lead to the receiver alarm numbness. +* Alarm convergence rules support duplicate alarm effective time period, label matching and alarm level matching filter. ![hertzbeat](/img/home/12.png) @@ -269,9 +270,9 @@ Built-in support for monitoring types include: ### Alarm Silence -- When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. -- This application scenario, such as users in the system maintenance, do not need to send known alarms. Users will only receive alarm messages on weekdays, and users need to avoid disturbances at night. 
-- Alarm silence rules support one-time time period or periodic time period, support label matching and alarm level matching. +* When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. +* This application scenario, such as users in the system maintenance, do not need to send known alarms. Users will only receive alarm messages on weekdays, and users need to avoid disturbances at night. +* Alarm silence rules support one-time time period or periodic time period, support label matching and alarm level matching. ![hertzbeat](/img/home/14.png) @@ -279,11 +280,11 @@ Built-in support for monitoring types include: ### Message Notification -- Message notification is a function to notify alarm messages to specified recipients through different media channels, so that the alarm messages can reach them in time. -- It includes recipient information management and notification policy management. Recipient management maintains the information of recipients and their notification methods, while notification policy management maintains the policy rules of which recipients will be notified of the alert messages. -- Notification methods support `Email` `Discord` `Slack` `Telegram` `Pinning` `WeChat` `Flybook` `SMS` `Webhook` and so on. -- The notification policy supports tag matching and alert level matching, which makes it convenient to assign alerts with different tags and alert levels to different receivers and handlers. -- Support notification templates, users can customize the content format of the templates to meet their own personalized notification display needs. +* Message notification is a function to notify alarm messages to specified recipients through different media channels, so that the alarm messages can reach them in time. 
+* It includes recipient information management and notification policy management. Recipient management maintains the information of recipients and their notification methods, while notification policy management maintains the policy rules of which recipients will be notified of the alert messages. +* Notification methods support `Email` `Discord` `Slack` `Telegram` `Pinning` `WeChat` `Flybook` `SMS` `Webhook` and so on. +* The notification policy supports tag matching and alert level matching, which makes it convenient to assign alerts with different tags and alert levels to different receivers and handlers. +* Support notification templates, users can customize the content format of the templates to meet their own personalized notification display needs. ![hertzbeat](/img/home/16.png) @@ -293,8 +294,8 @@ Built-in support for monitoring types include: ### Monitoring Templates -- HertzBeat makes `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` and other protocols configurable so that you can customize the metrics you want to collect using these protocols by simply configuring the monitoring template `YML` in your browser. Would you believe that you can instantly adapt a new monitoring type such as `K8s` or `Docker` just by configuring it? -- All our built-in monitoring types (mysql, website, jvm, k8s) are also mapped to corresponding monitoring templates, so you can add and modify monitoring templates to customize your monitoring functions. +* HertzBeat makes `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` and other protocols configurable so that you can customize the metrics you want to collect using these protocols by simply configuring the monitoring template `YML` in your browser. Would you believe that you can instantly adapt a new monitoring type such as `K8s` or `Docker` just by configuring it? 
+* All our built-in monitoring types (mysql, website, jvm, k8s) are also mapped to corresponding monitoring templates, so you can add and modify monitoring templates to customize your monitoring functions. ![hertzbeat](/img/home/9.png) @@ -302,6 +303,6 @@ Built-in support for monitoring types include: **There's so much more to discover. Have Fun!** ------ +----- -**Github: https://github.com/apache/hertzbeat** +**Github: ** diff --git a/home/docs/start/account-modify.md b/home/docs/start/account-modify.md index 52ddf334e43..2e7a4d33377 100644 --- a/home/docs/start/account-modify.md +++ b/home/docs/start/account-modify.md @@ -6,8 +6,8 @@ sidebar_label: Update Account Secret ## Update Account -Apache HertzBeat (incubating) default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat -If you need add, delete or modify account or password, configure `sureness.yml`. Ignore this step without this demand. +Apache HertzBeat (incubating) default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat +If you need add, delete or modify account or password, configure `sureness.yml`. Ignore this step without this demand. 
Modify the following **part parameters** in sureness.yml:**[Note⚠️Other default sureness configuration parameters should be retained]** ```yaml @@ -156,4 +156,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**Restart HertzBeat, access http://ip:1157/ to explore** +**Restart HertzBeat, access to explore** diff --git a/home/docs/start/custom-config.md b/home/docs/start/custom-config.md index 7554498bc6e..7f45b5dd27d 100644 --- a/home/docs/start/custom-config.md +++ b/home/docs/start/custom-config.md @@ -10,8 +10,8 @@ This describes how to configure the SMS server, the number of built-in availabil ### Configure the configuration file of HertzBeat -Modify the configuration file located at `hertzbeat/config/application.yml` -Note ⚠️The docker container method needs to mount the application.yml file to the local host +Modify the configuration file located at `hertzbeat/config/application.yml` +Note ⚠️The docker container method needs to mount the application.yml file to the local host The installation package can be decompressed and modified in `hertzbeat/config/application.yml` 1. Configure the SMS sending server @@ -57,4 +57,3 @@ warehouse: port: 6379 password: 123456 ``` - diff --git a/home/docs/start/docker-compose-deploy.md b/home/docs/start/docker-compose-deploy.md index b63498c7916..f365eff86a2 100644 --- a/home/docs/start/docker-compose-deploy.md +++ b/home/docs/start/docker-compose-deploy.md @@ -9,7 +9,7 @@ Suggest to use Docker Compose to deploy HertzBeat and its dependent services. ::: :::note -This document assumes that you already have Docker and Docker Compose installed in your environment. If not, please refer to the [Docker official documentation](https://docs.docker.com/compose/install/). +This document assumes that you already have Docker and Docker Compose installed in your environment. If not, please refer to the [Docker official documentation](https://docs.docker.com/compose/install/). 
Run the `docker compose version` command to check if you have a Docker Compose environment. ::: @@ -20,21 +20,23 @@ Download the installation script package `apache-hertzbeat-xxx-incubating-docker 2. Choose to use the HertzBeat + PostgreSQL + VictoriaMetrics solution :::tip + - `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` contains multiple deployment solutions after decompression. Here we recommend choosing the `hertzbeat-postgresql-victoria-metrics` solution. - Other deployment methods, please read the README.md file of each deployment solution in detail. The MySQL solution requires you to prepare the MySQL driver package yourself. + ::: - Unzip the script package ``` -$ tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz +tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz ``` - Enter the decompression directory and select `HertzBeat + PostgreSQL + VictoriaMetrics` for one-click deployment ``` -$ cd apache-hertzbeat-1.6.0-incubating-docker-compose -$ cd hertzbeat-postgresql-victoria-metrics +cd apache-hertzbeat-1.6.0-incubating-docker-compose +cd hertzbeat-postgresql-victoria-metrics ``` - One-click start @@ -54,13 +56,12 @@ docker-compose ps ``` 4. Start exploring HertzBeat - Access http://ip:1157/ in the browser to start exploring and using it. The default account password is admin/hertzbeat. + Access in the browser to start exploring and using it. The default account password is admin/hertzbeat. **HAVE FUN** ----- +---- ### FAQ **The most common problem is network problems, please check in advance** - diff --git a/home/docs/start/docker-deploy.md b/home/docs/start/docker-deploy.md index 5cac80f4868..cc4670c16f6 100644 --- a/home/docs/start/docker-deploy.md +++ b/home/docs/start/docker-deploy.md @@ -5,7 +5,7 @@ sidebar_label: Install via Docker --- :::tip -Using Docker to start HertzBeat with the minimum available environment, no external service dependencies, easy to experience quickly. 
+Using Docker to start HertzBeat with the minimum available environment, no external service dependencies, easy to experience quickly. But it is not recommended to use in production environment, it is recommended to use Docker Compose deployment, installation package deployment, Kubernetes deployment in production environment. ::: @@ -42,15 +42,17 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ - `--network host` : (optional) Use the host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` :::tip + - Marked as optional parameters, non-mandatory items, if not needed, delete them. - This maps the 1157,1158 ports of the container to the 1157,1158 ports of the host. If the port on the host is already occupied, you need to modify the host mapping port. - When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. - You can execute `docker update --restart=always hertzbeat` to configure the container to restart automatically. - If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` + ::: 2. Start to explore HertzBeat - Access http://ip:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! ### Deploy HertzBeat Collector Cluster(Optional) @@ -85,11 +87,13 @@ $ docker run -d \ - `--network host` : (optional) Use the host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` :::tip + - Marked as optional parameters, non-mandatory items, if not needed, delete them. - The `127.0.0.1` in `MANAGER_HOST` needs to be replaced with the external IP address of the HertzBeat Server. 
- When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. - You can execute `docker update --restart=always hertzbeat-collector` to configure the container to restart automatically. - If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` + ::: 2. Access `http://localhost:1157` and you will see the registered new collector in dashboard. @@ -102,13 +106,13 @@ $ docker run -d \ **The most common problem is network problems, please check in advance** -1. MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail +1. MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. +> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` -2. According to the process deploy,visit http://ip:1157/ no interface +2. According to the process deploy,visit no interface Please refer to the following points to troubleshoot issues: > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. 
@@ -136,8 +140,9 @@ $ docker run -d \ > This file is the configuration file of HertzBeat, used to configure various parameters of HertzBeat, such as database connection information, time series database configuration, etc. > Download `application.yml` file to the host directory, for example: $(pwd)/application.yml -> Download source [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) +> Download source [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) > You can modify the configuration yml file according to your needs. +> > - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` > - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) > - **Recommended** If you need to use the time series database victoria-metrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using victoria-metrics to store metrics data](victoria-metrics-init) @@ -150,4 +155,3 @@ $ docker run -d \ > Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` > Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) > For detail steps, please refer to [Configure Account Password](account-modify) - diff --git a/home/docs/start/greptime-init.md b/home/docs/start/greptime-init.md index 3f347bd9ef7..881e6faa0e2 100644 --- a/home/docs/start/greptime-init.md +++ b/home/docs/start/greptime-init.md @@ -17,8 +17,9 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### Install GreptimeDB via Docker > Refer to the official website [installation 
tutorial](https://docs.greptime.com/getting-started/overview) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -44,7 +45,7 @@ use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file +1. Configure HertzBeat's configuration file Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.greptime` data source parameters, URL account and password. @@ -75,4 +76,3 @@ The default database is `hertzbeat` in the `url`, and it will be created automat 1. Do both the time series databases Greptime, IoTDB or TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. - diff --git a/home/docs/start/influxdb-init.md b/home/docs/start/influxdb-init.md index d4991067a8f..50b86344279 100644 --- a/home/docs/start/influxdb-init.md +++ b/home/docs/start/influxdb-init.md @@ -9,7 +9,7 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie > It is recommended to use VictoriaMetrics as metrics storage. 
**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** Note⚠️ Need InfluxDB 1.x Version. ### 1. Use HuaweiCloud GaussDB For Influx @@ -23,8 +23,9 @@ Note⚠️ Need InfluxDB 1.x Version. ### 2. Install TDengine via Docker > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -40,14 +41,14 @@ Note⚠️ Need InfluxDB 1.x Version. > influxdb:1.8 > ``` > -> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. +> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. > use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. 
Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.influxdb` data source parameters, URL account and password. ```yaml @@ -73,4 +74,3 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. - diff --git a/home/docs/start/iotdb-init.md b/home/docs/start/iotdb-init.md index b740a690529..d2838a83a6d 100644 --- a/home/docs/start/iotdb-init.md +++ b/home/docs/start/iotdb-init.md @@ -28,6 +28,7 @@ Apache IoTDB is a software system that integrates the collection, storage, manag $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Install IoTDB via Docker ```shell @@ -120,4 +121,3 @@ Configuration parameters: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/docs/start/mysql-change.md b/home/docs/start/mysql-change.md index a83e8fcc82c..70c6fab5baf 100644 --- a/home/docs/start/mysql-change.md +++ b/home/docs/start/mysql-change.md @@ -10,7 +10,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data ### Install MYSQL via Docker -1. Download and install the Docker environment +1. Download and install the Docker environment For Docker installation, please refer to the [Docker official documentation](https://docs.docker.com/get-docker/). 
After the installation, please verify in the terminal that the Docker version can be printed normally. @@ -18,6 +18,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Install MYSQl with Docker ``` @@ -29,21 +30,21 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data mysql:5.7 ``` - `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. + `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. use ```$ docker ps``` to check if the database started successfully ### Database creation -1. Enter MYSQL or use the client to connect MYSQL service +1. Enter MYSQL or use the client to connect MYSQL service `mysql -uroot -p123456` -2. Create database named hertzbeat +2. Create database named hertzbeat `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. Check if hertzbeat database has been successfully created `show databases;` ### Add MYSQL jdbc driver jar -- Download the MYSQL jdbc driver jar package, such as mysql-connector-java-8.0.25.jar. https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip +- Download the MYSQL jdbc driver jar package, such as mysql-connector-java-8.0.25.jar. - Copy the jar package to the `hertzbeat/ext-lib` directory. ### Modify hertzbeat's configuration file application.yml and switch data source @@ -98,4 +99,4 @@ spring: - It is recommended to set the host field in the MySQL URL to the public IP address when using Hertzbeat in docker. 
-**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/docs/start/package-deploy.md b/home/docs/start/package-deploy.md index f37f978cbb3..6280b634a59 100644 --- a/home/docs/start/package-deploy.md +++ b/home/docs/start/package-deploy.md @@ -5,7 +5,7 @@ sidebar_label: Install via Package --- :::tip -You can install and run Apache HertzBeat (incubating) on Linux Windows Mac system, and CPU supports X86/ARM64. +You can install and run Apache HertzBeat (incubating) on Linux Windows Mac system, and CPU supports X86/ARM64. Deployment via package relies on Java runtime environment, ensure you have Java17 environment installed, if not please refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) ::: @@ -21,11 +21,11 @@ Deployment via package relies on Java runtime environment, ensure you have Java1 Unzip the installation package to the host eg: /opt/hertzbeat ``` - $ tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz + tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz ``` :::tip -The configuration file is located in `config/application.yml`, you can modify the configuration file according to your needs to configure external dependent services, such as databases, time series databases, etc. +The configuration file is located in `config/application.yml`, you can modify the configuration file according to your needs to configure external dependent services, such as databases, time series databases, etc. HertzBeat defaults to using internal services when started, but it is recommended to switch to external database services in production environments. ::: @@ -36,8 +36,8 @@ It is recommended to use [PostgreSQL](postgresql-change) for metadata storage an 3. 
Configure the account file(optional) - HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` - If you need modify account or password, configure `config/sureness.yml`. + HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` + If you need modify account or password, configure `config/sureness.yml`. For detail steps, please refer to [Configure Account Password](account-modify) 4. Start the service @@ -45,16 +45,17 @@ It is recommended to use [PostgreSQL](postgresql-change) for metadata storage an Execute the startup script in the installation directory `bin/`, or `startup.bat` in windows. ``` - $ ./startup.sh + ./startup.sh ``` + 5. Begin to explore HertzBeat - Access http://ip:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! ### Deploy HertzBeat Collector Cluster(Optional) :::note -HertzBeat Collector is a lightweight data collector used to collect and send data to HertzBeat Server. +HertzBeat Collector is a lightweight data collector used to collect and send data to HertzBeat Server. Deploying multiple HertzBeat Collectors can achieve high availability, load balancing, and cloud-edge collaboration of data. ::: @@ -70,7 +71,7 @@ Deploying multiple HertzBeat Collectors can achieve high availability, load bala Unzip the installation package to the host eg: /opt/hertzbeat-collector ``` -$ tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz +tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz ``` Configure the collector configuration yml file `config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` @@ -96,7 +97,7 @@ collector: 3. 
Start the service - Run command `$ ./bin/startup.sh ` or `bin/startup.bat` + Run command `$ ./bin/startup.sh` or `bin/startup.bat` 4. Begin to explore HertzBeat Collector @@ -110,9 +111,9 @@ collector: 1. you need to prepare the JAVA environment in advance - Install JAVA runtime environment-refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) - requirement:JDK17 ENV - download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) + Install JAVA runtime environment-refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) + requirement:JDK17 ENV + download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully. ``` @@ -122,10 +123,10 @@ collector: Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) ``` -2. According to the process deploy,visit http://ip:1157/ no interface + +2. According to the process deploy,visit no interface Please refer to the following points to troubleshoot issues: > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. +> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 
- diff --git a/home/docs/start/postgresql-change.md b/home/docs/start/postgresql-change.md index 3ca07d095d1..84ccb9b20e7 100644 --- a/home/docs/start/postgresql-change.md +++ b/home/docs/start/postgresql-change.md @@ -10,7 +10,7 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition ### Install PostgreSQL via Docker -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -18,10 +18,11 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Install PostgreSQL with Docker ```shell - $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 + docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` use```$ docker ps```to check if the database started successfully @@ -34,11 +35,13 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition su - postgres psql ``` + 2. Create database named hertzbeat ```sql CREATE DATABASE hertzbeat; ``` + 3. 
Check if hertzbeat database has been successfully created ```sql @@ -93,4 +96,4 @@ spring: level: SEVERE ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/docs/start/quickstart.md b/home/docs/start/quickstart.md index 393a236a1d1..5c0a9868252 100644 --- a/home/docs/start/quickstart.md +++ b/home/docs/start/quickstart.md @@ -37,7 +37,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache 1. Download the release package `hertzbeat-xx.tar.gz` [Download Page](https://hertzbeat.apache.org/docs/download) 2. Configure the HertzBeat configuration yml file `hertzbeat/config/application.yml` (optional) -3. Run command `$ ./bin/startup.sh ` or `bin/startup.bat` +3. Run command `$ ./bin/startup.sh` or `bin/startup.bat` 4. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 5. 
Deploy collector clusters(Optional) - Download the release package `hertzbeat-collector-xx.tar.gz` to new machine [Download Page](https://hertzbeat.apache.org/docs/download) @@ -54,7 +54,8 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - Run command `$ ./bin/startup.sh ` or `bin/startup.bat` + + - Run command `$ ./bin/startup.sh` or `bin/startup.bat` - Access `http://localhost:1157` and you will see the registered new collector in dashboard Detailed config refer to [Install HertzBeat via Package](package-deploy) diff --git a/home/docs/start/sslcert-practice.md b/home/docs/start/sslcert-practice.md index 89d48ec642e..ecd1b8bbe12 100644 --- a/home/docs/start/sslcert-practice.md +++ b/home/docs/start/sslcert-practice.md @@ -12,7 +12,7 @@ This article introduces how to use the hertzbeat monitoring tool to detect the v Apache HertzBeat (incubating) is a real-time monitoring tool with powerful custom monitoring capabilities without Agent. Website monitoring, PING connectivity, port availability, database, operating system, middleware, API monitoring, threshold alarms, alarm notification (email, WeChat, Ding Ding Feishu). -github: https://github.com/apache/hertzbeat +github: #### Install HertzBeat @@ -77,7 +77,7 @@ github: https://github.com/apache/hertzbeat For token configuration such as Dingding WeChat Feishu, please refer to the help document -https://hertzbeat.apache.org/docs/help/alert_dingtalk + > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured @@ -87,8 +87,8 @@ https://hertzbeat.apache.org/docs/help/alert_dingtalk ---- -#### Finish! +#### Finish The practice of monitoring SSL certificates is here. Of course, for hertzbeat, this function is just the tip of the iceberg. If you think hertzbeat is a good open source project, please give us a Gitee star on GitHub, thank you very much. 
Thank you for your support. Refill! -**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/docs/start/tdengine-init.md b/home/docs/start/tdengine-init.md index 4edc7c610dd..29ea511c6d3 100644 --- a/home/docs/start/tdengine-init.md +++ b/home/docs/start/tdengine-init.md @@ -10,8 +10,8 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** Note⚠️ Need TDengine 3.x Version. > If you have TDengine environment, can directly skip to create a database instance. @@ -19,8 +19,9 @@ Note⚠️ Need TDengine 3.x Version. ### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -37,8 +38,8 @@ Note⚠️ Need TDengine 3.x Version. 
> tdengine/tdengine:3.0.4.0 > ``` > -> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. -> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. +> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. +> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. > use```$ docker ps``` to check if the database started successfully ### Create database instance @@ -46,9 +47,10 @@ Note⚠️ Need TDengine 3.x Version. 1. Enter database Docker container ``` - $ docker exec -it tdengine /bin/bash + docker exec -it tdengine /bin/bash ``` -2. Create database named hertzbeat + +2. Create database named hertzbeat After entering the container,execute `taos` command as follows: ``` @@ -65,7 +67,7 @@ Note⚠️ Need TDengine 3.x Version. taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` - The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). + The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). A data file every 10 days, memory blocks buffer is 16MB. 3. Check if hertzbeat database has been created success @@ -81,9 +83,9 @@ Note⚠️ Need TDengine 3.x Version. ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. 
Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file + Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.td-engine` data source parameters, URL account and password. ```yaml @@ -123,4 +125,3 @@ warehouse: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/docs/start/update-1.6.0.md b/home/docs/start/update-1.6.0.md index ee05fe83cd5..182dd58be4a 100644 --- a/home/docs/start/update-1.6.0.md +++ b/home/docs/start/update-1.6.0.md @@ -27,7 +27,7 @@ Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resource Due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly use the new `yml` configuration files and then modify them based on your own needs. 
-#### `application.yml` generally needs to modify the following parts: +#### `application.yml` generally needs to modify the following parts Default is: @@ -71,7 +71,7 @@ If you change to a MySQL database, here is an example: level: SEVERE ``` -#### `sureness.yml` modification is optional, usually when you need to change account passwords: +#### `sureness.yml` modification is optional, usually when you need to change account passwords ```yaml # account info config @@ -115,6 +115,7 @@ Next, run the start-up script as before to experience the latest HertzBeat 1.6.0 ``` docker stop hertzbeat ``` + - Upgrade the database script: - Go to [https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), choose the directory of your database and execute the corresponding `V160__update_column.sql` file in MySQL. - Upgrade the configuration files: @@ -131,6 +132,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1. ``` docker stop hertzbeat ``` + - Edit the H2 database files: - Assuming you have mounted the H2 database files in the `data` directory to the local system, or copied the `/opt/hertzbeat/data` directory from the old container manually. - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). @@ -139,6 +141,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1. ``` java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` + - Upgrade the configuration files: - As mentioned, due to significant changes in `application.yml` and `sureness.yml`, it is recommended to directly mount and use the new `yml` configuration files, and then modify them based on your own needs. 
- Add the corresponding database drivers: @@ -152,4 +155,3 @@ If you do not want to go through the tedious script upgrade method mentioned abo - Deploy a new environment with the latest version. - Export the monitoring tasks and threshold information from the old environment on the page - diff --git a/home/docs/start/victoria-metrics-init.md b/home/docs/start/victoria-metrics-init.md index 4d0e48b4a70..f9ca1c86216 100644 --- a/home/docs/start/victoria-metrics-init.md +++ b/home/docs/start/victoria-metrics-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.Recommend Version(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** +**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** > If you already have an VictoriaMetrics environment, you can skip directly to the YML configuration step. @@ -18,7 +18,8 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t ### Install VictoriaMetrics via Docker > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> 1. Download and install Docker environment +> +> 1. Download and install Docker environment > Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > @@ -41,8 +42,8 @@ use```$ docker ps``` to check if the database started successfully 3. 
Configure the database connection in hertzbeat `application.yml`configuration file - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` + Modify `hertzbeat/config/application.yml` configuration file + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.victoria-metrics` data source parameters, HOST account and password. ```yaml @@ -66,4 +67,3 @@ warehouse: 1. Do both the time series databases need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. - diff --git a/home/docs/template.md b/home/docs/template.md index cee7aa05055..2359a43e51f 100644 --- a/home/docs/template.md +++ b/home/docs/template.md @@ -6,7 +6,7 @@ sidebar_label: Monitoring Template > Apache HertzBeat (incubating) is an open source, real-time monitoring tool with custom-monitor and agentLess. > -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. +> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? Here is the architecture. 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md index 02e2cbdd0c8..36e7408bed7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md @@ -52,11 +52,11 @@ Bug修复. 5. [[collector]bugfix: 修复 warehouse data queue 未消费异常 #153](https://github.com/apache/hertzbeat/pull/153). issue by @daqianxiaoyao 6. [[web-app]bugfix: 修复黑暗主题时页面输入框校验出错时不可见 #157](https://github.com/apache/hertzbeat/pull/157). issue by @ConradWen -**Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.0-beta.8...v1.0 +**Full Changelog**: -Online https://console.tancloud.cn. +Online . ------------------------ +----------------------- Redis监控来啦: @@ -67,15 +67,14 @@ Redis监控来啦: > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn)开源的一个支持网站,API,PING,端口,数据库,操作系统等监控类型,拥有易用友好的可视化操作界面的开源监控告警项目。 > 当然,我们也提供了对应的[SAAS云监控版本](https://console.tancloud.cn),中小团队和个人无需再为了监控自己的网站资源,而去部署一套繁琐的监控系统,[登录即可免费开始](https://console.tancloud.cn)监控之旅。 > HertzBeat 支持自定义监控,只用通过配置YML文件我们就可以自定义需要的监控类型和指标,来满足常见的个性化需求。 -> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 +> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 > HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 -> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 +> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 > > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 **仓库地址** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat - +[Github](https://github.com/apache/hertzbeat) 
+[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md index 15d260bd215..076285a021a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md @@ -33,7 +33,7 @@ Bugfix. 3. [[monitor] bugfix: 修复elasticsearch监控在basic认证情况下采集失败 #174](https://github.com/apache/hertzbeat/pull/174) contribute by @weifuqing 4. [修改oracle监控参数[数据库名称]有歧义导致的监控失败 #182](https://github.com/apache/hertzbeat/pull/182) @zklmcookle -Online https://console.tancloud.cn. +Online . --- Windows Monitor coming: @@ -58,13 +58,13 @@ commit; Have Fun! ----- +---- ## V1.1.0 Home: hertzbeat.com | tancloud.cn -Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. +Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. Another major change is that we use the H2 database by default to replace the MYSQL database as storage to facilitate the installation and deployment of users. Now only one docker command is needed to install and experience hertzbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` Let's Try It! @@ -86,7 +86,7 @@ Bugfix. 3. [[monitor] bugfix: fix elasticsearch collect error when need basic auth #174](https://github.com/apache/hertzbeat/pull/174) contribute by @weifuqing 4. [Change the Oracle database name to the service name to reduce ambiguity #182](https://github.com/apache/hertzbeat/pull/182) @zklmcookle -Online https://console.tancloud.cn. +Online . --- @@ -112,20 +112,19 @@ commit; Have Fun! 
----- +---- > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn)开源的一个支持网站,API,PING,端口,数据库,操作系统等监控类型,拥有易用友好的可视化操作界面的开源监控告警项目。 > 当然,我们也提供了对应的[SAAS云监控版本](https://console.tancloud.cn),中小团队和个人无需再为了监控自己的网站资源,而去部署一套繁琐的监控系统,[登录即可免费开始](https://console.tancloud.cn)监控之旅。 > HertzBeat 支持自定义监控,只用通过配置YML文件我们就可以自定义需要的监控类型和指标,来满足常见的个性化需求。 -> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 +> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 > HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 -> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 +> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 > > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 **仓库地址** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat - +[Github](https://github.com/apache/hertzbeat) +[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md index 15d260bd215..076285a021a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md @@ -33,7 +33,7 @@ Bugfix. 3. [[monitor] bugfix: 修复elasticsearch监控在basic认证情况下采集失败 #174](https://github.com/apache/hertzbeat/pull/174) contribute by @weifuqing 4. [修改oracle监控参数[数据库名称]有歧义导致的监控失败 #182](https://github.com/apache/hertzbeat/pull/182) @zklmcookle -Online https://console.tancloud.cn. +Online . --- Windows Monitor coming: @@ -58,13 +58,13 @@ commit; Have Fun! ----- +---- ## V1.1.0 Home: hertzbeat.com | tancloud.cn -Hi guys! HertzBeat v1.1.0 is coming. 
This version we support snmp protocol and use snmp to collect windows metrics. +Hi guys! HertzBeat v1.1.0 is coming. This version we support snmp protocol and use snmp to collect windows metrics. Another major change is that we use the H2 database by default to replace the MYSQL database as storage to facilitate the installation and deployment of users. Now only one docker command is needed to install and experience hertzbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` Let's Try It! @@ -86,7 +86,7 @@ Bugfix. 3. [[monitor] bugfix: fix elasticsearch collect error when need basic auth #174](https://github.com/apache/hertzbeat/pull/174) contribute by @weifuqing 4. [Change the Oracle database name to the service name to reduce ambiguity #182](https://github.com/apache/hertzbeat/pull/182) @zklmcookle -Online https://console.tancloud.cn. +Online . --- @@ -112,20 +112,19 @@ commit; Have Fun! ----- +---- > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn)开源的一个支持网站,API,PING,端口,数据库,操作系统等监控类型,拥有易用友好的可视化操作界面的开源监控告警项目。 > 当然,我们也提供了对应的[SAAS云监控版本](https://console.tancloud.cn),中小团队和个人无需再为了监控自己的网站资源,而去部署一套繁琐的监控系统,[登录即可免费开始](https://console.tancloud.cn)监控之旅。 > HertzBeat 支持自定义监控,只用通过配置YML文件我们就可以自定义需要的监控类型和指标,来满足常见的个性化需求。 -> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 +> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 > HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 -> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 +> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 > > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 **仓库地址** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat - +[Github](https://github.com/apache/hertzbeat) 
+[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md index 589b8113d31..e37323d72c5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-07-10-hertzbeat-v1.1.1.md @@ -40,24 +40,23 @@ Bugfix. 8. [[manager] 修改监控页面取消监控功能再启动监控导致多生成jobId,原有监控项目并没有真实取消 #215](https://github.com/apache/hertzbeat/pull/215) contribute by @yangshihui 9. [[warehouse] 修复tdengine对特殊字段建表失败导致数据无法入库问题 #220](https://github.com/apache/hertzbeat/pull/220) -Online https://console.tancloud.cn. +Online . Have Fun! ----- +---- > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn)开源的一个支持网站,API,PING,端口,数据库,操作系统等监控类型,拥有易用友好的可视化操作界面的开源监控告警项目。 > 当然,我们也提供了对应的[SAAS云监控版本](https://console.tancloud.cn),中小团队和个人无需再为了监控自己的网站资源,而去部署一套繁琐的监控系统,[登录即可免费开始](https://console.tancloud.cn)监控之旅。 > HertzBeat 支持自定义监控,只用通过配置YML文件我们就可以自定义需要的监控类型和指标,来满足常见的个性化需求。 -> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 +> HertzBeat 模块化,`manager, collector, scheduler, warehouse, alerter` 各个模块解耦合,方便理解与定制开发。 > HertzBeat 支持更自由化的告警配置(计算表达式),支持告警通知,告警模版,邮件钉钉微信飞书等及时通知送达 -> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 +> 欢迎登录 HertzBeat 的 [云环境TanCloud](https://console.tancloud.cn) 试用发现更多。 > 我们正在快速迭代中,欢迎参与加入共建项目开源生态。 > > `HertzBeat` 的多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 **仓库地址** -[Github](https://github.com/apache/hertzbeat) https://github.com/apache/hertzbeat -[Gitee](https://gitee.com/hertzbeat/hertzbeat) https://gitee.com/hertzbeat/hertzbeat - +[Github](https://github.com/apache/hertzbeat) +[Gitee](https://gitee.com/hertzbeat/hertzbeat) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md index e4c3064b1fd..cc08092df91 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md @@ -31,13 +31,14 @@ Bugfix. 1. [[docs] fix extend-http-jsonpath.md parseScript error #262](https://github.com/apache/hertzbeat/pull/262) contribute by @woshiniusange . 2. [[monitor] update help docs, refactor redis metrics name #264](https://github.com/apache/hertzbeat/pull/264) -3. [[manager] bugfix alert tags is null when tags map key normal value null. #270](https://github.com/apache/hertzbeat/pull/270) issue by https://gitee.com/hello_brother_niu -4. [[alert] bugfix: the alert global preset config do not take effect #275](https://github.com/apache/hertzbeat/pull/275) issue by https://gitee.com/hello_brother_niu +3. [[manager] bugfix alert tags is null when tags map key normal value null. #270](https://github.com/apache/hertzbeat/pull/270) issue by +4. [[alert] bugfix: the alert global preset config do not take effect #275](https://github.com/apache/hertzbeat/pull/275) issue by -Online https://console.tancloud.cn. +Online . -Have Fun! +Have Fun --- + ## V1.1.3 官网: hertzbeat.com | tancloud.cn @@ -64,11 +65,10 @@ Bugfix. 1. [[docs] fix extend-http-jsonpath.md parseScript error #262](https://github.com/apache/hertzbeat/pull/262) contribute by @woshiniusange . 2. [[monitor] update help docs, refactor redis metrics name #264](https://github.com/apache/hertzbeat/pull/264) -3. [[manager] bugfix alert tags is null when tags map key normal value null. #270](https://github.com/apache/hertzbeat/pull/270) issue by https://gitee.com/hello_brother_niu -4. [[alert] bugfix: the alert global preset config do not take effect #275](https://github.com/apache/hertzbeat/pull/275) issue by https://gitee.com/hello_brother_niu +3. [[manager] bugfix alert tags is null when tags map key normal value null. 
#270](https://github.com/apache/hertzbeat/pull/270) issue by +4. [[alert] bugfix: the alert global preset config do not take effect #275](https://github.com/apache/hertzbeat/pull/275) issue by -Online https://console.tancloud.cn. +Online . -Have Fun! +Have Fun --- - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md index 75bf7f7008c..9acd22c2e76 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md @@ -17,10 +17,10 @@ tags: [opensource, practice] HertzBeat 一个拥有强大自定义监控能力,无需Agent的实时监控工具。网站监测,PING连通性,端口可用性,数据库,操作系统,中间件,API监控,阈值告警,告警通知(邮件微信钉钉飞书)。 -**官网: https://hertzbeat.com | https://tancloud.cn** +**官网: | ** -github: https://github.com/apache/hertzbeat -gitee: https://gitee.com/hertzbeat/hertzbeat +github: +gitee: #### 安装 HertzBeat @@ -42,7 +42,7 @@ gitee: https://gitee.com/hertzbeat/hertzbeat 2. 配置监控网站 -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 +> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 > 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ad1154670648413bb82c8bdeb5b13609~tplv-k3u1fbpfcp-zoom-1.image) @@ -87,8 +87,8 @@ gitee: https://gitee.com/hertzbeat/hertzbeat 钉钉微信飞书等token配置可以参考帮助文档 -https://hertzbeat.com/docs/help/alert_dingtalk -https://tancloud.cn/docs/help/alert_dingtalk + + > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 @@ -98,10 +98,10 @@ https://tancloud.cn/docs/help/alert_dingtalk ---- -#### 完! +#### 完 监控SSL证书的实践就到这里,当然对hertzbeat来说这个功能只是冰山一角,如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md index 4daa13c04e2..bcb68577932 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md @@ -51,7 +51,7 @@ Bugfix. 8. [[script] add startup log and optimize port service judgment #321](https://github.com/apache/hertzbeat/pull/321) 9. [[web-app] fix echarts y-axis value tip overflow #325](https://github.com/apache/hertzbeat/pull/325) 10. [[webapp] fix interceptor http resp common error-msg when error #329](https://github.com/apache/hertzbeat/pull/329) - Online https://console.tancloud.cn. + Online . Have Fun! @@ -78,7 +78,7 @@ spring: - classpath:../dist/ ``` ----- +---- ## V1.2.0 @@ -125,7 +125,7 @@ Bugfix. 9. [[web-app] fix echarts y-axis value tip overflow #325](https://github.com/apache/hertzbeat/pull/325) 10. [[webapp] fix interceptor http resp common error-msg when error #329](https://github.com/apache/hertzbeat/pull/329) -Online https://console.tancloud.cn. +Online . Have Fun! @@ -152,5 +152,4 @@ spring: - classpath:../dist/ ``` ----- - +---- diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md index 941192b69cc..743f0044844 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-11-28-hertzbeat-v1.2.2.md @@ -11,7 +11,7 @@ tags: [opensource] Home: hertzbeat.com | tancloud.cn -Hi guys! HertzBeat v1.2.2 is coming. This release brings significant features. 
This version we support monitor kubernets, docker, springboot, nacos and database dm, opengauss and more. Also we bring a experimental feature, users can custom define metrics collect from promethues with promql. Fixed several bugs and improved the overall stable usability. And more, linux monitor we support top10 cpu usage metrics, top10 memory usage mertics. +Hi guys! HertzBeat v1.2.2 is coming. This release brings significant features. This version we support monitor kubernetes, docker, springboot, nacos and database dm, opengauss and more. Also we bring an experimental feature, users can custom define metrics collect from prometheus with promql. Fixed several bugs and improved the overall stable usability. And more, linux monitor we support top10 cpu usage metrics, top10 memory usage metrics. Let's Try It Now! Only one docker command is needed to install and experience heartbeat: @@ -51,11 +51,11 @@ Bugfix. 6. [[manager] bugfix the gmtUpdate not change when update monitor param #459](https://github.com/apache/hertzbeat/pull/459) 7. [[home] fix typo in springboot2.md #464](https://github.com/apache/hertzbeat/pull/464) @eltociear -Online https://console.tancloud.cn. +Online <https://console.tancloud.cn>. Have Fun! ----- +---- ## V1.2.2 @@ -100,5 +100,4 @@ Bugfix. 6. [[manager] bugfix the gmtUpdate not change when update monitor param #459](https://github.com/apache/hertzbeat/pull/459) 7.
[[home] fix typo in springboot2.md #464](https://github.com/apache/hertzbeat/pull/464) @eltociear ----- - +---- diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md index 34df92ffbd4..7acfd0f5aac 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md @@ -57,7 +57,7 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) 现从事:某互联网公司Java开发工程师 -email:1758619238@qq.com +email:<1758619238@qq.com> Hertzbeat Committer @@ -105,4 +105,3 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 如果是大的改动,建议提交前编写issues,在提交pr,同时请注意编码的规范,尽量减少bug和警告的产生 > 以上就是我们新晋Committer们的开源经历了,可以看出参与开源并不难,更重要的是迈出第一步,无论是代码还是文档修复或者提交issue,这些都是贡献者参与开源的姿势。快来加入我们吧! - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md index 5bc276eb240..716c97fd15e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-28-hertzbeat-v1.2.3.md @@ -49,11 +49,11 @@ Bugfix. 7. [监控k8s问题issue描述与解决方案 #511](https://github.com/apache/hertzbeat/pull/511) @MrAndyMing 8. [[manager] springboot2 monitor support base path config #515](https://github.com/apache/hertzbeat/pull/515) -Online https://console.tancloud.cn. +Online . Have Fun! ----- +---- ## V1.2.3 @@ -95,5 +95,4 @@ Bugfix. 7. [监控k8s问题issue描述与解决方案 #511](https://github.com/apache/hertzbeat/pull/511) @MrAndyMing 8. 
[[manager] springboot2 monitor support base path config #515](https://github.com/apache/hertzbeat/pull/515) ----- - +---- diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md index 74b9d28d2f6..54f307891f3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md @@ -8,27 +8,27 @@ tags: [opensource, practice] keywords: [开源监控系统, 开源数据库监控, IotDB数据库监控] --- -## 使用 HertzBeat 对物联网数据库 IoTDB 进行监控实践,5分钟搞定! +## 使用 HertzBeat 对物联网数据库 IoTDB 进行监控实践,5分钟搞定 ### IoTDB 介绍 -> Apache IoTDB (Internet of Things Database) 是一款时序数据库管理系统,可以为用户提供数据收集、存储和分析等服务。 +> Apache IoTDB (Internet of Things Database) 是一款时序数据库管理系统,可以为用户提供数据收集、存储和分析等服务。 > IoTDB由于其轻量级架构、高性能和高可用的特性,以及与 Hadoop 和 Spark 生态的无缝集成,满足了工业 IoT 领域中海量数据存储、高吞吐量数据写入和复杂数据查询分析的需求。 ### HertzBeat 介绍 -> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 -> 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书)。 +> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 +> 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书)。 > HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 ### 在 HertzBeat 5分钟搞定监控 IoTDB -#### 操作前提,您已拥有 IoTDB 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 IoTDB 环境和 HertzBeat 环境 - IoTDB [部署安装文档](https://iotdb.apache.org/UserGuide/V0.13.x/QuickStart/QuickStart.html) - HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) -#### 一. 在 IoTDB 端开启`metrics`功能,它将提供 prometheus metrics 形式的接口数据。 +#### 一. 在 IoTDB 端开启`metrics`功能,它将提供 prometheus metrics 形式的接口数据 1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server @@ -42,7 +42,7 @@ metricReporterList: - PROMETHEUS ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 #### 二. 在 HertzBeat 监控页面添加 IoTDB 监控 @@ -54,8 +54,8 @@ metricReporterList: 2. 
配置监控IoTDB所需参数 -在监控页面填写 IoTDB **服务IP**,**监控端口**(默认9091),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/iotdb/) https://hertzbeat.com/docs/help/iotdb/ +在监控页面填写 IoTDB **服务IP**,**监控端口**(默认9091),最后点击确定添加即可。 +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/iotdb/) ![hertzbeat](/img/blog/monitor-iotdb-2.png) @@ -72,7 +72,8 @@ metricReporterList: ![hertzbeat](/img/blog/monitor-iotdb-5.png) **完成DONE!通过上面几步,总结起来其实也就是两步** -- **一步开启 IoTDB 端`metrics`功能** + +- **一步开启 IoTDB 端`metrics`功能** - **另一步在 HertzBeat 监控页面配置IP端口添加监控即可** **这样我们就完成了对 IoTDB 的监控,我们可以随时查看监控详情指标信息来观测其服务状态,但人不可能是一直去看,总有要休息的时候,监控往往伴随着告警,当监控指标发生异常,监控系统需要能及时通知到负责人** @@ -96,7 +97,7 @@ metricReporterList: 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 - 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 @@ -123,12 +124,12 @@ metricReporterList: 这篇实践文章带我们体验了如何使用 HertzBeat 监控 IoTDB 数据库指标数据,可以发现将 监控-告警-通知 集一体的 HertzBeat 在操作与使用方面更加的便捷,在页面上简单点一点就能把 IoTDB 纳入监控,再也不需要部署多个组件,写多个有门槛的YML配置文件了。 -IoTDB Github: https://github.com/apache/iotdb -HertzBeat Github: https://github.com/apache/hertzbeat +IoTDB Github: +HertzBeat Github: **欢迎了解使用Star支持哦!** -只需要一条docker命令即可安装体验heartbeat : +只需要一条docker命令即可安装体验heartbeat : `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` 注意⚠️HertzBeat v1.2.3 版本支持 IoTDB v0.12 v0.13, 由于其v1.0刚发布, 暂未对此版本全部指标兼容。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md index 2ddf42275b3..bf6a47a7e8e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md @@ -7,7 +7,7 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: [opensource, practice] --- -## 使用 HertzBeat 对 API 网关 Apache ShenYu 进行监控实践,5分钟搞定! +## 使用 HertzBeat 对 API 网关 Apache ShenYu 进行监控实践,5分钟搞定 ### Apache ShenYu 介绍 @@ -24,20 +24,20 @@ tags: [opensource, practice] ### HertzBeat 介绍 -> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 -> 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书)。 +> HertzBeat 是一款开源,易用友好的实时监控工具,无需Agent,拥有强大自定义监控能力。 +> 支持对应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书)。 > HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 ### 在 HertzBeat 5分钟搞定监控 Apache ShenYu -#### 操作前提,您已拥有 ShenYu 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 ShenYu 环境和 HertzBeat 环境 - ShenYu [部署安装文档](https://shenyu.apache.org/zh/docs/deployment/deployment-before) - HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) -#### 一. 在 ShenYu 端开启`metrics`插件,它将提供 metrics 接口数据。 +#### 一. 在 ShenYu 端开启`metrics`插件,它将提供 metrics 接口数据 -> 插件是 Apache ShenYu 网关的核心执行者,指标数据采集在 `ShenYu` 也是以插件的形式集成的 - `Metrics插件`。 +> 插件是 Apache ShenYu 网关的核心执行者,指标数据采集在 `ShenYu` 也是以插件的形式集成的 - `Metrics插件`。 > `Metrics插件`是网关用来监控自身运行状态(`JVM`相关),请求响应等相关指标进行监测。 1. 在网关的 `pom.xml` 文件中添加 `metrics插件` 的依赖。 @@ -76,8 +76,8 @@ shenyu: 2. 
配置监控 ShenYu 所需参数 -在监控页面填写 ShenYu **服务IP**,**监控端口**(默认8090),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/shenyu/) https://hertzbeat.com/docs/help/shenyu/ +在监控页面填写 ShenYu **服务IP**,**监控端口**(默认8090),最后点击确定添加即可。 +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/shenyu/) ![hertzbeat](/img/blog/monitor-shenyu-1.png) @@ -96,7 +96,8 @@ shenyu: ![hertzbeat](/img/blog/monitor-shenyu-6.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用两步** -- **第一步开启 ShenYu 端`metrics`插件功能** + +- **第一步开启 ShenYu 端`metrics`插件功能** - **第二步在 HertzBeat 监控页面配置IP端口添加监控即可** :::tip @@ -125,7 +126,7 @@ shenyu: 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 - 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 @@ -156,10 +157,10 @@ shenyu: 这篇实践文章带我们体验了如何使用 HertzBeat 监控 Apache ShenYu 指标数据,可以发现将 `监控-告警-通知` 集一体的 HertzBeat 在操作与使用方面更加的便捷,在页面上简单点一点就能把 ShenYu 纳入监控,再也不需要部署多个组件,写多个有门槛的YML配置文件了。 ::: -Apache ShenYu Github: https://github.com/apache/shenyu -HertzBeat Github: https://github.com/apache/hertzbeat +Apache ShenYu Github: +HertzBeat Github: **欢迎了解使用Star支持哦!** -只需要一条docker命令即可安装体验heartbeat : +只需要一条docker命令即可安装体验heartbeat : `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md index 60663c6041f..576ace519e3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md @@ -7,7 +7,7 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: 
[opensource, practice] --- -## 使用 HertzBeat 对 线程池框架 DynamicTp 进行监控实践,5分钟搞定! +## 使用 HertzBeat 对 线程池框架 DynamicTp 进行监控实践,5分钟搞定 ### 线程池框架 DynamicTp 介绍 @@ -27,12 +27,12 @@ tags: [opensource, practice] ### 在 HertzBeat 5分钟搞定监控 DynamicTp -#### 操作前提,您已拥有 DynamicTp 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 DynamicTp 环境和 HertzBeat 环境 - DynamicTp [集成接入文档](https://dynamictp.cn/guide/use/quick-start.html) - HertzBeat [部署安装文档](https://hertzbeat.com/docs/start/docker-deploy) -#### 一. 在 DynamicTp 端暴露出`DynamicTp`指标接口 `/actuator/dynamic-tp`,它将提供 metrics 接口数据。 +#### 一. 在 DynamicTp 端暴露出`DynamicTp`指标接口 `/actuator/dynamic-tp`,它将提供 metrics 接口数据 1. 开启 SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 @@ -88,8 +88,8 @@ management: 2. 配置监控 DynamicTp 所需参数 -在监控页面填写 DynamicTp **服务IP**,**监控端口**(默认8080),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/dynamic_tp/) https://hertzbeat.com/docs/help/dynamic_tp/ +在监控页面填写 DynamicTp **服务IP**,**监控端口**(默认8080),最后点击确定添加即可。 +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/dynamic_tp/) ![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) @@ -108,7 +108,8 @@ management: ![hertzbeat](/img/blog/monitor-dynamic-tp-5.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用两步** -- **第一步暴露 DynamicTp 端`metrics`端点`/actuator/dynamic-tp`** + +- **第一步暴露 DynamicTp 端`metrics`端点`/actuator/dynamic-tp`** - **第二步在 HertzBeat 监控页面配置IP端口添加监控即可** :::tip @@ -137,7 +138,7 @@ management: 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 - 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 @@ -168,10 +169,10 @@ management: 这篇实践文章带我们体验了如何使用 HertzBeat 监控 DynamicTp线程池 指标数据,可以发现集 `监控-告警-通知` 的 HertzBeat 在操作与使用方面更加的便捷,只需页面上简单点一点就能把 DynamicTp线程池 
纳入监控并告警通知,再也不需要部署多个组件写YML配置文件那些繁琐操作了。 ::: -DynamicTp Github: https://github.com/dromara/dynamic-tp -HertzBeat Github: https://github.com/apache/hertzbeat +DynamicTp Github: +HertzBeat Github: **欢迎了解使用Star支持哦!** -只需要一条docker命令即可安装体验heartbeat : +只需要一条docker命令即可安装体验heartbeat : `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md index 72b996e62c5..bb348eacf12 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md @@ -74,16 +74,16 @@ github:TJxiaobao **贡献:** -- 1、实现对 docker 容器的监控。 -- 2、完成 国产数据库 DM 监控 -- 3、编写相对应业务的单测。 -- 4、部分注释的英文翻译。 +* 1、实现对 docker 容器的监控。 +* 2、完成 国产数据库 DM 监控 +* 3、编写相对应业务的单测。 +* 4、部分注释的英文翻译。 **收获:** -- 1、技术能力得到进一步的提升。 -- 2、开阔自己的眼界。 -- 3、从大佬们身上学到了很多知识。 +* 1、技术能力得到进一步的提升。 +* 2、开阔自己的眼界。 +* 3、从大佬们身上学到了很多知识。 ### 🌻 感谢社区小伙伴 @@ -93,30 +93,29 @@ github:TJxiaobao 首先我也是一枚新手村的萌新啦,但是我可以把我的一些经验分享给大家,希望能给大家有所帮助。 -- 1、不要过于心急,要沉静身心了解各个模块的大致实现逻辑。 -- 2、通过使用不同的功能,并 debug 来看看各个功能的底层实现原理。 -- 3、慢慢的尝试阅读源码,并理解。 -- 4、如果遇见bug,可以直接反馈到 isses,也可以自己尝试解决嘿嘿。 +* 1、不要过于心急,要沉静身心了解各个模块的大致实现逻辑。 +* 2、通过使用不同的功能,并 debug 来看看各个功能的底层实现原理。 +* 3、慢慢的尝试阅读源码,并理解。 +* 4、如果遇见bug,可以直接反馈到 isses,也可以自己尝试解决嘿嘿。 ## What is HertzBeat? > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需Agent的实时监控告警工具。应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Discord Slack Telegram)。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 > 您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ## ⛄ Supported -- 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 -- Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB -- Linux, Ubuntu, CentOS, Windows -- Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ -- Kubernetes, Docker -- 和更多您的自定义监控。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 - +* 网站监控, 端口可用性, Http Api, Ping连通性, Jvm, SiteMap全站, Ssl证书, SpringBoot, FTP服务器 +* Mysql, PostgreSQL, MariaDB, Redis, ElasticSearch, SqlServer, Oracle, MongoDB, 达梦, OpenGauss, ClickHouse, IoTDB +* Linux, Ubuntu, CentOS, Windows +* Tomcat, Nacos, Zookeeper, RabbitMQ, Flink, Kafka, ShenYu, DynamicTp, Jetty, ActiveMQ +* Kubernetes, Docker +* 和更多您的自定义监控。 +* 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md index b41bc15de9a..748a3f09d53 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md @@ -8,7 +8,7 @@ tags: [opensource, practice] keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] --- -## 使用开源实时监控工具 HertzBeat 对 Mysql 数据库监控告警实践,5分钟搞定! +## 使用开源实时监控工具 HertzBeat 对 Mysql 数据库监控告警实践,5分钟搞定 ### Mysql 数据库介绍 @@ -24,7 +24,7 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ### 在 HertzBeat 5分钟搞定对 Mysql 数据库监控 -#### 操作前提,您已拥有 Mysql 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 Mysql 环境和 HertzBeat 环境 - Mysql [安装部署文档](https://www.runoob.com/mysql/mysql-install.html) - HertzBeat [安装部署文档](https://hertzbeat.com/docs/start/docker-deploy) @@ -39,8 +39,8 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] 2. 
配置新增监控 Mysql 数据库所需参数 -在监控页面填写 Mysql **服务IP**,**监控端口**(默认3306),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/mysql/) https://hertzbeat.com/docs/help/mysql/ +在监控页面填写 Mysql **服务IP**,**监控端口**(默认3306),**账户密码等**,最后点击确定添加即可。 +其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/mysql/) ![hertzbeat](/img/blog/monitor-mysql-2.png) @@ -57,6 +57,7 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ![hertzbeat](/img/blog/monitor-mysql-4.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用一步即可** + - **在 HertzBeat 监控页面配置IP端口账户密码添加 Mysql 监控即可** :::tip @@ -87,7 +88,7 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 - 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 @@ -118,8 +119,8 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] 这篇实践文章带我们体验了如何使用开源实时监控工具 HertzBeat 来监控 Mysql 数据库指标数据,可以发现集 `监控-告警-通知` 的 HertzBeat 在操作与使用方面更加的便捷,只需页面上简单点一点就能把 Mysql 数据库纳入监控并告警通知,再也不需要部署多个组件编写配置文件那些繁琐操作了。 ::: -Mysql Github: https://github.com/mysql/mysql-server -HertzBeat Github: https://github.com/apache/hertzbeat +Mysql Github: +HertzBeat Github: **欢迎了解使用支持Star哦!** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md index c80d7a5fcbb..8df57dee476 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md @@ -8,7 +8,7 @@ tags: [opensource, practice] keywords: [开源监控系统, 操作系统监控, Linux监控] --- -## 使用开源实时监控工具 HertzBeat 对 Linux 操作系统的监控告警实践,5分钟搞定! 
+## 使用开源实时监控工具 HertzBeat 对 Linux 操作系统的监控告警实践,5分钟搞定 ### HertzBeat 介绍 @@ -18,11 +18,11 @@ keywords: [开源监控系统, 操作系统监控, Linux监控] - 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? - HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -Github: https://github.com/apache/hertzbeat +Github: ### 在 HertzBeat 5分钟搞定对 Linux 的监控 -#### 操作前提,您已拥有 Linux 环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 Linux 环境和 HertzBeat 环境 - HertzBeat [安装部署文档](https://hertzbeat.com/docs/start/docker-deploy) @@ -36,8 +36,8 @@ Github: https://github.com/apache/hertzbeat 2. 配置新增监控 Linux 所需参数 -在监控页面填写 Linux **对端IP**,**SSH端口**(默认22),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 https://hertzbeat.com/docs/help/mysql/ +在监控页面填写 Linux **对端IP**,**SSH端口**(默认22),**账户密码等**,最后点击确定添加即可。 +其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 ![hertzbeat](/img/blog/monitor-linux-2.png) @@ -149,7 +149,7 @@ Github: https://github.com/apache/hertzbeat 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 - 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 @@ -188,13 +188,13 @@ Github: https://github.com/apache/hertzbeat > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需Agent的实时监控告警工具。应用服务,数据库,操作系统,中间件,云原生等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Discord Slack Telegram)。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 > 您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ## ⛄ Supported @@ -205,4 +205,3 @@ Github: https://github.com/apache/hertzbeat - Kubernetes, Docker - 和更多您的自定义监控。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md index 90c0b4e7f27..fed67a861cb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md @@ -14,8 +14,8 @@ keywords: [开源监控系统, 告警系统, Linux监控] ### What is HertzBeat? -> HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 +> HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 > 支持更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 > > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 @@ -24,9 +24,9 @@ keywords: [开源监控系统, 告警系统, Linux监控] ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### v1.3.0 大版本来了 @@ -86,7 +86,7 @@ Feature: Bugfix. 1. [[collector] bugfix oracle query error: ORA-01000 happen #618](https://github.com/apache/hertzbeat/pull/618) -2. [[manager]bugfix:update flink fields name, use _ replace - avoid alert_threshold_expr problem. #622](https://github.com/apache/hertzbeat/pull/622) @cuipiheqiuqiu +2. 
[[manager]bugfix:update flink fields name, use _replace - avoid alert_threshold_expr problem. #622](https://github.com/apache/hertzbeat/pull/622) @cuipiheqiuqiu 3. [[webapp] fix rule days not change when edit old notice rule item #628](https://github.com/apache/hertzbeat/pull/628) 4. [[webapp] update alert notice modal item span #630](https://github.com/apache/hertzbeat/pull/630) 5. [Update issue.md #654](https://github.com/apache/hertzbeat/pull/654) @ycilry @@ -109,7 +109,7 @@ Bugfix. 22. [[script] modified the linux memory metrics specified script code #719](https://github.com/apache/hertzbeat/pull/719) 23. [[webapp] bugfix the cover of the big screen is too small #724](https://github.com/apache/hertzbeat/pull/724) ----- +---- 升级注意⚠️. @@ -146,5 +146,5 @@ COMMIT; ---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md index 49c2d8121f1..b03359eb1ec 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md @@ -8,7 +8,7 @@ tags: [opensource, practice] keywords: [开源监控系统, SpringBoot监控, 监控告警] --- -## 使用开源实时监控工具 HertzBeat 对 SpringBoot2 应用的监控告警实践,5分钟搞定! +## 使用开源实时监控工具 HertzBeat 对 SpringBoot2 应用的监控告警实践,5分钟搞定 ### HertzBeat 介绍 @@ -18,15 +18,15 @@ keywords: [开源监控系统, SpringBoot监控, 监控告警] - 其将Http, Jmx, Ssh, Snmp, Jdbc, Prometheus等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
- HertzBeat 的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -Github: https://github.com/apache/hertzbeat +Github: ### 在 HertzBeat 5分钟搞定对 SpringBoot2 应用的监控 -#### 操作前提,您已拥有 SpringBoot2 应用环境和 HertzBeat 环境。 +#### 操作前提,您已拥有 SpringBoot2 应用环境和 HertzBeat 环境 - HertzBeat [安装部署文档](https://hertzbeat.com/docs/start/docker-deploy) -#### 一. 在 SpringBoot2 应用端暴露出`actuator`指标接口,它将提供 metrics 接口数据。 +#### 一. 在 SpringBoot2 应用端暴露出`actuator`指标接口,它将提供 metrics 接口数据 1. 开启 SpringBoot Actuator Endpoint 暴露出`metrics health env`指标接口 @@ -89,8 +89,8 @@ management: 2. 配置新增监控 SpringBoot2 所需参数 -在监控页面填写 SpringBoot2应用 **对端IP**,**服务端口**(默认8080),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 https://hertzbeat.com/docs/help/ +在监控页面填写 SpringBoot2应用 **对端IP**,**服务端口**(默认8080),**账户密码等**,最后点击确定添加即可。 +其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 ![hertzbeat](/img/blog/monitor-springboot2-2.png) @@ -138,7 +138,7 @@ management: 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) https://hertzbeat.com/docs/help/alert_dingtalk 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 +- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 - 在 HertzBeat 配置接收人参数如下。 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 @@ -183,13 +183,13 @@ management: > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需Agent的实时监控告警工具。应用服务,数据库,操作系统,中间件,云原生,网络等监控,阈值告警,告警通知(邮件微信钉钉飞书短信 Discord Slack Telegram)。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,只需配置YML就能使用这些协议去自定义采集任何您想要采集的指标。 > 您相信只需配置YML就能立刻适配一个K8s或Docker等新的监控类型吗? 
> > `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ## ⛄ Supported @@ -201,4 +201,3 @@ management: - CiscoSwitch, HpeSwitch, HuaweiSwitch, TpLinkSwitch - 和更多的自定义监控。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md index d68058fec9d..b58b533d388 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md @@ -14,8 +14,8 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ### What is HertzBeat? -> HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 +> HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 > 支持更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 > > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 @@ -24,9 +24,9 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### v1.3.1 来了 @@ -38,7 +38,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 只需要一条docker命令即可安装体验hertzbeat : `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` -## 升级注意⚠️. 
+## 升级注意⚠️ 若之前使用的TDengine时序数据库,需升级至TDengine3.0+ @@ -50,9 +50,10 @@ COMMIT; ``` --- + ## ⛄ 已支持 -> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! +> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! > 欢迎大家一起贡献你使用过程中自定义的通用监控类型监控模版。 - Website, Port Telnet, Http Api, Ping Connect, Jvm, SiteMap, Ssl Certificate, SpringBoot2, FTP Server, SpringBoot3 @@ -66,5 +67,5 @@ COMMIT; --- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md index ad93e35571a..5a6170fa4a0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md @@ -78,7 +78,7 @@ $ docker run -d -p 1157:1157 \ 注意⚠️ 本地挂载配置文件 `application.yml` 需提前存在,文件完整内容见项目仓库[/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) -2. 浏览器访问 http://ip:1157/ 默认账户密码 admin/hertzbeat,查看 HertzBeat 是否启动成功。 +2. 浏览器访问 默认账户密码 admin/hertzbeat,查看 HertzBeat 是否启动成功。 #### 配置使用 GreptimeDB 存储 HertzBeat 监控指标度量数据 @@ -102,12 +102,12 @@ warehouse: 2. 重启 HertzBeat ```shell -$ docker restart hertzbeat +docker restart hertzbeat ``` #### 观察验证效果 -1. 浏览器访问 HertzBeat http://ip:1157/ 默认账户密码 admin/hertzbeat +1. 浏览器访问 HertzBeat 默认账户密码 admin/hertzbeat 2. 使用 HertzBeat 添加应用监控,比如网站监控,Linux监控,Mysql监控 3. 
监控采集几个周期之后,查看 GreptimeDB 数据库是否存储指标度量数据,HertzBeat 指标数据图表数据是否展示正常。 @@ -121,11 +121,11 @@ $ docker restart hertzbeat ## 小结 -这篇文章带我们体验了如何使用开源时序数据库 GreptimeDB 存储开源实时监控 HertzBeat 的指标度量数据,总的来看两款开源产品上手是非常简单的,关键是如果嫌麻烦不想部署他俩都还有云服务😂让你折腾。 +这篇文章带我们体验了如何使用开源时序数据库 GreptimeDB 存储开源实时监控 HertzBeat 的指标度量数据,总的来看两款开源产品上手是非常简单的,关键是如果嫌麻烦不想部署他俩都还有云服务😂让你折腾。 作为特性 [HertzBeat支持GreptimeDB](https://github.com/apache/hertzbeat/pull/834) 的开发者之一,在实际适配使用过程中,GreptimeDB的丝滑原生SDK和类似关系数据库的SQL,让我们从其它时序数据库 `TDengine, IotDB, InfluxDB` 切换过去还是非常容易,体验丝滑的。 -GreptimeDB Github: https://github.com/GreptimeTeam/greptimedb -HertzBeat Github: https://github.com/apache/hertzbeat +GreptimeDB Github: +HertzBeat Github: **最后就是欢迎大家一定要多多了解,多多使用,多多提意见,多多ISSUE,多多PR,多多Star支持这俩没出来多久希望得到呵护的开源牛牛不怕困难 一颗小星星哦!做开源,我们是蒸(真)的,爱心💗** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md index 03509304667..5afc14c05e6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-07-05-hertzbeat-v1.3.2.md @@ -14,18 +14,18 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ### HertzBeat 介绍 -> HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> 致力于**易用友好**,全 WEB 页面操作,鼠标点一点就能监控告警,零上手学习成本。 -> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 +> HertzBeat赫兹跳动 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> 致力于**易用友好**,全 WEB 页面操作,鼠标点一点就能监控告警,零上手学习成本。 +> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等指标监控,阈值告警通知一步到位。 > 支持更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 > > 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4236e748f5ac4352b7cf4bb65ccf97aa~tplv-k3u1fbpfcp-zoom-1.image) -**Github: https://github.com/apache/hertzbeat** 
+**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### v1.3.2 来了 @@ -43,16 +43,17 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 修复了大量BUG,完善文档代码,提高了整体的稳定可用性。更多新功能欢迎探索! -只需要一条docker命令即可安装体验hertzbeat: +只需要一条docker命令即可安装体验hertzbeat: `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` -若dockerhub网络超时,可以使用下面命令: +若dockerhub网络超时,可以使用下面命令: `docker run -d -p 1157:1157 --name hertzbeat quay.io/tancloud/hertzbeat` --- + ## ⛄ 已支持 -> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! +> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! > 欢迎大家一起贡献你使用过程中自定义的通用监控类型监控模版。 - Website, Port Telnet, Http Api, Ping Connect, Jvm, SiteMap, Ssl Certificate, SpringBoot2, FTP Server, SpringBoot3 @@ -68,6 +69,5 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] **欢迎star三连来支持我们** -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** - +**Github: ** +**Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md index 02d1abc665a..be2a3761fce 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md @@ -26,13 +26,13 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### 集群版来啦 -我们之前的hertzbeat一直是单机版本,组件代码模块化但不支持采集器独立部署,所支持的监控数量上限受到了单节点的天然限制,且无法应对多个隔离网络的资源的统一纳管。 +我们之前的hertzbeat一直是单机版本,组件代码模块化但不支持采集器独立部署,所支持的监控数量上限受到了单节点的天然限制,且无法应对多个隔离网络的资源的统一纳管。 经过一个多月的迭代,我们重写了采集任务调度,采集器独立部署,设计单机版和集群版用同一套代码方便后续的维护升级,单机集群两种模式可相互切换无感知。最终很高兴,集群版如期与大家见面了。 集群版不仅仅给我们带来了更强大的监控性能,更有云边协同等功能让人充满想象。 @@ -57,9 +57,9 @@ HertzBeat 
提供云边协同能力,可以在多个隔离网络部署边缘采 ### 为什么要开源集群版? -往往一些做需要商业化的开源产品的策略会是单机版作为玩具给小玩家们的入门使用,然后集群版作为闭源产品给有需求的氪金玩家付费使用。这样的模式是可以说非常不错的且值得肯定的,即保证开源也得到了收益,也适用于很多开源项目的发展策略,可能会在商业路径上走得更通顺点。 -网络上有些人会对这样的分单机和集群版的开源项目嗤之以鼻,觉得它们是伪开源,开源是噱头,他们觉得开源应该什么都开源免费出来,开源团队什么都应该无私奉献出来。。。。很无语这类人,有投入才有回报,当你免费使用着开源软件并得到价值的时候,是否应该想一想你付出给开源软件了什么而不是一味的索取。 -那回到正题,我们又为什么要开源集群版?仅因为热爱开源?如果说我们还在少年可能这话你信,但一个快奔30还有家庭责任的人说出这话你信吗,我自己都不信😂。 +往往一些做需要商业化的开源产品的策略会是单机版作为玩具给小玩家们的入门使用,然后集群版作为闭源产品给有需求的氪金玩家付费使用。这样的模式是可以说非常不错的且值得肯定的,即保证开源也得到了收益,也适用于很多开源项目的发展策略,可能会在商业路径上走得更通顺点。 +网络上有些人会对这样的分单机和集群版的开源项目嗤之以鼻,觉得它们是伪开源,开源是噱头,他们觉得开源应该什么都开源免费出来,开源团队什么都应该无私奉献出来。。。。很无语这类人,有投入才有回报,当你免费使用着开源软件并得到价值的时候,是否应该想一想你付出给开源软件了什么而不是一味的索取。 +那回到正题,我们又为什么要开源集群版?仅因为热爱开源?如果说我们还在少年可能这话你信,但一个快奔30还有家庭责任的人说出这话你信吗,我自己都不信😂。 首先我们来看看开源能带来什么,或者为什么要做开源。最开始全职开源的想法很简单,做自己喜欢的开源产品(已实现),程序员的梦想能部署在成千上万的服务器上(看下载量已实现),然后基于此开源产品挣钱(暂未哭)。 - 用户流量。开源项目免费提供给用户和开发者,吸引用户使用,宣传等方面都有优势。 @@ -67,7 +67,7 @@ HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采 - 社区协作。开源的产品可以吸引到顶级贡献者一起贡献,接收用户的反馈issue,pr贡献等,在社区的驱动下使开源项目越来越好,正向反馈后也会有更多人参与和使用。社区协作我觉得这是开源的意义,而且这样不仅仅只是程序员之间的贡献代码协作,用户都是协作对象(比如我们这个项目有大量的运维朋友贡献代码和文档),如果是仅仅代码开源而不社区协作,那还不如放个安装包给别人免费使用下载就好。 - 产品生态。这对一些需要生态的产品是需要的,比如hertzbeat,需要支持对接各种类型协议的监控类型,大量的监控模版。一个好的开源项目生态才能吸引到其它贡献者贡献和分享,在生态中互通有无,最终大家在生态中都受益。这在闭源程序中是很难做到的。 -上面几点,重在社区协作和产品生态,这也是开源集群版的原因,只有卷开源产品卷自己到更强的产品力,比如集群这一技术特性天生会吸引到开发者(而且集群本身就是我们社区协作的产物),会吸引到更多的用户和贡献者使用反馈大家一起迭代,社区驱动进而正向促进开源项目和满足用户功能体验。 +上面几点,重在社区协作和产品生态,这也是开源集群版的原因,只有卷开源产品卷自己到更强的产品力,比如集群这一技术特性天生会吸引到开发者(而且集群本身就是我们社区协作的产物),会吸引到更多的用户和贡献者使用反馈大家一起迭代,社区驱动进而正向促进开源项目和满足用户功能体验。 而对于开源商业化,开源商业化的前提是得有个真正好的,受欢迎,被广泛使用的开源产品,然后在此基础上做商业化挣钱。 对了这里再说下开源不等同于免费,基于HertzBeat二次开发需保留logo,名称,页面脚注,版权等。 @@ -98,63 +98,64 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) --- + ### 更多的 v1.4.0 版本更新 > 更多版本新功能更新欢迎探索,感谢社区小伙伴们的辛苦贡献,爱心💗! 
-* [doc] add v1.3.2 publish doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1075 -* remove elasticsearch unused param index by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1080 -* feature support monitoring apache airflow by @luoxuanzao in https://github.com/apache/hertzbeat/pull/1081 -* add luoxuanzao as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1083 -* [collector] bugfix sshd cannot use private key to connect by @gcdd1993 in https://github.com/apache/hertzbeat/pull/1084 -* bugfix update dashboard alerts cards height not consist by @tomsun28 in https://github.com/apache/hertzbeat/pull/1087 -* Feature#serverchan by @zqr10159 in https://github.com/apache/hertzbeat/pull/1092 -* bugfix dm database monitoring connect error by @lisongning in https://github.com/apache/hertzbeat/pull/1094 -* add lisongning as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1096 -* update alert rule operator display "<=" to ">=" by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1097 -* [doc] add custom monitoring relate document by @tomsun28 in https://github.com/apache/hertzbeat/pull/1098 -* add YutingNie as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1103 -* Remove unreachable status by @YutingNie in https://github.com/apache/hertzbeat/pull/1102 -* 139 auto update alert status by @l646505418 in https://github.com/apache/hertzbeat/pull/1104 -* feat: aviator fn for str contains, exists & matches by @mikezzb in https://github.com/apache/hertzbeat/pull/1106 -* add mikezzb as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1107 -* bugfix common alarm do not need monitorId tag existed by @tomsun28 in https://github.com/apache/hertzbeat/pull/1108 -* bugfix extern alert do not have labels mapping inner monitor by @tomsun28 in https://github.com/apache/hertzbeat/pull/1111 -* feature: support apache spark 
metrics monitoring by @a-little-fool in https://github.com/apache/hertzbeat/pull/1114 -* add a-little-fool as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1116 -* [Feature]Add third report of TenCloud by @zqr10159 in https://github.com/apache/hertzbeat/pull/1113 -* [Feature]Add third report of TenCloud (#1113) by @zqr10159 in https://github.com/apache/hertzbeat/pull/1119 -* [manager] fix: can query by tags when tagValue is null by @l646505418 in https://github.com/apache/hertzbeat/pull/1118 -* bugfix the notification template environment variable display error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1120 -* add littlezhongzer as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1127 -* feature:monitor brearer token api, ignore letter case to comparison by @littlezhongzer in https://github.com/apache/hertzbeat/pull/1122 -* docs: enhance README by @mikezzb in https://github.com/apache/hertzbeat/pull/1128 -* Update app-oracle.yml by @ChenXiangxxxxx in https://github.com/apache/hertzbeat/pull/1129 -* add ChenXiangxxxxx as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1130 -* fix alarm silence strategy setting failed by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1131 -* support run sql script file in jdbc protocol config by @tomsun28 in https://github.com/apache/hertzbeat/pull/1117 -* bugfix return old cache json file when upgrade version by @tomsun28 in https://github.com/apache/hertzbeat/pull/1137 -* support ssh protocol config choose if reuse connection by @tomsun28 in https://github.com/apache/hertzbeat/pull/1136 -* feat(web): alert threshold UI support matches & contains by @mikezzb in https://github.com/apache/hertzbeat/pull/1138 -* support hertzbeat metrics collector cluster by @tomsun28 in https://github.com/apache/hertzbeat/pull/1101 -* add collector card in dashboard by @tomsun28 in 
https://github.com/apache/hertzbeat/pull/1147 -* bugfix: linux collect warning: bad syntax, perhaps a bogus '-' by @Mr-zhou315 in https://github.com/apache/hertzbeat/pull/1151 -* add Mr-zhou315 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1157 -* support config timezone locale language region on web ui by @tomsun28 in https://github.com/apache/hertzbeat/pull/1154 -* bugfix monitoring template app name already exists by @tomsun28 in https://github.com/apache/hertzbeat/pull/1152 -* bugfix can not startup when error monitoring template yml file by @tomsun28 in https://github.com/apache/hertzbeat/pull/1153 -* tags also deleted when the monitor is deleted by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1159 -* monitoring param host with http head will not be error reported by @littlezhongzer in https://github.com/apache/hertzbeat/pull/1155 -* [script] feature update build.sh and Dockerfile: detect app version a… by @XimfengYao in https://github.com/apache/hertzbeat/pull/1162 -* add XimfengYao as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1163 -* [doc] add collector clusters document by @tomsun28 in https://github.com/apache/hertzbeat/pull/1161 -* [hertzbeat] release hertzbeat version v1.4.0 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1168 +- [doc] add v1.3.2 publish doc by @tomsun28 in +- remove elasticsearch unused param index by @Ceilzcx in +- feature support monitoring apache airflow by @luoxuanzao in +- add luoxuanzao as a contributor for code by @allcontributors in +- [collector] bugfix sshd cannot use private key to connect by @gcdd1993 in +- bugfix update dashboard alerts cards height not consist by @tomsun28 in +- Feature#serverchan by @zqr10159 in +- bugfix dm database monitoring connect error by @lisongning in +- add lisongning as a contributor for code by @allcontributors in +- update alert rule operator display "<=" to ">=" by @Ceilzcx in +- [doc] 
add custom monitoring relate document by @tomsun28 in +- add YutingNie as a contributor for code by @allcontributors in +- Remove unreachable status by @YutingNie in +- 139 auto update alert status by @l646505418 in +- feat: aviator fn for str contains, exists & matches by @mikezzb in +- add mikezzb as a contributor for code by @allcontributors in +- bugfix common alarm do not need monitorId tag existed by @tomsun28 in +- bugfix extern alert do not have labels mapping inner monitor by @tomsun28 in +- feature: support apache spark metrics monitoring by @a-little-fool in +- add a-little-fool as a contributor for code by @allcontributors in +- [Feature]Add third report of TenCloud by @zqr10159 in +- [Feature]Add third report of TenCloud (#1113) by @zqr10159 in +- [manager] fix: can query by tags when tagValue is null by @l646505418 in +- bugfix the notification template environment variable display error by @tomsun28 in +- add littlezhongzer as a contributor for code by @allcontributors in +- feature:monitor brearer token api, ignore letter case to comparison by @littlezhongzer in +- docs: enhance README by @mikezzb in +- Update app-oracle.yml by @ChenXiangxxxxx in +- add ChenXiangxxxxx as a contributor for code by @allcontributors in +- fix alarm silence strategy setting failed by @Ceilzcx in +- support run sql script file in jdbc protocol config by @tomsun28 in +- bugfix return old cache json file when upgrade version by @tomsun28 in +- support ssh protocol config choose if reuse connection by @tomsun28 in +- feat(web): alert threshold UI support matches & contains by @mikezzb in +- support hertzbeat metrics collector cluster by @tomsun28 in +- add collector card in dashboard by @tomsun28 in +- bugfix: linux collect warning: bad syntax, perhaps a bogus '-' by @Mr-zhou315 in +- add Mr-zhou315 as a contributor for code by @allcontributors in +- support config timezone locale language region on web ui by @tomsun28 in +- bugfix monitoring template app name already 
exists by @tomsun28 in +- bugfix can not startup when error monitoring template yml file by @tomsun28 in +- tags also deleted when the monitor is deleted by @Ceilzcx in +- monitoring param host with http head will not be error reported by @littlezhongzer in +- [script] feature update build.sh and Dockerfile: detect app version a… by @XimfengYao in +- add XimfengYao as a contributor for code by @allcontributors in +- [doc] add collector clusters document by @tomsun28 in +- [hertzbeat] release hertzbeat version v1.4.0 by @tomsun28 in --- ## ⛄ 已支持 -> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! +> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! > 欢迎大家一起贡献你使用过程中自定义的通用监控类型监控模版。 - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -172,6 +173,5 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 欢迎star一波来支持我们哦。 -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** - +**Github: ** +**Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md index 6adbd15b1fd..b4fe8406ebc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-28-new-committer.md @@ -83,6 +83,6 @@ HertzBeat 赫兹跳动是一个拥有强大自定义监控能力,高性能集 > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 -Github: https://github.com/apache/hertzbeat +Github: 欢迎更多的用户参与到`HertzBeat`的开源协作中来,不管是一个错别字还是标点符号我们都非常欢迎。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md index bb29c6c9d0a..a0f73af7921 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md @@ -13,18 +13,18 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/home/0.png) -### 总结起来如下: +### 总结起来如下 - **重构netty client server, 采集器集群调度** @Ceilzcx @tomsun28 - **采集器集群的UI界面管理** @Ceilzcx @tomsun28 - **功能页面帮助信息模块和阈值表达式增强** 开源之夏和GLCC课题 @YutingNie @mikezzb - **新的控制台登陆界面和欢迎页面** - **监控指标名称国际化** 用户可以看指标的中英文名称啦,欢迎一起完善监控模版里面的i18n国际化资源 -- **支持kubernetes helm charts一键部署** 见 https://artifacthub.io/packages/search?repo=hertzbeat +- **支持kubernetes helm charts一键部署** 见 **更多的特性和BUG修复,稳定性提示** 感谢 @zqr10159 @Carpe-Wang @luxx-lq @l646505418 @LINGLUOJUN @luelueking @qyaaaa @novohit @gcdd1993 -### 上效果图: +### 上效果图 - 新的登陆页面UI @@ -62,9 +62,9 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### 尝试部署 @@ -91,9 +91,10 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) --- + ## ⛄ 已支持 -> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! +> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! 
> 欢迎大家一起贡献你使用过程中自定义的通用监控类型监控模版。 - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -109,6 +110,5 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN --- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** - +**Github: ** +**Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md index 5e18cd2ba5d..db282b4feee 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md @@ -12,7 +12,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/home/0.png) -### 总结起来如下: +### 总结起来如下 - **消息通知模版特性,开源之夏课题** - **支持华为云OBS存储监控模版文件** @@ -39,9 +39,9 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ### 尝试部署 @@ -68,9 +68,10 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) --- + ## ⛄ 已支持 -> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! +> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! 
> 欢迎大家一起贡献你使用过程中自定义的通用监控类型监控模版。 - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -86,8 +87,8 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN --- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ### **下载链接** @@ -110,4 +111,3 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - ⬇️ [hertzbeat-collector-macos_arm64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_arm64_1.4.2.tar.gz) - ⬇️ [hertzbeat-collector-macos_amd64_1.4.2.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-macos_amd64_1.4.2.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.2.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.2/hertzbeat-collector-windows64_1.4.2.zip) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md index 4550a1f4278..c8c138121d3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md @@ -21,11 +21,11 @@ keywords: [open source monitoring system, alerting system] - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** -### HertzBeat's 1.4.3 新版本发布啦! 
+### HertzBeat's 1.4.3 新版本发布啦 - 三方外部告警上报增强 - 支持 mysql api port website mongodb jvm redis 等监控指标的i18n国际化 @@ -63,9 +63,10 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) --- + ## ⛄ 已支持 -> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! +> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! > 欢迎大家一起贡献你使用过程中自定义的通用监控类型监控模版。 - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -81,73 +82,74 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN --- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** --- + ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! -* update package deploy doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1330 -* bugfix duplicate collect job when update monitor templates by @tomsun28 in https://github.com/apache/hertzbeat/pull/1332 -* bugfix number variable in freemarker template display error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1334 -* [alerter] Enhanced reporting of external general alert API by @SurryChen in https://github.com/apache/hertzbeat/pull/1326 -* [doc] update hertzbeat-mysql-tdengine readme by @jiashu1024 in https://github.com/apache/hertzbeat/pull/1335 -* add jiashu1024 as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1336 -* app-mysql.yml: Adjust slow query translation by @1036664317 in https://github.com/apache/hertzbeat/pull/1337 -* add 1036664317 as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1338 -* Bump com.google.guava:guava from 31.0.1-jre to 32.0.0-jre by @dependabot in https://github.com/apache/hertzbeat/pull/1339 -* [feature] 
support auto collect metrics by prometheus task by @tomsun28 in https://github.com/apache/hertzbeat/pull/1342 -* [doc] add vinci as new committer by @tomsun28 in https://github.com/apache/hertzbeat/pull/1341 -* [feature] add tag word cloud in dashboard by @tomsun28 in https://github.com/apache/hertzbeat/pull/1345 -* support custom prometheus endpoint path by @tomsun28 in https://github.com/apache/hertzbeat/pull/1346 -* bugfix tdengine query interval history metrics data with instance error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1348 -* unlimit Alert.java content field length by @xiaoguolong in https://github.com/apache/hertzbeat/pull/1351 -* add xiaoguolong as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1353 -* update monitor detail table ui layout by @tomsun28 in https://github.com/apache/hertzbeat/pull/1352 -* [doc]add star history by @zqr10159 in https://github.com/apache/hertzbeat/pull/1356 -* feature: app-mongodb.yml by @a-little-fool in https://github.com/apache/hertzbeat/pull/1359 -* alarm threshold support prometheus task metrics by @tomsun28 in https://github.com/apache/hertzbeat/pull/1354 -* support victoriametrics as metrics data storage by @tomsun28 in https://github.com/apache/hertzbeat/pull/1361 -* Add time type to support query_time of mysql and mariadb by @Clownsw in https://github.com/apache/hertzbeat/pull/1364 -* add Clownsw as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1365 -* Error occured when I followed running steps to start Front-web by @Calvin979 in https://github.com/apache/hertzbeat/pull/1366 -* add Calvin979 as a contributor for doc by @allcontributors in https://github.com/apache/hertzbeat/pull/1367 -* enriches the cncf landscape by @tomsun28 in https://github.com/apache/hertzbeat/pull/1368 -* Fix flaky test in CollectUtilTest by @bbelide2 in https://github.com/apache/hertzbeat/pull/1371 -* add bbelide2 as a contributor for code by 
@allcontributors in https://github.com/apache/hertzbeat/pull/1372 -* Fix flaky test replaceSmilingPlaceholder by @bbelide2 in https://github.com/apache/hertzbeat/pull/1373 -* add docker-compose script hertzbeat+mysql+victoria-metrics all in one by @tomsun28 in https://github.com/apache/hertzbeat/pull/1370 -* Feature: app-jvm.yml support for international name aliases by @Calvin979 in https://github.com/apache/hertzbeat/pull/1376 -* add Calvin979 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1377 -* feature: support monitoring spring gateway metrics by @a-little-fool in https://github.com/apache/hertzbeat/pull/1374 -* update code comment and doc, bugfix concurrent exception by @tomsun28 in https://github.com/apache/hertzbeat/pull/1378 -* update windows define and accept snmp leaf by @jinyaoMa in https://github.com/apache/hertzbeat/pull/1379 -* add jinyaoMa as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1380 -* fix exception when sending email has special chars by @Carpe-Wang in https://github.com/apache/hertzbeat/pull/1383 -* test: add e2e testing for some basic APIs by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1387 -* add LinuxSuRen as a contributor for code, and test by @allcontributors in https://github.com/apache/hertzbeat/pull/1389 -* bugfix auto generate monitor name error when add monitor by @tomsun28 in https://github.com/apache/hertzbeat/pull/1384 -* bugfix CalculateAlarm execAlertExpression NPE by @tomsun28 in https://github.com/apache/hertzbeat/pull/1388 -* Feature: app-redis.yml support for international name aliases by @Calvin979 in https://github.com/apache/hertzbeat/pull/1390 -* test: add more monitor related e2e testing case by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1391 -* chore: update the pr template about the e2e testing by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1392 -* add help header ui when update or add 
monitors by @tomsun28 in https://github.com/apache/hertzbeat/pull/1399 -* [hertzbeat] release hertzbeat version v1.4.3 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1400 +- update package deploy doc by @tomsun28 in +- bugfix duplicate collect job when update monitor templates by @tomsun28 in +- bugfix number variable in freemarker template display error by @tomsun28 in +- [alerter] Enhanced reporting of external general alert API by @SurryChen in +- [doc] update hertzbeat-mysql-tdengine readme by @jiashu1024 in +- add jiashu1024 as a contributor for doc by @allcontributors in +- app-mysql.yml: Adjust slow query translation by @1036664317 in +- add 1036664317 as a contributor for doc by @allcontributors in +- Bump com.google.guava:guava from 31.0.1-jre to 32.0.0-jre by @dependabot in +- [feature] support auto collect metrics by prometheus task by @tomsun28 in +- [doc] add vinci as new committer by @tomsun28 in +- [feature] add tag word cloud in dashboard by @tomsun28 in +- support custom prometheus endpoint path by @tomsun28 in +- bugfix tdengine query interval history metrics data with instance error by @tomsun28 in +- unlimit Alert.java content field length by @xiaoguolong in +- add xiaoguolong as a contributor for code by @allcontributors in +- update monitor detail table ui layout by @tomsun28 in +- [doc]add star history by @zqr10159 in +- feature: app-mongodb.yml by @a-little-fool in +- alarm threshold support prometheus task metrics by @tomsun28 in +- support victoriametrics as metrics data storage by @tomsun28 in +- Add time type to support query_time of mysql and mariadb by @Clownsw in +- add Clownsw as a contributor for code by @allcontributors in +- Error occured when I followed running steps to start Front-web by @Calvin979 in +- add Calvin979 as a contributor for doc by @allcontributors in +- enriches the cncf landscape by @tomsun28 in +- Fix flaky test in CollectUtilTest by @bbelide2 in +- add bbelide2 as a contributor for code by 
@allcontributors in +- Fix flaky test replaceSmilingPlaceholder by @bbelide2 in +- add docker-compose script hertzbeat+mysql+victoria-metrics all in one by @tomsun28 in +- Feature: app-jvm.yml support for international name aliases by @Calvin979 in +- add Calvin979 as a contributor for code by @allcontributors in +- feature: support monitoring spring gateway metrics by @a-little-fool in +- update code comment and doc, bugfix concurrent exception by @tomsun28 in +- update windows define and accept snmp leaf by @jinyaoMa in +- add jinyaoMa as a contributor for code by @allcontributors in +- fix exception when sending email has special chars by @Carpe-Wang in +- test: add e2e testing for some basic APIs by @LinuxSuRen in +- add LinuxSuRen as a contributor for code, and test by @allcontributors in +- bugfix auto generate monitor name error when add monitor by @tomsun28 in +- bugfix CalculateAlarm execAlertExpression NPE by @tomsun28 in +- Feature: app-redis.yml support for international name aliases by @Calvin979 in +- test: add more monitor related e2e testing case by @LinuxSuRen in +- chore: update the pr template about the e2e testing by @LinuxSuRen in +- add help header ui when update or add monitors by @tomsun28 in +- [hertzbeat] release hertzbeat version v1.4.3 by @tomsun28 in ## New Contributors -* @1036664317 made their first contribution in https://github.com/apache/hertzbeat/pull/1337 -* @dependabot made their first contribution in https://github.com/apache/hertzbeat/pull/1339 -* @xiaoguolong made their first contribution in https://github.com/apache/hertzbeat/pull/1351 -* @Clownsw made their first contribution in https://github.com/apache/hertzbeat/pull/1364 -* @Calvin979 made their first contribution in https://github.com/apache/hertzbeat/pull/1366 -* @bbelide2 made their first contribution in https://github.com/apache/hertzbeat/pull/1371 -* @jinyaoMa made their first contribution in https://github.com/apache/hertzbeat/pull/1379 -* @LinuxSuRen made their 
first contribution in https://github.com/apache/hertzbeat/pull/1387 +- @1036664317 made their first contribution in +- @dependabot made their first contribution in +- @xiaoguolong made their first contribution in +- @Clownsw made their first contribution in +- @Calvin979 made their first contribution in +- @bbelide2 made their first contribution in +- @jinyaoMa made their first contribution in +- @LinuxSuRen made their first contribution in -**Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.2...v1.4.3 +**Full Changelog**: --- @@ -164,8 +166,8 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ### **下载链接** @@ -188,4 +190,3 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - ⬇️ [hertzbeat-collector-macos_arm64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_arm64_1.4.3.tar.gz) - ⬇️ [hertzbeat-collector-macos_amd64_1.4.3.tar.gz](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-macos_amd64_1.4.3.tar.gz) - ⬇️ [hertzbeat-collector-windows64_1.4.3.zip](https://github.com/apache/hertzbeat/releases/download/v1.4.3/hertzbeat-collector-windows64_1.4.3.zip) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md index c699daba0a1..5cdc243aa58 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md @@ -41,6 +41,7 @@ keywords: [open source monitoring system, alerting system] 感谢Tom哥和我的ospp导师郑晨鑫,他们在我接触开源社区的过程中给出了许多帮助和建议。目前我仍然在社区中负责部分代码的开发,希望Hertzbeat以后可以越来越好! 
--- + # New Committer - 淞筱 **姓名:周书胜** @@ -57,13 +58,13 @@ keywords: [open source monitoring system, alerting system] ## 开源贡献 -* 支持Spring Gateway、Apache Spark、Apache Hive等服务指标采集 -* 自定义nginx、pop3协议,对Nginx,POP3邮箱服务器进行指标采集,并添加相应帮助文档 +- 支持Spring Gateway、Apache Spark、Apache Hive等服务指标采集 +- 自定义nginx、pop3协议,对Nginx,POP3邮箱服务器进行指标采集,并添加相应帮助文档 ## 收获 -* 接触了更加优秀、结构更加复杂的大型项目,提高了编程和解决问题的能力 -* 将理论知识付诸于实践,收获了JUC,微服务相关的开发经验,以及宝贵的项目经历 +- 接触了更加优秀、结构更加复杂的大型项目,提高了编程和解决问题的能力 +- 将理论知识付诸于实践,收获了JUC,微服务相关的开发经验,以及宝贵的项目经历 ## 感谢社区小伙伴 @@ -73,8 +74,8 @@ keywords: [open source monitoring system, alerting system] ## 给新人的一些建议 -* 初次参与开源项目时,可以从简单的任务开始。逐渐熟悉项目的代码和流程,并逐步承担更复杂的任务。 -* 如果遇到自己无法解决的问题时,可以多多请教社区的小伙伴们。 +- 初次参与开源项目时,可以从简单的任务开始。逐渐熟悉项目的代码和流程,并逐步承担更复杂的任务。 +- 如果遇到自己无法解决的问题时,可以多多请教社区的小伙伴们。 --- @@ -130,8 +131,8 @@ keywords: [open source monitoring system, alerting system] > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** 欢迎更多小伙伴参与到HertzBeat的开源协作中来,不管是一个错别字还是标点符号我们都非常欢迎,大家一起学习进步,目标做一个世界级开源软件。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md index cbee4825d83..e375a5c15b9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md @@ -21,11 +21,11 @@ keywords: [open source monitoring system, alerting system] - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** -### HertzBeat's 1.4.4 新版本发布啦! 
+### HertzBeat's 1.4.4 新版本发布啦 - support snmp v3 monitoring protocol @TJxiaobao - support monitoring NebulaGraph metrics @ZY945 @@ -64,9 +64,10 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) --- + ## ⛄ 已支持 -> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! +> 我们将监控采集类型(mysql,jvm,k8s)都定义为yml监控模版,用户可以导入这些模版来支持对应类型的监控! > 欢迎大家一起贡献你使用过程中自定义的通用监控类型监控模版。 - Site Monitor, Port Availability, Http Api, Ping Connectivity, Jvm, SiteMap Full Site, Ssl Certificate, SpringBoot, FTP Server @@ -82,86 +83,87 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN --- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** --- + ### What's Changed > Welcome to explore more new version updates, thanks to the hard work of the community partners, love 💗! -* bugfix metrics tags value store jpa data-storage error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1403 -* add smtp protocol and support smtp monitoring by @ZY945 in https://github.com/apache/hertzbeat/pull/1407 -* add ZY945 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1409 -* support new parse type 'log' in ssh collect protocol by @tomsun28 in https://github.com/apache/hertzbeat/pull/1410 -* add ntp protocol and support ntp monitoring by @ZY945 in https://github.com/apache/hertzbeat/pull/1411 -* monitoring the availability of websockets through handshake. by @ZY945 in https://github.com/apache/hertzbeat/pull/1413 -* Task-1386 When adding tags in tag management, random colors are given by default. 
by @prolevel1 in https://github.com/apache/hertzbeat/pull/1412 -* add prolevel1 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1415 -* - -# 1397 feature: support for dns monitoring by @Calvin979 in https://github.com/apache/hertzbeat/pull/1416 - -* Support monitoring hive metrics by @a-little-fool in https://github.com/apache/hertzbeat/pull/1417 -* support legend pageable in history data charts by @tomsun28 in https://github.com/apache/hertzbeat/pull/1414 -* update component tip and help tip doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1418 -* feature: support monitoring nginx metrics and add a help doc by @a-little-fool in https://github.com/apache/hertzbeat/pull/1420 -* update parser to parse from prometheus txt metrics data by @tomsun28 in https://github.com/apache/hertzbeat/pull/1421 -* support monitoring memcached metrics and add a help doc by @ZY945 in https://github.com/apache/hertzbeat/pull/1423 -* support all ssh connect key exchange by @tomsun28 in https://github.com/apache/hertzbeat/pull/1424 -* doc add code of conduct by @tomsun28 in https://github.com/apache/hertzbeat/pull/1425 -* update label structure store in victoria metrics, make it prometheus like by @tomsun28 in https://github.com/apache/hertzbeat/pull/1426 -* feature: support monitoring pop3 metrics and add help doc by @a-little-fool in https://github.com/apache/hertzbeat/pull/1427 -* Update sidebars.json by @a-little-fool in https://github.com/apache/hertzbeat/pull/1428 -* Add zh-cn help doc by @a-little-fool in https://github.com/apache/hertzbeat/pull/1429 -* update monitoring state un-manage to unmonitored, update pic by @tomsun28 in https://github.com/apache/hertzbeat/pull/1430 -* Add jpa to date type storage by @Clownsw in https://github.com/apache/hertzbeat/pull/1431 -* bugfix ^o^ token error, protect metrics api auth by @tomsun28 in https://github.com/apache/hertzbeat/pull/1434 -* Add relevant documents for SMTP and NTP by @ZY945 
in https://github.com/apache/hertzbeat/pull/1437 -* bugfix threshold init error in mysql env by @tomsun28 in https://github.com/apache/hertzbeat/pull/1435 -* app-rabbitmq.yml support for international name aliases by @ZY945 in https://github.com/apache/hertzbeat/pull/1439 -* fix: error create lru-cache-timeout-cleaner thread by @Clownsw in https://github.com/apache/hertzbeat/pull/1438 -* app-rabbitmq.yml Modifying Error Fields. by @ZY945 in https://github.com/apache/hertzbeat/pull/1440 -* support monitoring NebulaGraph metrics and add help doc by @ZY945 in https://github.com/apache/hertzbeat/pull/1441 -* Fix Nginx Collect validateParams function NPE by @Clownsw in https://github.com/apache/hertzbeat/pull/1442 -* feature: add metrics i18n for app-springboot3.yml by @liyin in https://github.com/apache/hertzbeat/pull/1445 -* feat: add metrics i18n for app-docker.yml by @liyin in https://github.com/apache/hertzbeat/pull/1446 -* update docker-compose script and fix version by @tomsun28 in https://github.com/apache/hertzbeat/pull/1447 -* bugfix java.lang.IllegalArgumentException: Illegal character in query… by @tomsun28 in https://github.com/apache/hertzbeat/pull/1443 -* bugfix delete monitor error after monitor canceled by @ZhangZixuan1994 in https://github.com/apache/hertzbeat/pull/1451 -* add ZhangZixuan1994 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1454 -* remove sleep, probably busy-waiting by @tomsun28 in https://github.com/apache/hertzbeat/pull/1456 -* doc add new committer ZY945 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1453 -* Update app-zookeeper.yml by @hurenjie1 in https://github.com/apache/hertzbeat/pull/1458 -* add hurenjie1 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1459 -* update dashboard ui, remove ssh custom SignatureFactories, update app name by @tomsun28 in https://github.com/apache/hertzbeat/pull/1460 -* Task Monitoring Template Yml Metrics 
I18n | 监控模版指标国际化任务认领 #1212 by @tslj1024 in https://github.com/apache/hertzbeat/pull/1461 -* add tslj1024 as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1462 -* Add alarm trigger time for alarm restore by @Calvin979 in https://github.com/apache/hertzbeat/pull/1464 -* bugfix history range query not work when victoria-metrics store by @tomsun28 in https://github.com/apache/hertzbeat/pull/1463 -* bugfix springboot3 translation by @liyin in https://github.com/apache/hertzbeat/pull/1467 -* bugfix telegram-notice can not input bot-token by @tomsun28 in https://github.com/apache/hertzbeat/pull/1465 -* feat: support hostname target by @ldysdu in https://github.com/apache/hertzbeat/pull/1455 -* add ldysdu as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1471 -* feature support snmp v3 monitoring protocol by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1469 -* bugfix alarm trigger-times not work when alarm and recovered trigger cyclically by @tomsun28 in https://github.com/apache/hertzbeat/pull/1468 -* update switch monitoring metrics i18n by @tomsun28 in https://github.com/apache/hertzbeat/pull/1472 -* fixed: snmpv3 contextName bug by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1473 -* Fix npt of webhook notify by @Calvin979 in https://github.com/apache/hertzbeat/pull/1474 -* hertzbeat release hertzbeat version v1.4.4 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1475 -* bugfix nginx collect http deadlock error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1476 -* alarm calculate ignore metrics collect code - TIMEOUT by @tomsun28 in https://github.com/apache/hertzbeat/pull/1478 +- bugfix metrics tags value store jpa data-storage error by @tomsun28 in +- add smtp protocol and support smtp monitoring by @ZY945 in +- add ZY945 as a contributor for code by @allcontributors in +- support new parse type 'log' in ssh collect protocol by @tomsun28 in +- add 
ntp protocol and support ntp monitoring by @ZY945 in +- monitoring the availability of websockets through handshake. by @ZY945 in +- Task-1386 When adding tags in tag management, random colors are given by default. by @prolevel1 in +- add prolevel1 as a contributor for code by @allcontributors in +- + +# 1397 feature: support for dns monitoring by @Calvin979 in + +- Support monitoring hive metrics by @a-little-fool in +- support legend pageable in history data charts by @tomsun28 in +- update component tip and help tip doc by @tomsun28 in +- feature: support monitoring nginx metrics and add a help doc by @a-little-fool in +- update parser to parse from prometheus txt metrics data by @tomsun28 in +- support monitoring memcached metrics and add a help doc by @ZY945 in +- support all ssh connect key exchange by @tomsun28 in +- doc add code of conduct by @tomsun28 in +- update label structure store in victoria metrics, make it prometheus like by @tomsun28 in +- feature: support monitoring pop3 metrics and add help doc by @a-little-fool in +- Update sidebars.json by @a-little-fool in +- Add zh-cn help doc by @a-little-fool in +- update monitoring state un-manage to unmonitored, update pic by @tomsun28 in +- Add jpa to date type storage by @Clownsw in +- bugfix ^o^ token error, protect metrics api auth by @tomsun28 in +- Add relevant documents for SMTP and NTP by @ZY945 in +- bugfix threshold init error in mysql env by @tomsun28 in +- app-rabbitmq.yml support for international name aliases by @ZY945 in +- fix: error create lru-cache-timeout-cleaner thread by @Clownsw in +- app-rabbitmq.yml Modifying Error Fields. 
by @ZY945 in +- support monitoring NebulaGraph metrics and add help doc by @ZY945 in +- Fix Nginx Collect validateParams function NPE by @Clownsw in +- feature: add metrics i18n for app-springboot3.yml by @liyin in +- feat: add metrics i18n for app-docker.yml by @liyin in +- update docker-compose script and fix version by @tomsun28 in +- bugfix java.lang.IllegalArgumentException: Illegal character in query… by @tomsun28 in +- bugfix delete monitor error after monitor canceled by @ZhangZixuan1994 in +- add ZhangZixuan1994 as a contributor for code by @allcontributors in +- remove sleep, probably busy-waiting by @tomsun28 in +- doc add new committer ZY945 by @tomsun28 in +- Update app-zookeeper.yml by @hurenjie1 in +- add hurenjie1 as a contributor for code by @allcontributors in +- update dashboard ui, remove ssh custom SignatureFactories, update app name by @tomsun28 in +- Task Monitoring Template Yml Metrics I18n | 监控模版指标国际化任务认领 #1212 by @tslj1024 in +- add tslj1024 as a contributor for code by @allcontributors in +- Add alarm trigger time for alarm restore by @Calvin979 in +- bugfix history range query not work when victoria-metrics store by @tomsun28 in +- bugfix springboot3 translation by @liyin in +- bugfix telegram-notice can not input bot-token by @tomsun28 in +- feat: support hostname target by @ldysdu in +- add ldysdu as a contributor for code by @allcontributors in +- feature support snmp v3 monitoring protocol by @TJxiaobao in +- bugfix alarm trigger-times not work when alarm and recovered trigger cyclically by @tomsun28 in +- update switch monitoring metrics i18n by @tomsun28 in +- fixed: snmpv3 contextName bug by @TJxiaobao in +- Fix npt of webhook notify by @Calvin979 in +- hertzbeat release hertzbeat version v1.4.4 by @tomsun28 in +- bugfix nginx collect http deadlock error by @tomsun28 in +- alarm calculate ignore metrics collect code - TIMEOUT by @tomsun28 in ## New Contributors -* @ZY945 made their first contribution in 
https://github.com/apache/hertzbeat/pull/1407 -* @prolevel1 made their first contribution in https://github.com/apache/hertzbeat/pull/1412 -* @ZhangZixuan1994 made their first contribution in https://github.com/apache/hertzbeat/pull/1451 -* @hurenjie1 made their first contribution in https://github.com/apache/hertzbeat/pull/1458 -* @tslj1024 made their first contribution in https://github.com/apache/hertzbeat/pull/1461 -* @ldysdu made their first contribution in https://github.com/apache/hertzbeat/pull/1455 +- @ZY945 made their first contribution in +- @prolevel1 made their first contribution in +- @ZhangZixuan1994 made their first contribution in +- @hurenjie1 made their first contribution in +- @tslj1024 made their first contribution in +- @ldysdu made their first contribution in -**Full Changelog**: https://github.com/apache/hertzbeat/compare/v1.4.3...v1.4.4 +**Full Changelog**: --- @@ -178,8 +180,8 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ---- -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Github: ** +**Gitee: ** ### **下载链接** @@ -206,4 +208,3 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN **hertzbeat docker compose script** - ⬇️ [docker-compose](https://github.com/apache/hertzbeat/releases/download/v1.4.4/docker-compose.zip) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md index d9b0409be70..3495200adc9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-04-17-to-apache.md @@ -301,21 +301,20 @@ HertzBeat 开源以来主仓库在 GitHub 累计收获 **4.5K+** star, Gitee **2 > > 愿 HertzBeat 在未来的旅程中持续发展,不断创新,成为开源社区中的亮眼之星。我们相信,凭借团队的出色才能和社区的广泛支持,HertzBeat 必将实现更加辉煌的成就,为全球的开发者和用户提供优质的服务和体验。Dromara 将继续全力支持和关注 HertzBeat 的发展,期待它创造出更加精彩的篇章! 
---- +--- **项目地址** -**https://github.com/apache/hertzbeat** +**** 欢迎 star 我们🐶🐶🐶 **官方网址** -**https://hertzbeat.apache.org/** +**** **邮件列表** -**dev@hertzbeat.apache.org** - -发送任意内容至 dev-subcribe@hertzbeat.apache.org 订阅 +**** +发送任意内容至 订阅 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md index 755dae83375..5d64d7933a9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md @@ -13,9 +13,9 @@ - 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 - 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 -**Github: https://github.com/apache/hertzbeat** +**Github: ** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Gitee: ** ## 什么是开源之夏? diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md index e0f982e0e7a..031deb4806a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-15-hertzbeat-v1.6.0.md @@ -10,16 +10,16 @@ keywords: [open source, monitoring, alerting] **Hi 朋友们,我们很高兴地宣布,Apache HertzBeat (incubating) 的了第一个Apache版本 v1.6.0 发布啦!🎉.** -经过近五个月的社区开发迭代贡献和两个月的Apache Incubator孵化过程,Apache HertzBeat (incubating) v1.6.0 终于出来了。 -这个版本我们增加了对OpenAi监控,Redfish协议服务器,插件机制,支持了NebulaGraph, Apache Yarn, HDFS, Hbase, Storm等更多功能特性。 -由于License兼容问题,我们在底层替换了ORM框架,计算框架等多个依赖,Hibernate -> EclipseLink, 这也算是JPA生态下为数不多的迁移踩坑实践。 +经过近五个月的社区开发迭代贡献和两个月的Apache Incubator孵化过程,Apache HertzBeat (incubating) v1.6.0 终于出来了。 +这个版本我们增加了对OpenAi监控,Redfish协议服务器,插件机制,支持了NebulaGraph, Apache Yarn, HDFS, Hbase, Storm等更多功能特性。 +由于License兼容问题,我们在底层替换了ORM框架,计算框架等多个依赖,Hibernate -> 
EclipseLink, 这也算是JPA生态下为数不多的迁移踩坑实践。 同时修复了一些bug和优化了一些功能,更完善的文档。欢迎大家尝试使用,提出宝贵意见和建议,共同推动HertzBeat的发展。🎉 **当然,最重要的是给在社区的贡献者们致以最好的感谢!** -下载页面: https://hertzbeat.apache.org/docs/download/ +下载页面: -升级指南: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-update/ +升级指南: ## 什么是 HertzBeat? @@ -39,9 +39,9 @@ keywords: [open source, monitoring, alerting] ![hertzBeat](/img/docs/hertzbeat-arch.png) -**Github: https://github.com/apache/hertzbeat** +**Github: ** -## HertzBeat's 1.6.0 Version Release! +## HertzBeat's 1.6.0 Version Release ## 亮点更新 @@ -79,292 +79,292 @@ keywords: [open source, monitoring, alerting] ## What's Changed -* bugfix collector can not startup alone by @tomsun28 in https://github.com/apache/hertzbeat/pull/1633 -* translate some hertzbeat blog by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1635 -* Check class description by @ZY945 in https://github.com/apache/hertzbeat/pull/1638 -* translate class description to english by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1641 -* support monitor metrics name i18n: ClickHouse by @ZY945 in https://github.com/apache/hertzbeat/pull/1642 -* translate blog 20220601 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1646 -* add a online prometheus parser and a prometheus-like push style. 
by @vinci-897 in https://github.com/apache/hertzbeat/pull/1644 -* translate blog 20220320 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1647 -* support monitor metrics name i18n: DynamicTp by @ZY945 in https://github.com/apache/hertzbeat/pull/1649 -* translate blog 20220228 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1648 -* translate blog 20220310 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1651 -* translate blog 20220904 to English by @vinci-897 in https://github.com/apache/hertzbeat/pull/1652 -* support monitor metrics name i18n: Airflow by @ZY945 in https://github.com/apache/hertzbeat/pull/1654 -* support monitor metrics name i18n: IoTDB by @ZY945 in https://github.com/apache/hertzbeat/pull/1659 -* Translate 2022-02-11-hertzbeat document by @wang1027-wqh in https://github.com/apache/hertzbeat/pull/1660 -* bugfix The annotation @Transactional specifies rollbackFor. by @handy-git in https://github.com/apache/hertzbeat/pull/1643 -* add handy-git as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1661 -* feature:Translate 2022-02-17-hertzbeat Document by @wang1027-wqh in https://github.com/apache/hertzbeat/pull/1662 -* support monitor metrics name i18n: rocketmq by @ZY945 in https://github.com/apache/hertzbeat/pull/1663 -* [doc] update relate doc and readme by @tomsun28 in https://github.com/apache/hertzbeat/pull/1667 -* bugfix monitoring mongodb not work in springboot3 by @ZY945 in https://github.com/apache/hertzbeat/pull/1668 -* [feature] add storm monitor by @starmilkxin in https://github.com/apache/hertzbeat/pull/1673 -* [bugfix] fixed the issue in http_sd where services were incorrectly reported as available when they were actually unavailable by @starmilkxin in https://github.com/apache/hertzbeat/pull/1678 -* remove mysql-oracle dependency jar from release package lib by @tomsun28 in https://github.com/apache/hertzbeat/pull/1680 -* System config 
theme by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1636 -* update webapp menu layout and doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1682 -* bugfix can not find mysql dependency when startup by @tomsun28 in https://github.com/apache/hertzbeat/pull/1686 -* support config common aes secret by @tomsun28 in https://github.com/apache/hertzbeat/pull/1683 -* [bugfix]fix the issue of add redis cluster node test error report(#1601) by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1684 -* add LiuTianyou as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1687 -* mongodb monitoring support custom connection timeout param by @ZY945 in https://github.com/apache/hertzbeat/pull/1697 -* bugfix old data decode error when use new common-secret by @tomsun28 in https://github.com/apache/hertzbeat/pull/1696 -* [bugfix] fix bug where reopening pop-up window still retained previously edited data after closing. by @starmilkxin in https://github.com/apache/hertzbeat/pull/1698 -* monitor center add search type modal by @tomsun28 in https://github.com/apache/hertzbeat/pull/1699 -* fix status page logo overflow by @tomsun28 in https://github.com/apache/hertzbeat/pull/1700 -* bugfix npe monitor jobid may be null by @tomsun28 in https://github.com/apache/hertzbeat/pull/1701 -* support custom main menus in monitor template by @tomsun28 in https://github.com/apache/hertzbeat/pull/1703 -* update home website doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1712 -* [Improve] change package group to org apache hertzbeat by @vinci-897 in https://github.com/apache/hertzbeat/pull/1724 -* [improve] initial license clean up by @tomsun28 in https://github.com/apache/hertzbeat/pull/1725 -* update manager and collector logback config(#1704) by @handy-git in https://github.com/apache/hertzbeat/pull/1723 -* fix(sec): upgrade com.h2database:h2 to by @WinterKi1ler in https://github.com/apache/hertzbeat/pull/1718 -* add 
WinterKi1ler as a contributor for code by @allcontributors in https://github.com/apache/hertzbeat/pull/1736 -* update asf branch protected check by @tomsun28 in https://github.com/apache/hertzbeat/pull/1738 -* [doc]Update star chart by @zqr10159 in https://github.com/apache/hertzbeat/pull/1737 -* [fixed] fixed click collector online offline button error by @miki-hmt in https://github.com/apache/hertzbeat/pull/1734 -* [improve] initial doc clean up by @tomsun28 in https://github.com/apache/hertzbeat/pull/1741 -* [Improvement]Support multiple receivers. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1731 -* [improvement]Add lisence. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1746 -* Backend LICENSE Initialize by @wang1027-wqh in https://github.com/apache/hertzbeat/pull/1744 -* Back-end dependency upgrade by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1743 -* [Improve] run hertzbeat in docker compose support dependen service condition by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/1748 -* [bugfix] fix statuspage index exception by @makechoicenow in https://github.com/apache/hertzbeat/pull/1747 -* remove unlicensed dependency 'wolfy87 eventemitter' by @alpha951 in https://github.com/apache/hertzbeat/pull/1745 -* [improve] auto label when pr, update asf config by @tomsun28 in https://github.com/apache/hertzbeat/pull/1749 -* [improve] update asf config set required status checks context by @tomsun28 in https://github.com/apache/hertzbeat/pull/1751 -* [improve] home add apache info by @a-little-fool in https://github.com/apache/hertzbeat/pull/1740 -* [doc] Change e2e path by @crossoverJie in https://github.com/apache/hertzbeat/pull/1758 -* fix : ingress tls inoperative by @PeixyJ in https://github.com/apache/hertzbeat/pull/1760 -* [refactor] method improvement rationale by @dukbong in https://github.com/apache/hertzbeat/pull/1757 -* [improve] create disclaimer file, add incubating in describe by @tomsun28 in 
https://github.com/apache/hertzbeat/pull/1764 -* [improve] update new hertzbeat brand logo, update doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1761 -* Complete the code comment translation of the common module by @Hi-Mr-Wind in https://github.com/apache/hertzbeat/pull/1766 -* Remove unnecessary if-else statement. by @dukbong in https://github.com/apache/hertzbeat/pull/1770 -* [doc] remove and translate chinese to english in warehous by @xuziyang in https://github.com/apache/hertzbeat/pull/1773 -* Replace deprecated methods with builder pattern for RedisURI construction by @dukbong in https://github.com/apache/hertzbeat/pull/1772 -* remove and translate chinese to english in collector,script,push,remoting and manager module by @MananPoojara in https://github.com/apache/hertzbeat/pull/1774 -* Added the function of sending SMS messages through Alibaba Cloud. by @lwqzz in https://github.com/apache/hertzbeat/pull/1768 -* [improve]Add frontend license. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1776 -* [test] Add RedisSingleCollectImplTest by @crossoverJie in https://github.com/apache/hertzbeat/pull/1784 -* [refactor] add override annotation by @handy-git in https://github.com/apache/hertzbeat/pull/1782 -* '[docs]bugfix: display syntax error of ipmi protocol' by @tomorrowshipyltm in https://github.com/apache/hertzbeat/pull/1793 -* [doc] translate alerter moudle code chinese to english by @tomsun28 in https://github.com/apache/hertzbeat/pull/1765 -* [refactor] database-related properties class, type changed to record by @xuziyang in https://github.com/apache/hertzbeat/pull/1786 -* Fix snmp template unit conversion problem by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1796 -* [doc] Add help documentation for clickhouse monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1798 -* [feature:update-checkstyle] Limit the java file header by @YxYL6125 in https://github.com/apache/hertzbeat/pull/1799 -* [improve]Add 
external lib folder to store mysql and oracle driver. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1783 -* [Improve]When multiple lines are returned, each alarm is triggered instead of only the first alarm by @15613060203 in https://github.com/apache/hertzbeat/pull/1797 -* [doc] add team page in website by @alpha951 in https://github.com/apache/hertzbeat/pull/1800 -* [feature] Improve the import checkstyle by @crossoverJie in https://github.com/apache/hertzbeat/pull/1802 -* [doc] Add help document for dns monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1804 -* [improve] preventing NPE by @dukbong in https://github.com/apache/hertzbeat/pull/1808 -* [refactor] change the warehouse properties the type to record by @xuziyang in https://github.com/apache/hertzbeat/pull/1806 -* Refactor: upgrade syntax to jdk17(instanceof & switch) by @Calvin979 in https://github.com/apache/hertzbeat/pull/1807 -* [test] Add NginxCollect test by @crossoverJie in https://github.com/apache/hertzbeat/pull/1809 -* [website] update team page by @tomsun28 in https://github.com/apache/hertzbeat/pull/1803 -* [test] Add RedisClusterCollectImplTest by @crossoverJie in https://github.com/apache/hertzbeat/pull/1789 -* [improve] Fix typo ReqStatusResponse by @crossoverJie in https://github.com/apache/hertzbeat/pull/1811 -* Comparing N objects for null with Assert.noNullElements(). 
by @dukbong in https://github.com/apache/hertzbeat/pull/1814 -* [doc] Add help document for elasticsearch monitoring and ftp monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1815 -* [doc] add help documentation for huawei switch monitoring by @Alanxtl in https://github.com/apache/hertzbeat/pull/1813 -* chore: upgrade the api-testing (e2e) to v0.0.16 by @LinuxSuRen in https://github.com/apache/hertzbeat/pull/1817 -* [Remove][Improve]Mail config by @zqr10159 in https://github.com/apache/hertzbeat/pull/1819 -* Remove and translate chinese to english in code by @dukbong in https://github.com/apache/hertzbeat/pull/1816 -* [feature]Add monitoring for Hbase Master by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1820 -* [doc] resolve code conflicts and coverage caused by pr(#1813) merge by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1821 -* [doc] Add help document for tidb and nacos monitoring by @Alanxtl in https://github.com/apache/hertzbeat/pull/1823 -* [improve] use eclipselink orm replace of hibernate orm by @tomsun28 in https://github.com/apache/hertzbeat/pull/1801 -* [improve] Add whitespace checkstyle by @crossoverJie in https://github.com/apache/hertzbeat/pull/1824 -* [bugfix] dns monitoring template add query class parameter by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1825 -* [Refactor] Preventing Unnecessary Object Creation and Using Utility Methods by @dukbong in https://github.com/apache/hertzbeat/pull/1818 -* [doc]Add and modify Doris FE Chinese and English documentation by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1828 -* [docs] Optimize: add help docs for UDP port & Springboot3 help doc by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1832 -* Code Simplification, Structure Changes, and Translation Work, Along with a Question by @dukbong in https://github.com/apache/hertzbeat/pull/1827 -* [doc] add help document for mongodb monitoring by @LiuTianyou in 
https://github.com/apache/hertzbeat/pull/1834 -* [collector] fix: inverts the compareTo sort of MetricsCollect run queue by @Pzz-2021 in https://github.com/apache/hertzbeat/pull/1837 -* [doc]Doc add debian system by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1842 -* [feature] Add Apache Hbase RegionServer monitoring by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1833 -* [improve] Optimize websocket monitor by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1838 -* [refactor] Split the WarehouseProperties class by @xuziyang in https://github.com/apache/hertzbeat/pull/1830 -* [test] Add test for HttpsdImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1840 -* [fix] Fix the wrong comment by @xuziyang in https://github.com/apache/hertzbeat/pull/1843 -* [refactor] trans and use assert by @dukbong in https://github.com/apache/hertzbeat/pull/1841 -* [bugfix] modify the command in the mongodb monitoring template by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1844 -* [bigfix]Fix Debian system Top10 monitoring bug by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1846 -* [cleanup]Delete the corresponding Chinese comments by @hudongdong129 in https://github.com/apache/hertzbeat/pull/1847 -* [doc] translates chinese comment to english. 
by @dukbong in https://github.com/apache/hertzbeat/pull/1853 -* [doc] fix error and add help document for prometheus task by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1852 -* [feature] Add Linux process monitoring by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1857 -* [test] Add test for FtpCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1856 -* [improve] use apache jexl replace of aviator by @tomsun28 in https://github.com/apache/hertzbeat/pull/1859 -* [bugfix] jpa data save logic repair by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1863 -* [feature] add influxdb metrics monitoring by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1730 -* [doc] add help document for rocketmq by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1874 -* [improve] Imporve checkstyle of test code. by @crossoverJie in https://github.com/apache/hertzbeat/pull/1864 -* [feature] Support Redfish protocol to monitoring server by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/1867 -* Fix debian monitoring template issue about process monitoring by @LLP2333 in https://github.com/apache/hertzbeat/pull/1868 -* [bugfix] centos Top10 shows missing one by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1870 -* [improve] add website apache incubator footer by @tomsun28 in https://github.com/apache/hertzbeat/pull/1860 -* [doc] update help document by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1861 -* [featurn] support flyway database migration by @tomsun28 in https://github.com/apache/hertzbeat/pull/1875 -* [improve] Delete the timestamp field in the class MetricFamily.Metric by @xuziyang in https://github.com/apache/hertzbeat/pull/1878 -* [improve] Use java.lang.AutoCloseable instead of CacheCloseable by @crossoverJie in https://github.com/apache/hertzbeat/pull/1879 -* [bugfix]Fix top10 process command. 
by @zqr10159 in https://github.com/apache/hertzbeat/pull/1876 -* [feature] support the VictoriaMetrics cluster by @xuziyang in https://github.com/apache/hertzbeat/pull/1880 -* [improve] Refactor common cache code by @crossoverJie in https://github.com/apache/hertzbeat/pull/1881 -* Eliminate Unnecessary Unboxing and Generics by @handy-git in https://github.com/apache/hertzbeat/pull/1882 -* [bugfix][doc]Add kafka sidebar. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1883 -* [doc] I18n for monitoring template yml metrics by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1888 -* [refactor] StoreProperties is no longer useful, delete it by @xuziyang in https://github.com/apache/hertzbeat/pull/1887 -* bugfix statistical metrics data matching fails by @tomsun28 in https://github.com/apache/hertzbeat/pull/1884 -* [doc] add help doc for flink monitoring by @HeartLinked in https://github.com/apache/hertzbeat/pull/1893 -* [doc] add almalinux documentation by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1892 -* [improve] Missing a generic by @crossoverJie in https://github.com/apache/hertzbeat/pull/1889 -* [bugfix] Fixed some metrics of Jexlespression not matching in Elasticsearch by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1894 -* feat(*): Support Time Type to Tengine Data Storage by @Clownsw in https://github.com/apache/hertzbeat/pull/1890 -* [feature] support random jwt secret when not custom by @tomsun28 in https://github.com/apache/hertzbeat/pull/1897 -* [doc] add opensuse doc by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1902 -* fix when manager restart, collect register error by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1896 -* [bugfix] fix can not use empty collection as query params in eclipselink by @tomsun28 in https://github.com/apache/hertzbeat/pull/1900 -* [doc] update doc add download page and pic by @tomsun28 in https://github.com/apache/hertzbeat/pull/1904 -* [test] Add test 
for UdpCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1906 -* fix license by @yqxxgh in https://github.com/apache/hertzbeat/pull/1907 -* [improve] refactor code by @Ceilzcx in https://github.com/apache/hertzbeat/pull/1901 -* [type:bugfix] fix customized menu invalid bug #1898 by @Aias00 in https://github.com/apache/hertzbeat/pull/1908 -* [type:bugfix] fix HTTP API bug #1895 by @Aias00 in https://github.com/apache/hertzbeat/pull/1909 -* [test] Add test for WebsocketCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1912 -* [doc] translates chinese comment to english. by @westboy in https://github.com/apache/hertzbeat/pull/1914 -* [doc] Add HIP document and template by @crossoverJie in https://github.com/apache/hertzbeat/pull/1913 -* [improve] clean up home webapp unused code by @tomsun28 in https://github.com/apache/hertzbeat/pull/1915 -* [feature] support use ngql query metrics from nebulaGraph by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1917 -* [doc] Improve the Contribution Documentation. 
by @crossoverJie in https://github.com/apache/hertzbeat/pull/1918 -* [featrue]add apache hdfs monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1920 -* [doc] update hbase documentation description by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1921 -* [doc] Add documentation for nebulaGraph cluster monitoring and custom monitoring using NGQL, and clean up useless parameters by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1923 -* [test] Add test for TelnetCollectImplTest by @crossoverJie in https://github.com/apache/hertzbeat/pull/1924 -* fix(*): fix TdEngine Init not found Database by @Clownsw in https://github.com/apache/hertzbeat/pull/1891 -* [doc] update contribution and add run-build guide by @tomsun28 in https://github.com/apache/hertzbeat/pull/1919 -* bugfix collector startup error can not find JdbcClient by @tomsun28 in https://github.com/apache/hertzbeat/pull/1925 -* [doc] add help document for freebsd monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1928 -* [refactoring] Split AbstractHistoryDataStorage class by @xuziyang in https://github.com/apache/hertzbeat/pull/1926 -* [fix] fixed name error in monitoring template and improve NGQL protocol by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1931 -* [refactoring] Split AbstractRealTimeDataStorage class by @xuziyang in https://github.com/apache/hertzbeat/pull/1935 -* [bugfix] fix ssl-cert days_remaining and npe by @tomsun28 in https://github.com/apache/hertzbeat/pull/1934 -* [feature] add apache yarn monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1937 -* [doc] add help document for redhat monitoring and rocky linux monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1939 -* [test] Add test for NtpCollectImpl by @crossoverJie in https://github.com/apache/hertzbeat/pull/1940 -* [bugfix] fix alarm center tags display error by @tomsun28 in https://github.com/apache/hertzbeat/pull/1938 -* 
[improve] prepare for release hertzbeat v1.6.0 by @tomsun28 in https://github.com/apache/hertzbeat/pull/1929 -* add:Updated the Open Source Summer Project blog. by @TJxiaobao in https://github.com/apache/hertzbeat/pull/1943 -* [feature] Support monitoring of OpenAI accounts by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1947 -* [refactoring] Inject a single instance of the data store by @xuziyang in https://github.com/apache/hertzbeat/pull/1944 -* [refactoring] AbstractHistoryDataStorage implement the DisposableBean by @xuziyang in https://github.com/apache/hertzbeat/pull/1946 -* [doc] update iotdb init document by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1948 -* [improve] update build script by @tomsun28 in https://github.com/apache/hertzbeat/pull/1949 -* [test] add test for NgqlCollectImpl by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1953 -* [bugfix]Replace monitors to alert. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1954 -* [improve] add llm, server menu and update doc by @tomsun28 in https://github.com/apache/hertzbeat/pull/1955 -* [improve][HIP] HIP-01: Refactoring AbstractCollect by @crossoverJie in https://github.com/apache/hertzbeat/pull/1930 -* [bugfix] fix ConnectionCommonCache possible npe by @crossoverJie in https://github.com/apache/hertzbeat/pull/1959 -* [doc] add help document for eulerOS monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1960 -* [fixbug] Fix the problem of no data for springboot3 monitoring by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1961 -* commit:fix the front-end popup cannot exit by @Yanshuming1 in https://github.com/apache/hertzbeat/pull/1957 -* [fixbug] expression rule adaptation by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1963 -* [doc] add help doc for influxdb-promql and kafka-promql monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/1965 -* [doc]: update readme-cn docs by @yuluo-yx in 
https://github.com/apache/hertzbeat/pull/1964 -* [improve][HIP] HIP-01: Implement refactoring AbstractCollect by @crossoverJie in https://github.com/apache/hertzbeat/pull/1966 -* [chore] update .gitignore to save .idea/icon.png by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1971 -* [improve][bugfix]: fix AlertTemplateUtilTest test exception and update code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1969 -* [feature] add apache hugegraph monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1972 -* [improve] Implement cascading parameter list for SNMP protocol by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1976 -* [improve] optimize DateUtil and add test case by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1974 -* [feature]Hertzbeat custom plugin. by @zqr10159 in https://github.com/apache/hertzbeat/pull/1973 -* update login page and status page color by @lwjxy in https://github.com/apache/hertzbeat/pull/1977 -* [chore] update code style and add some comment by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1975 -* [doc]Hertzbeat plugin doc. 
by @zqr10159 in https://github.com/apache/hertzbeat/pull/1980 -* [doc] update contributors and update status page style by @tomsun28 in https://github.com/apache/hertzbeat/pull/1981 -* [feature] Implement cascading parameter list by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/1978 -* [doc]update threshold alarm doc by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1983 -* [chore] optimize code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1984 -* [fix] Compatible with MongoDB versions earlier than 3.6 by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/1988 -* [chore] optimize manager code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1993 -* [doc] Translate part of documentation development.md under `zh-cn` directory from `en` to `zh-cn` by @Thespica in https://github.com/apache/hertzbeat/pull/1995 -* [improve] http protocol prometheus parsing optimization by @zhangshenghang in https://github.com/apache/hertzbeat/pull/1996 -* [feature] add at function for wechat by @Yanshuming1 in https://github.com/apache/hertzbeat/pull/1994 -* [improve] add common util test by @yuluo-yx in https://github.com/apache/hertzbeat/pull/2001 -* [improve] update release license notice and package by @tomsun28 in https://github.com/apache/hertzbeat/pull/2003 -* [bugfix] fix collector startup error classpath by @tomsun28 in https://github.com/apache/hertzbeat/pull/2004 -* [chore] optimize code style by @yuluo-yx in https://github.com/apache/hertzbeat/pull/2000 -* [improve] Bump up `eslint-plugin-jsdoc` to 48.2.5 to support node 20+ by @Thespica in https://github.com/apache/hertzbeat/pull/2005 -* [doc] fix doc highlighting by @boatrainlsz in https://github.com/apache/hertzbeat/pull/2006 -* [web-app]feature: case insensitive search by @JavaProgrammerLB in https://github.com/apache/hertzbeat/pull/2007 -* [feature] Support time calculation expressions. 
by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2009 -* [doc] add document for time expression by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2012 -* [feature] Add Apache Pulsar monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/2013 -* [doc] home verify release doc update by @tomsun28 in https://github.com/apache/hertzbeat/pull/2014 -* [Improve] Improve clickhouse monitor And Improve Pulsar monitor by @zhangshenghang in https://github.com/apache/hertzbeat/pull/2015 -* [doc] translate help document for memcached monitoring by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2019 -* [improve] optimize collector httpsd discovery by @yuluo-yx in https://github.com/apache/hertzbeat/pull/1991 -* [optimize] optimize code style and logic, add unit test by @yuluo-yx in https://github.com/apache/hertzbeat/pull/2010 -* [fix] Fix possible potential thread safe bugs by @gjjjj0101 in https://github.com/apache/hertzbeat/pull/2021 -* [improve] add ci for home by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2024 -* [bugfix]Tag with empty value Shouldn't transform to Tag: by @JavaProgrammerLB in https://github.com/apache/hertzbeat/pull/2025 -* [bugfix] modify popup confirm to clear cache and cancel popup save by @Yanshuming1 in https://github.com/apache/hertzbeat/pull/2026 -* [improve] update monitor state desc by @tomsun28 in https://github.com/apache/hertzbeat/pull/2028 -* bugfix: fix overflow of integers by @Calvin979 in https://github.com/apache/hertzbeat/pull/2029 -* [improve] tips need update initial default password by @tomsun28 in https://github.com/apache/hertzbeat/pull/2030 -* [improve] deprecate support iotdb 0.* version by @Ceilzcx in https://github.com/apache/hertzbeat/pull/2032 -* [fixbug] required field check by @zhangshenghang in https://github.com/apache/hertzbeat/pull/2022 -* [improve] add IcmpCollectImplTest by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2033 -* [improve] fix code style by 
@zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2034 -* [improve] increase the length limit of the username field by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2035 -* [improve] Checkstyle include testSource by @crossoverJie in https://github.com/apache/hertzbeat/pull/2036 -* [bugfix] fix collector and frontend dependent license error by @tomsun28 in https://github.com/apache/hertzbeat/pull/2037 -* [improve] Add test for MemcachedCollectImpl by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2044 -* [imprve] Remove duplicate indices by @zuobiao-zhou in https://github.com/apache/hertzbeat/pull/2045 -* [docs]: fix several typos in docs by @lw-yang in https://github.com/apache/hertzbeat/pull/2047 -* Add the missing parts of docs, fix layout, sync the English version with the Chinese version by @xfl12345 in https://github.com/apache/hertzbeat/pull/2048 -* [improve] add filename check in home ci by @LiuTianyou in https://github.com/apache/hertzbeat/pull/2049 -* [improve] update dependency licenses and remove the aliyun sms depend by @tomsun28 in https://github.com/apache/hertzbeat/pull/2058 +- bugfix collector can not startup alone by @tomsun28 in +- translate some hertzbeat blog by @TJxiaobao in +- Check class description by @ZY945 in +- translate class description to english by @TJxiaobao in +- support monitor metrics name i18n: ClickHouse by @ZY945 in +- translate blog 20220601 to English by @vinci-897 in +- add a online prometheus parser and a prometheus-like push style. 
by @vinci-897 in +- translate blog 20220320 to English by @vinci-897 in +- support monitor metrics name i18n: DynamicTp by @ZY945 in +- translate blog 20220228 to English by @vinci-897 in +- translate blog 20220310 to English by @vinci-897 in +- translate blog 20220904 to English by @vinci-897 in +- support monitor metrics name i18n: Airflow by @ZY945 in +- support monitor metrics name i18n: IoTDB by @ZY945 in +- Translate 2022-02-11-hertzbeat document by @wang1027-wqh in +- bugfix The annotation @Transactional specifies rollbackFor. by @handy-git in +- add handy-git as a contributor for code by @allcontributors in +- feature:Translate 2022-02-17-hertzbeat Document by @wang1027-wqh in +- support monitor metrics name i18n: rocketmq by @ZY945 in +- [doc] update relate doc and readme by @tomsun28 in +- bugfix monitoring mongodb not work in springboot3 by @ZY945 in +- [feature] add storm monitor by @starmilkxin in +- [bugfix] fixed the issue in http_sd where services were incorrectly reported as available when they were actually unavailable by @starmilkxin in +- remove mysql-oracle dependency jar from release package lib by @tomsun28 in +- System config theme by @TJxiaobao in +- update webapp menu layout and doc by @tomsun28 in +- bugfix can not find mysql dependency when startup by @tomsun28 in +- support config common aes secret by @tomsun28 in +- [bugfix]fix the issue of add redis cluster node test error report(#1601) by @LiuTianyou in +- add LiuTianyou as a contributor for code by @allcontributors in +- mongodb monitoring support custom connection timeout param by @ZY945 in +- bugfix old data decode error when use new common-secret by @tomsun28 in +- [bugfix] fix bug where reopening pop-up window still retained previously edited data after closing. 
by @starmilkxin in +- monitor center add search type modal by @tomsun28 in +- fix status page logo overflow by @tomsun28 in +- bugfix npe monitor jobid may be null by @tomsun28 in +- support custom main menus in monitor template by @tomsun28 in +- update home website doc by @tomsun28 in +- [Improve] change package group to org apache hertzbeat by @vinci-897 in +- [improve] initial license clean up by @tomsun28 in +- update manager and collector logback config(#1704) by @handy-git in +- fix(sec): upgrade com.h2database:h2 to by @WinterKi1ler in +- add WinterKi1ler as a contributor for code by @allcontributors in +- update asf branch protected check by @tomsun28 in +- [doc]Update star chart by @zqr10159 in +- [fixed] fixed click collector online offline button error by @miki-hmt in +- [improve] initial doc clean up by @tomsun28 in +- [Improvement]Support multiple receivers. by @zqr10159 in +- [improvement]Add lisence. by @zqr10159 in +- Backend LICENSE Initialize by @wang1027-wqh in +- Back-end dependency upgrade by @TJxiaobao in +- [Improve] run hertzbeat in docker compose support dependen service condition by @gjjjj0101 in +- [bugfix] fix statuspage index exception by @makechoicenow in +- remove unlicensed dependency 'wolfy87 eventemitter' by @alpha951 in +- [improve] auto label when pr, update asf config by @tomsun28 in +- [improve] update asf config set required status checks context by @tomsun28 in +- [improve] home add apache info by @a-little-fool in +- [doc] Change e2e path by @crossoverJie in +- fix : ingress tls inoperative by @PeixyJ in +- [refactor] method improvement rationale by @dukbong in +- [improve] create disclaimer file, add incubating in describe by @tomsun28 in +- [improve] update new hertzbeat brand logo, update doc by @tomsun28 in +- Complete the code comment translation of the common module by @Hi-Mr-Wind in +- Remove unnecessary if-else statement. 
by @dukbong in +- [doc] remove and translate chinese to english in warehous by @xuziyang in +- Replace deprecated methods with builder pattern for RedisURI construction by @dukbong in +- remove and translate chinese to english in collector,script,push,remoting and manager module by @MananPoojara in +- Added the function of sending SMS messages through Alibaba Cloud. by @lwqzz in +- [improve]Add frontend license. by @zqr10159 in +- [test] Add RedisSingleCollectImplTest by @crossoverJie in +- [refactor] add override annotation by @handy-git in +- '[docs]bugfix: display syntax error of ipmi protocol' by @tomorrowshipyltm in +- [doc] translate alerter moudle code chinese to english by @tomsun28 in +- [refactor] database-related properties class, type changed to record by @xuziyang in +- Fix snmp template unit conversion problem by @TJxiaobao in +- [doc] Add help documentation for clickhouse monitoring by @LiuTianyou in +- [feature:update-checkstyle] Limit the java file header by @YxYL6125 in +- [improve]Add external lib folder to store mysql and oracle driver. by @zqr10159 in +- [Improve]When multiple lines are returned, each alarm is triggered instead of only the first alarm by @15613060203 in +- [doc] add team page in website by @alpha951 in +- [feature] Improve the import checkstyle by @crossoverJie in +- [doc] Add help document for dns monitoring by @LiuTianyou in +- [improve] preventing NPE by @dukbong in +- [refactor] change the warehouse properties the type to record by @xuziyang in +- Refactor: upgrade syntax to jdk17(instanceof & switch) by @Calvin979 in +- [test] Add NginxCollect test by @crossoverJie in +- [website] update team page by @tomsun28 in +- [test] Add RedisClusterCollectImplTest by @crossoverJie in +- [improve] Fix typo ReqStatusResponse by @crossoverJie in +- Comparing N objects for null with Assert.noNullElements(). 
by @dukbong in +- [doc] Add help document for elasticsearch monitoring and ftp monitoring by @LiuTianyou in +- [doc] add help documentation for huawei switch monitoring by @Alanxtl in +- chore: upgrade the api-testing (e2e) to v0.0.16 by @LinuxSuRen in +- [Remove][Improve]Mail config by @zqr10159 in +- Remove and translate chinese to english in code by @dukbong in +- [feature]Add monitoring for Hbase Master by @zhangshenghang in +- [doc] resolve code conflicts and coverage caused by pr(#1813) merge by @LiuTianyou in +- [doc] Add help document for tidb and nacos monitoring by @Alanxtl in +- [improve] use eclipselink orm replace of hibernate orm by @tomsun28 in +- [improve] Add whitespace checkstyle by @crossoverJie in +- [bugfix] dns monitoring template add query class parameter by @LiuTianyou in +- [Refactor] Preventing Unnecessary Object Creation and Using Utility Methods by @dukbong in +- [doc]Add and modify Doris FE Chinese and English documentation by @zhangshenghang in +- [docs] Optimize: add help docs for UDP port & Springboot3 help doc by @zuobiao-zhou in +- Code Simplification, Structure Changes, and Translation Work, Along with a Question by @dukbong in +- [doc] add help document for mongodb monitoring by @LiuTianyou in +- [collector] fix: inverts the compareTo sort of MetricsCollect run queue by @Pzz-2021 in +- [doc]Doc add debian system by @zhangshenghang in +- [feature] Add Apache Hbase RegionServer monitoring by @zhangshenghang in +- [improve] Optimize websocket monitor by @LiuTianyou in +- [refactor] Split the WarehouseProperties class by @xuziyang in +- [test] Add test for HttpsdImpl by @crossoverJie in +- [fix] Fix the wrong comment by @xuziyang in +- [refactor] trans and use assert by @dukbong in +- [bugfix] modify the command in the mongodb monitoring template by @LiuTianyou in +- [bigfix]Fix Debian system Top10 monitoring bug by @zhangshenghang in +- [cleanup]Delete the corresponding Chinese comments by @hudongdong129 in +- [doc] translates 
chinese comment to english. by @dukbong in +- [doc] fix error and add help document for prometheus task by @LiuTianyou in +- [feature] Add Linux process monitoring by @zhangshenghang in +- [test] Add test for FtpCollectImpl by @crossoverJie in +- [improve] use apache jexl replace of aviator by @tomsun28 in +- [bugfix] jpa data save logic repair by @zhangshenghang in +- [feature] add influxdb metrics monitoring by @TJxiaobao in +- [doc] add help document for rocketmq by @LiuTianyou in +- [improve] Imporve checkstyle of test code. by @crossoverJie in +- [feature] Support Redfish protocol to monitoring server by @gjjjj0101 in +- Fix debian monitoring template issue about process monitoring by @LLP2333 in +- [bugfix] centos Top10 shows missing one by @zhangshenghang in +- [improve] add website apache incubator footer by @tomsun28 in +- [doc] update help document by @LiuTianyou in +- [featurn] support flyway database migration by @tomsun28 in +- [improve] Delete the timestamp field in the class MetricFamily.Metric by @xuziyang in +- [improve] Use java.lang.AutoCloseable instead of CacheCloseable by @crossoverJie in +- [bugfix]Fix top10 process command. by @zqr10159 in +- [feature] support the VictoriaMetrics cluster by @xuziyang in +- [improve] Refactor common cache code by @crossoverJie in +- Eliminate Unnecessary Unboxing and Generics by @handy-git in +- [bugfix][doc]Add kafka sidebar. 
by @zqr10159 in +- [doc] I18n for monitoring template yml metrics by @zuobiao-zhou in +- [refactor] StoreProperties is no longer useful, delete it by @xuziyang in +- bugfix statistical metrics data matching fails by @tomsun28 in +- [doc] add help doc for flink monitoring by @HeartLinked in +- [doc] add almalinux documentation by @zhangshenghang in +- [improve] Missing a generic by @crossoverJie in +- [bugfix] Fixed some metrics of Jexlespression not matching in Elasticsearch by @zhangshenghang in +- feat(*): Support Time Type to Tengine Data Storage by @Clownsw in +- [feature] support random jwt secret when not custom by @tomsun28 in +- [doc] add opensuse doc by @zhangshenghang in +- fix when manager restart, collect register error by @Ceilzcx in +- [bugfix] fix can not use empty collection as query params in eclipselink by @tomsun28 in +- [doc] update doc add download page and pic by @tomsun28 in +- [test] Add test for UdpCollectImpl by @crossoverJie in +- fix license by @yqxxgh in +- [improve] refactor code by @Ceilzcx in +- [type:bugfix] fix customized menu invalid bug #1898 by @Aias00 in +- [type:bugfix] fix HTTP API bug #1895 by @Aias00 in +- [test] Add test for WebsocketCollectImpl by @crossoverJie in +- [doc] translates chinese comment to english. by @westboy in +- [doc] Add HIP document and template by @crossoverJie in +- [improve] clean up home webapp unused code by @tomsun28 in +- [feature] support use ngql query metrics from nebulaGraph by @LiuTianyou in +- [doc] Improve the Contribution Documentation. 
by @crossoverJie in +- [featrue]add apache hdfs monitor by @zhangshenghang in +- [doc] update hbase documentation description by @zhangshenghang in +- [doc] Add documentation for nebulaGraph cluster monitoring and custom monitoring using NGQL, and clean up useless parameters by @LiuTianyou in +- [test] Add test for TelnetCollectImplTest by @crossoverJie in +- fix(*): fix TdEngine Init not found Database by @Clownsw in +- [doc] update contribution and add run-build guide by @tomsun28 in +- bugfix collector startup error can not find JdbcClient by @tomsun28 in +- [doc] add help document for freebsd monitoring by @LiuTianyou in +- [refactoring] Split AbstractHistoryDataStorage class by @xuziyang in +- [fix] fixed name error in monitoring template and improve NGQL protocol by @LiuTianyou in +- [refactoring] Split AbstractRealTimeDataStorage class by @xuziyang in +- [bugfix] fix ssl-cert days_remaining and npe by @tomsun28 in +- [feature] add apache yarn monitor by @zhangshenghang in +- [doc] add help document for redhat monitoring and rocky linux monitoring by @LiuTianyou in +- [test] Add test for NtpCollectImpl by @crossoverJie in +- [bugfix] fix alarm center tags display error by @tomsun28 in +- [improve] prepare for release hertzbeat v1.6.0 by @tomsun28 in +- add:Updated the Open Source Summer Project blog. by @TJxiaobao in +- [feature] Support monitoring of OpenAI accounts by @zuobiao-zhou in +- [refactoring] Inject a single instance of the data store by @xuziyang in +- [refactoring] AbstractHistoryDataStorage implement the DisposableBean by @xuziyang in +- [doc] update iotdb init document by @zhangshenghang in +- [improve] update build script by @tomsun28 in +- [test] add test for NgqlCollectImpl by @LiuTianyou in +- [bugfix]Replace monitors to alert. 
by @zqr10159 in +- [improve] add llm, server menu and update doc by @tomsun28 in +- [improve][HIP] HIP-01: Refactoring AbstractCollect by @crossoverJie in +- [bugfix] fix ConnectionCommonCache possible npe by @crossoverJie in +- [doc] add help document for eulerOS monitoring by @LiuTianyou in +- [fixbug] Fix the problem of no data for springboot3 monitoring by @zhangshenghang in +- commit:fix the front-end popup cannot exit by @Yanshuming1 in +- [fixbug] expression rule adaptation by @zhangshenghang in +- [doc] add help doc for influxdb-promql and kafka-promql monitoring by @LiuTianyou in +- [doc]: update readme-cn docs by @yuluo-yx in +- [improve][HIP] HIP-01: Implement refactoring AbstractCollect by @crossoverJie in +- [chore] update .gitignore to save .idea/icon.png by @yuluo-yx in +- [improve][bugfix]: fix AlertTemplateUtilTest test exception and update code style by @yuluo-yx in +- [feature] add apache hugegraph monitor by @zhangshenghang in +- [improve] Implement cascading parameter list for SNMP protocol by @zuobiao-zhou in +- [improve] optimize DateUtil and add test case by @yuluo-yx in +- [feature]Hertzbeat custom plugin. by @zqr10159 in +- update login page and status page color by @lwjxy in +- [chore] update code style and add some comment by @yuluo-yx in +- [doc]Hertzbeat plugin doc. 
by @zqr10159 in +- [doc] update contributors and update status page style by @tomsun28 in +- [feature] Implement cascading parameter list by @zuobiao-zhou in +- [doc]update threshold alarm doc by @zhangshenghang in +- [chore] optimize code style by @yuluo-yx in +- [fix] Compatible with MongoDB versions earlier than 3.6 by @gjjjj0101 in +- [chore] optimize manager code style by @yuluo-yx in +- [doc] Translate part of documentation development.md under `zh-cn` directory from `en` to `zh-cn` by @Thespica in +- [improve] http protocol prometheus parsing optimization by @zhangshenghang in +- [feature] add at function for wechat by @Yanshuming1 in +- [improve] add common util test by @yuluo-yx in +- [improve] update release license notice and package by @tomsun28 in +- [bugfix] fix collector startup error classpath by @tomsun28 in +- [chore] optimize code style by @yuluo-yx in +- [improve] Bump up `eslint-plugin-jsdoc` to 48.2.5 to support node 20+ by @Thespica in +- [doc] fix doc highlighting by @boatrainlsz in +- [web-app]feature: case insensitive search by @JavaProgrammerLB in +- [feature] Support time calculation expressions. 
by @LiuTianyou in +- [doc] add document for time expression by @LiuTianyou in +- [feature] Add Apache Pulsar monitor by @zhangshenghang in +- [doc] home verify release doc update by @tomsun28 in +- [Improve] Improve clickhouse monitor And Improve Pulsar monitor by @zhangshenghang in +- [doc] translate help document for memcached monitoring by @LiuTianyou in +- [improve] optimize collector httpsd discovery by @yuluo-yx in +- [optimize] optimize code style and logic, add unit test by @yuluo-yx in +- [fix] Fix possible potential thread safe bugs by @gjjjj0101 in +- [improve] add ci for home by @LiuTianyou in +- [bugfix]Tag with empty value Shouldn't transform to Tag: by @JavaProgrammerLB in +- [bugfix] modify popup confirm to clear cache and cancel popup save by @Yanshuming1 in +- [improve] update monitor state desc by @tomsun28 in +- bugfix: fix overflow of integers by @Calvin979 in +- [improve] tips need update initial default password by @tomsun28 in +- [improve] deprecate support iotdb 0.* version by @Ceilzcx in +- [fixbug] required field check by @zhangshenghang in +- [improve] add IcmpCollectImplTest by @zuobiao-zhou in +- [improve] fix code style by @zuobiao-zhou in +- [improve] increase the length limit of the username field by @zuobiao-zhou in +- [improve] Checkstyle include testSource by @crossoverJie in +- [bugfix] fix collector and frontend dependent license error by @tomsun28 in +- [improve] Add test for MemcachedCollectImpl by @zuobiao-zhou in +- [imprve] Remove duplicate indices by @zuobiao-zhou in +- [docs]: fix several typos in docs by @lw-yang in +- Add the missing parts of docs, fix layout, sync the English version with the Chinese version by @xfl12345 in +- [improve] add filename check in home ci by @LiuTianyou in +- [improve] update dependency licenses and remove the aliyun sms depend by @tomsun28 in ## New Contributors -* @handy-git made their first contribution in https://github.com/apache/hertzbeat/pull/1643 -* @LiuTianyou made their first 
contribution in https://github.com/apache/hertzbeat/pull/1684 -* @WinterKi1ler made their first contribution in https://github.com/apache/hertzbeat/pull/1718 -* @miki-hmt made their first contribution in https://github.com/apache/hertzbeat/pull/1734 -* @gjjjj0101 made their first contribution in https://github.com/apache/hertzbeat/pull/1748 -* @makechoicenow made their first contribution in https://github.com/apache/hertzbeat/pull/1747 -* @alpha951 made their first contribution in https://github.com/apache/hertzbeat/pull/1745 -* @crossoverJie made their first contribution in https://github.com/apache/hertzbeat/pull/1758 -* @PeixyJ made their first contribution in https://github.com/apache/hertzbeat/pull/1760 -* @dukbong made their first contribution in https://github.com/apache/hertzbeat/pull/1757 -* @xuziyang made their first contribution in https://github.com/apache/hertzbeat/pull/1773 -* @MananPoojara made their first contribution in https://github.com/apache/hertzbeat/pull/1774 -* @lwqzz made their first contribution in https://github.com/apache/hertzbeat/pull/1768 -* @tomorrowshipyltm made their first contribution in https://github.com/apache/hertzbeat/pull/1793 -* @YxYL6125 made their first contribution in https://github.com/apache/hertzbeat/pull/1799 -* @15613060203 made their first contribution in https://github.com/apache/hertzbeat/pull/1797 -* @Alanxtl made their first contribution in https://github.com/apache/hertzbeat/pull/1813 -* @zhangshenghang made their first contribution in https://github.com/apache/hertzbeat/pull/1820 -* @zuobiao-zhou made their first contribution in https://github.com/apache/hertzbeat/pull/1832 -* @Pzz-2021 made their first contribution in https://github.com/apache/hertzbeat/pull/1837 -* @LLP2333 made their first contribution in https://github.com/apache/hertzbeat/pull/1868 -* @HeartLinked made their first contribution in https://github.com/apache/hertzbeat/pull/1893 -* @Aias00 made their first contribution in 
https://github.com/apache/hertzbeat/pull/1908 -* @westboy made their first contribution in https://github.com/apache/hertzbeat/pull/1914 -* @Yanshuming1 made their first contribution in https://github.com/apache/hertzbeat/pull/1957 -* @yuluo-yx made their first contribution in https://github.com/apache/hertzbeat/pull/1964 -* @lwjxy made their first contribution in https://github.com/apache/hertzbeat/pull/1977 -* @Thespica made their first contribution in https://github.com/apache/hertzbeat/pull/1995 -* @boatrainlsz made their first contribution in https://github.com/apache/hertzbeat/pull/2006 -* @JavaProgrammerLB made their first contribution in https://github.com/apache/hertzbeat/pull/2007 -* @lw-yang made their first contribution in https://github.com/apache/hertzbeat/pull/2047 -* @xfl12345 made their first contribution in https://github.com/apache/hertzbeat/pull/2048 +- @handy-git made their first contribution in +- @LiuTianyou made their first contribution in +- @WinterKi1ler made their first contribution in +- @miki-hmt made their first contribution in +- @gjjjj0101 made their first contribution in +- @makechoicenow made their first contribution in +- @alpha951 made their first contribution in +- @crossoverJie made their first contribution in +- @PeixyJ made their first contribution in +- @dukbong made their first contribution in +- @xuziyang made their first contribution in +- @MananPoojara made their first contribution in +- @lwqzz made their first contribution in +- @tomorrowshipyltm made their first contribution in +- @YxYL6125 made their first contribution in +- @15613060203 made their first contribution in +- @Alanxtl made their first contribution in +- @zhangshenghang made their first contribution in +- @zuobiao-zhou made their first contribution in +- @Pzz-2021 made their first contribution in +- @LLP2333 made their first contribution in +- @HeartLinked made their first contribution in +- @Aias00 made their first contribution in +- @westboy made their 
first contribution in +- @Yanshuming1 made their first contribution in +- @yuluo-yx made their first contribution in +- @lwjxy made their first contribution in +- @Thespica made their first contribution in +- @boatrainlsz made their first contribution in +- @JavaProgrammerLB made their first contribution in +- @lw-yang made their first contribution in +- @xfl12345 made their first contribution in ## 一条命令即可开始 @@ -374,14 +374,14 @@ keywords: [open source, monitoring, alerting] ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` -详细参考 HertzBeat 官网文档 https://hertzbeat.com/docs +详细参考 HertzBeat 官网文档 --- -**Github: https://github.com/apache/hertzbeat** +**Github: ** -下载页面: https://hertzbeat.apache.org/docs/download/ +下载页面: -升级指南: https://hertzbeat.apache.org/blog/2024/06/11/hertzbeat-v1.6.0-update/ +升级指南: Have Fun! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md index dd19af388f2..9a0e2b5ee11 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md @@ -41,4 +41,3 @@ keywords: [open source monitoring system, alerting system] 在这个过程中我也把在其他社区(Pulsar、OpenTelemetry)的优秀经验借鉴到 HertzBeat,大家取长补短,想必这也是开源的魅力所在吧。 最后要感谢社区的 logicz 邀请我成为 Committer,tom 对我 PR 的 review,预祝 HertzBeat 从孵化器毕业成为明星项目🎊。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md index ad575b95474..d5a34851d1b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-28-new-committer.md @@ -12,7 +12,7 @@ keywords: [open source monitoring system, alerting system] > 非常荣幸能成为Apache Hertzbeat的Committer > - ## 个人介绍 +## 个人介绍 本人是2023年毕业,目前在一家互联网公司担任java开发工程师。 diff 
--git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md index e734c1536a2..8e61e71707e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-default.md @@ -43,7 +43,7 @@ sidebar_label: 系统默认解析方式 ``` 样例: -查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 +查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 若只有一台虚拟机,其单层格式为: ```json @@ -283,4 +283,3 @@ metrics: parseType: jsonPath parseScript: '$' ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md index fcd44c5bbf3..9e576543a25 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md @@ -61,7 +61,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为`hertzbeat`的自定义监控类型,其使用HTTP协议采集指标数据。 @@ -208,10 +208,10 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! 如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md index fc87145178a..1c29671ad52 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-token.md @@ -212,7 +212,7 @@ metrics: ``` -**此时,重启hertzbeat系统,在系统页面上添加 `hertzbeat_token` 类型监控,配置输入参数,`content-type`填`application/json` , `请求Body`填账户密码json如下: ** +**此时,重启hertzbeat系统,在系统页面上添加 `hertzbeat_token` 类型监控,配置输入参数,`content-type`填`application/json` , `请求Body`填账户密码json如下:** ```json { @@ -391,10 +391,10 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! 如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! -**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md index 1439c532219..3ae1aa00a77 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-jsonpath.md @@ -61,7 +61,7 @@ sidebar_label: JsonPath解析方式 #### 样例 -查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 +查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 接口返回的原始数据如下: ```json @@ -174,4 +174,3 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md index 35a8f4fa5f5..45e25794ca6 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http.md @@ -10,10 +10,10 @@ sidebar_label: HTTP协议自定义监控 【**HTTP接口调用**】->【**响应校验**】->【**响应数据解析**】->【**默认方式解析|JsonPath脚本解析 | XmlPath解析(todo) | Prometheus解析**】->【**指标数据提取**】 -由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 +由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 -**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) +**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) **JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) ### 自定义步骤 @@ -22,13 +22,13 @@ HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数, ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 @@ -277,4 +277,3 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md index bb946d8ce1c..cd24a177c63 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jdbc.md @@ -21,7 +21,7 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book limit 1; 这里指标字段就能和响应数据一一映射为一行采集数据。 @@ -30,7 +30,7 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询多行数据, 
通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book; 这里指标字段就能和响应数据一一映射为多行采集数据。 @@ -39,9 +39,9 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 -例如: -查询字段:one tow three four -查询SQL:select key, value from book; +例如: +查询字段:one tow three four +查询SQL:select key, value from book; SQL响应数据: | key | value | @@ -59,13 +59,13 @@ SQL响应数据: ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_sql的自定义监控类型,其使用JDBC协议采集指标数据。 @@ -243,4 +243,3 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md index 07acbbeeec9..41b013a8b6a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-jmx.md @@ -4,7 +4,7 @@ title: JMX协议自定义监控 sidebar_label: JMX协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 > JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 ### JMX协议采集流程 @@ -23,13 +23,13 @@ sidebar_label: JMX协议自定义监控 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下监控模版的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 @@ -236,4 +236,3 @@ metrics: 
objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md index 34514b3f2bb..3788a1400e9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ngql.md @@ -4,7 +4,7 @@ title: NQGL自定义监控 sidebar_label: NGQL自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用NGQL自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用NGQL自定义指标监控。 > NGQL自定义监控可以让我们很方便的使用NGQL或者OpenCypher从NebulaGraph图数据库中查询指标数据,支持NebulaGraph 3.X版本。 ### 数据解析方式 @@ -21,6 +21,7 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 > `filterValue`: 过滤属性值(可选) 例如: + - online_meta_count#SHOW HOSTS META#Status#ONLINE 对 `SHOW HOSTS META` 返回的结果中统计滤Status==ONLINE的数量 - online_meta_count#SHOW HOSTS META## @@ -47,7 +48,8 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: + - 查询的指标字段为:a,b - 查询NGQL:match (v:metrics) return v.metrics.a as a,v.metrics.b as b; 这里指标字段就能和响应数据一一映射为多行采集数据。 @@ -71,13 +73,13 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_ngql的自定义监控类型,其使用NGQL采集指标数据。 @@ -170,4 +172,3 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md index 5f390517206..a141b38703a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-point.md @@ -176,4 +176,3 @@ metrics: parseType: website ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md index 6e61ec3fa81..49597c4b9a5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-snmp.md @@ -4,7 +4,7 @@ title: SNMP协议自定义监控 sidebar_label: SNMP协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 > SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 ### SNMP协议采集流程 @@ -23,13 +23,13 @@ sidebar_label: SNMP协议自定义监控 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 @@ -207,4 +207,3 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md index 0f643f153f8..451e2b3a540 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md @@ -4,7 +4,7 @@ title: SSH协议自定义监控 sidebar_label: SSH协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 > SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 ### SSH协议采集流程 @@ -21,12 +21,12 @@ 
SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 > 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 -例如: -需要查询Linux的指标 hostname-主机名称,uptime-启动时间 -主机名称原始查询命令:`hostname` -启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` -则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): -`hostname; uptime | awk -F "," '{print $1}'` +例如: +需要查询Linux的指标 hostname-主机名称,uptime-启动时间 +主机名称原始查询命令:`hostname` +启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` +则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): +`hostname; uptime | awk -F "," '{print $1}'` 终端响应的数据为: ``` @@ -34,8 +34,8 @@ tombook 14:00:15 up 72 days ``` -则最后采集到的指标数据一一映射为: -hostname值为 `tombook` +则最后采集到的指标数据一一映射为: +hostname值为 `tombook` uptime值为 `14:00:15 up 72 days` 这里指标字段就能和响应数据一一映射为一行采集数据。 @@ -44,8 +44,8 @@ uptime值为 `14:00:15 up 72 days` > 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 -例如: -查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 +例如: +查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 内存指标原始查询命令为:`free -m`, 控制台响应: ```shell @@ -55,7 +55,7 @@ Swap: 8191 33 8158 ``` 在hertzbeat中multiRow格式解析需要响应数据列名称和指标值一一映射,则对应的查询SHELL脚本为: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` +`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` 控制台响应为: ```shell @@ -71,13 +71,13 @@ total used free buff_cache available ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 @@ -216,4 +216,3 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md index 4d2e2425257..ebdd4786f17 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-telnet.md @@ -4,7 +4,7 @@ title: Telnet协议自定义监控 sidebar_label: Telnet协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用Telnet协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用Telnet协议自定义指标监控。 > 使用 Telnet 协议自定义监控可以让我们通过编写 Telnet 命令脚本来监控和采集我们想要监控的 Linux 指标 ### Telnet协议采集流程 @@ -23,13 +23,13 @@ sidebar_label: Telnet协议自定义监控 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为zookeeper的自定义监控类型,其使用telnet协议采集指标数据。 @@ -297,4 +297,3 @@ metrics: ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md index 7b3112f52f8..221ee168b4b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md @@ -247,7 +247,6 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数 `headers,params` 等,我们可以像用postman一样去定义它,可玩性也非常高! 
- diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md index c30a850a3c2..b444d0a970c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_committer.md @@ -32,7 +32,7 @@ limitations under the License. Apache HertzBeat 社区努力追求基于功绩的原则。因此,一旦有人在 CoPDoC 的任何领域有了足够的贡献,他们就可以成为 Committer 的候选人,最终被投票选为 HertzBeat 的 Committer。成为 Apache HertzBeat 的 Committer 并不一定意味着你必须使用你的提交权限向代码库提交代码;它意味着你致力于 HertzBeat 项目并为我们社区的成功做出了积极的贡献。 -## Committer 的要求: +## Committer 的要求 没有成为 Committer 或 PPMC 成员的严格规则。新的 Committer 的候选人通常是积极的贡献者和社区成员。但是,如果能稍微明确一些规则,就可以在一定程度上消除贡献者的疑虑,使社区更加透明、合理和公平。 @@ -57,4 +57,3 @@ Committer 的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md index 39cf1da9123..ff56d4cb723 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/become_pmc_member.md @@ -32,7 +32,7 @@ limitations under the License. 
Apache HertzBeat 社区努力追求基于功绩的原则。因此,一旦有人在 CoPDoC 的任何领域有了足够的贡献,他们就可以成为 PMC 成员资格的候选人,最终被投票选为 HertzBeat 的 PMC 成员。成为 Apache HertzBeat 的 PMC 成员并不一定意味着您必须使用您的提交权限向代码库提交代码;它意味着您致力于 HertzBeat 项目并为我们社区的成功做出了积极的贡献。 -## PMC 成员的要求: +## PMC 成员的要求 没有成为 Committer 或 PPMC 成员的严格规则。新的 PMC 成员的候选人通常是积极的贡献者和社区成员。但是,如果能稍微明确一些规则,就可以在一定程度上消除贡献者的疑虑,使社区更加透明、合理和公平。 @@ -57,4 +57,3 @@ PMC 成员的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md index 9e11d3de6b7..361b4103011 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md @@ -27,7 +27,7 @@ limitations under the License. - 新建 `PR` 后需要在 `PR` 页面的 Github Development 按钮处关联已存在的对应 `ISSUE`(若无建议新建对应ISSUE) - - 标题命名格式(英文,小写) + - 标题命名格式(英文,小写) `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` 2. 添加描述信息 @@ -63,8 +63,66 @@ limitations under the License. ### 2.2 文档样式检查 -1. 在项目中运行`mvn spotless:check`,会执行Markdown文件格式自动检测。 -2. 在项目中运行`mvn spotless:apply`,会执行Markdown文件格式自动格式化,以确保所有文档都符合规范。 +1. 安装`markdownlint-cli2`,运行`npm install markdownlint-cli2 --global` +2. 在项目中运行`markdownlint "home/**/*.md"`,会执行Markdown文件格式自动检测。 +3. 
在项目中运行`markdownlint --fix "home/**/*.md"`,会执行Markdown文件格式自动格式化,以确保所有文档都符合规范。 + +> 提示: 修复只能修复部分问题,根据检查后的错误信息,手动调整。 + +错误码说明: + +| 错误代码 |说明 | +|---------------------------------------| ------------------------| +| **MD001 heading-increment** | 标题级别应一次只递增一个级别 | +| **MD003 heading-style** | 标题样式 | +| **MD004 ul-style** | 无序列表样式 | +| **MD005 list-indent** | 同一层级的列表项缩进不一致 | +| **MD007 ul-indent** | 无序列表缩进 | +| **MD009 no-trailing-spaces** | 行尾空格 | +| **MD010 no-hard-tabs** | 硬制表符 | +| **MD011 no-reversed-links** | 链接语法反转 | +| **MD012 no-multiple-blanks** | 多个连续空行 | +| **MD013 line-length** | 行长度 | +| **MD014 commands-show-output** | 命令前使用 `$` 符号但未显示输出 | +| **MD018 no-missing-space-atx** | ATX 样式标题符号后缺少空格 | +| **MD019 no-multiple-space-atx** | ATX 样式标题符号后有多个空格 | +| **MD020 no-missing-space-closed-atx** | 闭合 ATX 样式标题符号内部缺少空格 | +| **MD021 no-multiple-space-closed-atx** | 闭合 ATX 样式标题符号内部有多个空格 | +| **MD022 blanks-around-headings** | 标题周围应有空行 | +| **MD023 heading-start-left** | 标题必须从行首开始 | +| **MD024 no-duplicate-heading** | 存在多个内容相同的标题 | +| **MD025 single-title/single-h1** | 同一文档中有多个一级标题 | +| **MD026 no-trailing-punctuation** | 标题中有尾随标点符号 | +| **MD027 no-multiple-space-blockquote** | 引用符号后有多个空格 | +| **MD028 no-blanks-blockquote** | 引用块内有空行 | +| **MD029 ol-prefix** | 有序列表项前缀 | +| **MD030 list-marker-space** | 列表标记后的空格 | +| **MD031 blanks-around-fences** | 围栏代码块应被空行包围 | +| **MD032 blanks-around-lists** | 列表应被空行包围 | +| **MD033 no-inline-html** | 内联 HTML | +| **MD034 no-bare-urls** | 使用了裸露的 URL | +| **MD035 hr-style** | 水平分割线样式 | +| **MD036 no-emphasis-as-heading** | 不应使用强调样式代替标题 | +| **MD037 no-space-in-emphasis** | 强调标记内有空格 | +| **MD038 no-space-in-code** | 代码片段元素内有空格 | +| **MD039 no-space-in-links** | 链接文本内有空格 | +| **MD040 fenced-code-language** | 围栏代码块应指定语言 | +| **MD041 first-line-heading/first-line-h1** | 文件的第一行应为一级标题 | +| **MD042 no-empty-links** | 链接不可为空 | +| **MD043 required-headings** | 必须的标题结构 | +| **MD044 proper-names** | 专有名词应正确大写 | +| **MD045 no-alt-text** | 
图片应有替代文字(alt 文本) | +| **MD046 code-block-style** | 代码块样式 | +| **MD047 single-trailing-newline** | 文件应以单个换行符结尾 | +| **MD048 code-fence-style** | 代码围栏样式 | +| **MD049 emphasis-style** | 强调样式 | +| **MD050 strong-style** | 粗体样式 | +| **MD051 link-fragments** | 链接片段应有效 | +| **MD052 reference-links-images** | 引用链接和图片应使用已定义的标签 | +| **MD053 link-image-reference-definitions** | 链接和图片引用定义应是必要的 | +| **MD054 link-image-style** | 链接和图片样式 | +| **MD055 table-pipe-style** | 表格管道样式 | +| **MD056 table-column-count** | 表格列数 | ## 3 编程规范 @@ -75,6 +133,7 @@ limitations under the License. ```java Cache publicKeyCache; ``` + 2. 变量的拼音缩写是禁止的(排除地名等名词),例如chengdu。 3. 推荐的变量名以 `类型` 结尾。 对于 `Collection/List` 类型的变量,取 `xxxx` (复数表示多个元素)或以 `xxxList` (特定类型)结束。 @@ -84,6 +143,7 @@ limitations under the License. Map idUserMap; Map userIdNameMap; ``` + 4. 通过其名称直观地知道变量的类型和含义。 方法名称应首先以动词开始,如下所示: @@ -119,6 +179,7 @@ limitations under the License. return resp; } ``` + - 正面示例: > 字符串提取为常量引用。 @@ -144,6 +205,7 @@ limitations under the License. return resp; } ``` + 2. 确保代码的可读性和直观性 - `annotation` 符号中的字符串不需要提取为常量。 @@ -203,6 +265,7 @@ public CurrentHashMap funName(); return; } ``` + - 正面示例: ```java @@ -226,11 +289,13 @@ public CurrentHashMap funName(); - 多余的行 一般来说,如果一个方法的代码行深度由于连续嵌套的 `if... else..` 超过了 `2+ Tabs`,那么应该考虑试图 + - `合并分支`, - `反转分支条件` - `提取私有方法` 以减少代码行深度并提高可读性,例如: + - 联合或将逻辑合并到下一级调用中 - 负面示例: @@ -267,6 +332,7 @@ if(expression2) { ...... } ``` + - 反转条件 - 负面示例: @@ -281,6 +347,7 @@ if(expression2) { } } ``` + - 正面示例: ```java @@ -294,6 +361,7 @@ if(expression2) { // ... } ``` + - 使用单一变量或方法减少复杂的条件表达式 - 负面示例: @@ -302,6 +370,7 @@ if(expression2) { ... 
} ``` + - 正面示例: ```java @@ -346,6 +415,7 @@ if(expression2) { ```java map.computeIfAbsent(key, x -> key.toLowerCase()) ``` + - 正面示例: ```java @@ -359,6 +429,7 @@ if(expression2) { ```java map.computeIfAbsent(key, k-> Loader.load(k)); ``` + - 正面示例: ```java @@ -388,6 +459,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -405,6 +477,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -422,6 +495,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -441,6 +515,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -458,6 +533,7 @@ if(expression2) { ... } ``` + - 正面示例: ```java @@ -473,6 +549,7 @@ if(expression2) { ```java System.out.println(JobStatus.RUNNING.toString()); ``` + - 正面示例: ```java @@ -488,6 +565,7 @@ if(expression2) { ... } ``` + - 正面示例: ```java @@ -525,6 +603,7 @@ public void process(String input) { ```java log.info("Deploy cluster request " + deployRequest); ``` + - 正面示例 ```java @@ -544,6 +623,7 @@ public void process(String input) { List userList = getUsersByBatch(1000); LOG.debug("All users: {}", getAllUserIds(userList)); ``` + - 正面示例: 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: @@ -552,7 +632,7 @@ public void process(String input) { // 忽略声明行。 List userList = getUsersByBatch(1000); if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); } ``` @@ -564,13 +644,12 @@ public void process(String input) { ## 参考资料 -- https://site.mockito.org/ -- https://alibaba.github.io/p3c/ -- https://rules.sonarsource.com/java/ -- https://junit.org/junit5/ -- https://streampark.apache.org/ +- +- +- +- +- ``` ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md index 7223d9b5997..7fe9bc2e194 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md @@ -47,7 +47,7 @@ limitations under the License. ### 让 HertzBeat 运行起来 -> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 +> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 > 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 #### 后端启动 @@ -158,6 +158,7 @@ git pull upstream master - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** 提供监控管理,系统管理基础服务 > 提供对监控的管理,监控应用配置的管理,系统用户租户后台管理等。 +> > - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 > 使用通用协议远程采集获取对端指标数据。 > - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md index c9ed4b45859..2d440e18f01 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/development.md @@ -6,7 +6,7 @@ sidebar_label: 运行编译 ## 让 HertzBeat 运行起来 -> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 +> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 > 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 ### 后端启动 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md index 4cf56e0137b..7032d24688e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/document.md @@ -40,8 +40,8 @@ git clone git@github.com:/hertzbeat.git 1. 下载并安装 nodejs (版本 18.8.0) 2. 
将代码克隆到本地 `git clone git@github.com:apache/hertzbeat.git` 3. 在`home`目录下运行 `npm install` 来安装所需的依赖库。 -4. 在`home`目录下运行 `npm run start`,您可以访问 http://localhost:3000 查看站点的英文模式预览 -5. 在`home`目录下运行 `npm run start-zh-cn`,您可以访问 http://localhost:3000 查看站点的中文模式预览 +4. 在`home`目录下运行 `npm run start`,您可以访问 查看站点的英文模式预览 +5. 在`home`目录下运行 `npm run start-zh-cn`,您可以访问 查看站点的中文模式预览 6. 若要生成静态网站资源文件,请运行 `npm run build`。构建的静态资源位于 build 目录中。 ## 目录结构 @@ -93,4 +93,3 @@ css 和其他样式文件放在 `src/css` 目录中。 ### 页面内容修改 > 所有页面文档都可以通过底部的'编辑此页面'按钮直接跳转到相应的 github 资源修改页面。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md index 1b79df79f85..29b1dac509b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md @@ -22,6 +22,7 @@ sidebar_position: 4 ## 2. 准备发布 > 首先整理帐户信息以更好地了解操作过程,稍后会多次使用。 +> > - apache id: `muchunjin (APACHE LDAP 用户名)` > - apache passphrase: `APACHE LDAP 密钥` > - apache email: `muchunjin@apache.org` @@ -128,12 +129,12 @@ gpg: Total number processed: 1 gpg: unchanged: 1 ``` -或者进入 https://keyserver.ubuntu.com/ 网址,输入密钥的名称,然后点击'Search key' 按钮,查看是否有对应名称的密钥。 +或者进入 网址,输入密钥的名称,然后点击'Search key' 按钮,查看是否有对应名称的密钥。 #### 2.4 将 gpg 公钥添加到 Apache SVN 项目仓库的 KEYS 文件中 -- Apache HertzBeat Dev 分支 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat -- Apache HertzBeat Release 分支 https://dist.apache.org/repos/dist/release/incubator/hertzbeat +- Apache HertzBeat Dev 分支 +- Apache HertzBeat Release 分支 ##### 2.4.1 将公钥添加到dev分支的KEYS @@ -167,7 +168,7 @@ $ svn ci -m "add gpg key for muchunjin" ## 3. 
准备物料 & 发布 -#### 3.1 基于 master 分支,创建一个名为 release-${release_version}-rcx 的分支,例如 release-1.6.0-rc1。并基于 release-1.6.0-rc1 分支创建一个名为 v1.6.0-rc1 的标签,并将此标签设置为预发布。 +#### 3.1 基于 master 分支,创建一个名为 release-${release_version}-rcx 的分支,例如 release-1.6.0-rc1。并基于 release-1.6.0-rc1 分支创建一个名为 v1.6.0-rc1 的标签,并将此标签设置为预发布 ```shell git checkout master @@ -330,7 +331,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" - 检查 Apache SVN 提交结果 -> 在浏览器中访问 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/ , 检查是否有新的版本内容 +> 在浏览器中访问 , 检查是否有新的版本内容 ## 4. 进入社区投票阶段 @@ -338,7 +339,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" 发送社区投票邮件需要至少三个`+1`,且无`-1`。 -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`: <dev@hertzbeat.apache.org>
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: @@ -394,7 +395,7 @@ Thanks! 在72小时后,将统计投票结果,并发送投票结果邮件,如下所示。 -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`: <dev@hertzbeat.apache.org>
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -423,14 +424,14 @@ Best, ChunJin Mu ``` -邮件内容中的一项是`Vote thread`,在 https://lists.apache.org/list.html?dev@hertzbeat.apache.org 查看获取 +邮件内容中的一项是`Vote thread`,在 查看获取 #### 3.2 发送孵化社区投票邮件 发送孵化社区投票邮件需要至少三个`+1`,且无`-1`。 -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org、private@hertzbeat.apache.org
+> `Send to`: <general@incubator.apache.org>
+> `cc`: <dev@hertzbeat.apache.org>、<private@hertzbeat.apache.org>
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -484,7 +485,7 @@ ChunJin Mu 如果72小时后没有-1,回复邮件如下 -> `Send to`: general@incubator.apache.org
+> `Send to`: <general@incubator.apache.org>
> `Body`: ``` @@ -496,7 +497,7 @@ Chunjin Mu 然后将统计投票结果,并发送投票结果邮件,如下所示。 -> `Send to`: general@incubator.apache.org
+> `Send to`: <general@incubator.apache.org>
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -522,7 +523,7 @@ Best, ChunJin Mu ``` -邮件内容中的一项是`Vote thread`,在 https://lists.apache.org/list.html?general@incubator.apache.org 查看获取 +邮件内容中的一项是`Vote thread`,在 查看获取 等待一天,查看看导师是否有其他意见,如果没有,发送以下公告邮件 @@ -536,10 +537,10 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http #### 4.2 添加新版本下载地址到官网 -https://github.com/apache/hertzbeat/blob/master/home/docs/download.md -https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md + + -完成后打开官网地址 https://hertzbeat.apache.org/docs/download/ 查看是否有新版本的下载 +完成后打开官网地址 查看是否有新版本的下载 > 需要注意的是,下载链接可能需要一个小时后才会生效,请注意。 @@ -572,8 +573,8 @@ release note: xxx #### 4.4 发送新版本公告邮件 -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org
+> `Send to`: <general@incubator.apache.org>
+> `cc`: <dev@hertzbeat.apache.org>
> `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md index 9904af1967b..2c8af78f0e8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md @@ -8,7 +8,7 @@ sidebar_position: 4 详细检查列表请参考官方的[check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) -在浏览器中可访问版本内容 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/ +在浏览器中可访问版本内容 ## 1. 下载候选版本到本地 @@ -42,8 +42,8 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio #### 2.2.1 导入公钥 ```shell -$ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS -$ gpg --import KEYS # 导入KEYS到本地 +curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS +gpg --import KEYS # 导入KEYS到本地 ``` #### 2.2.2 信任公钥 @@ -121,7 +121,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz - [ ] 如果依赖的是Apache许可证并且存在`NOTICE`文件,那么这些`NOTICE`文件也需要加入到版本的`NOTICE`文件中 - [ ] ..... -参考: https://apache.org/legal/resolved.html +参考: ### 2.5. 源码编译验证 @@ -131,7 +131,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz cd apache-hertzbeat-${release_version}-incubating-src ``` -编译源码: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package +编译源码: 进行如下检查: @@ -145,7 +145,7 @@ cd apache-hertzbeat-${release_version}-incubating-src - [ ] 能够正确编译 - [ ] ..... -参考: https://apache.org/legal/resolved.html +参考: ## 3. 
邮件回复 @@ -154,13 +154,13 @@ cd apache-hertzbeat-${release_version}-incubating-src :::caution 注意 回复的邮件一定要带上自己检查了那些项信息,仅仅回复`+1 approve`,是无效的。 -PPMC 在 dev@hertzbeat.apache.org HertzBeat 的社区投票时,请带上 binding 后缀,表示对 HertzBeat 社区中的投票具有约束性投票,方便统计投票结果。 +PPMC 在 HertzBeat 的社区投票时,请带上 binding 后缀,表示对 HertzBeat 社区中的投票具有约束性投票,方便统计投票结果。 -IPMC 在 general@incubator.apache.org incubator 社区投票,请带上 binding 后缀,表示对 incubator 社区中的投票具有约束性投票,方便统计投票结果。 +IPMC 在 incubator 社区投票,请带上 binding 后缀,表示对 incubator 社区中的投票具有约束性投票,方便统计投票结果。 ::: :::caution 注意 -如果在dev@hertzbeat.apache.org已经投过票,在incubator社区进行投票回复时,可以直接带过去,需要注意约束性 如: +如果在已经投过票,在incubator社区进行投票回复时,可以直接带过去,需要注意约束性 如: ```html //incubator社区 投票时,只有IPMC成员才具有约束性 binding,PPMC需要注意约束性的变化 @@ -195,6 +195,6 @@ I checked: 5. .... ``` ---- +--- This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md index 922cbfe9a6a..64d938005fd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/mailing_lists.md @@ -34,7 +34,7 @@ limitations under the License. | 列表名称 | 地址 | 订阅 | 退订 | 归档 | |-----------|--------------------------|-------------------------------------------------|---------------------------------------------------|-------------------------------------------------------------------| -| **开发者列表** | dev@hertzbeat.apache.org | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | +| **开发者列表** | | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### 通知列表 @@ -42,16 +42,16 @@ limitations under the License. 
| 列表名称 | 地址 | 订阅 | 退订 | 归档 | |----------|------------------------------------|-----------------------------------------------------------|-------------------------------------------------------------|-----------------------------------------------------------------------------| -| **通知列表** | notifications@hertzbeat.apache.org | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| **通知列表** | | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## 订阅步骤 发送订阅邮件即可订阅。步骤如下: - 1、**订阅**:点击上表中的 **订阅** 按钮,它将重定向到您的邮件客户端。主题和内容是任意的。 - 之后,您会从 dev-help@hertzbeat.apache.org 收到确认邮件(如果没有收到,请确认电子邮件是否被自动分类为垃圾邮件、推广邮件、订阅邮件等)。 + 之后,您会从 收到确认邮件(如果没有收到,请确认电子邮件是否被自动分类为垃圾邮件、推广邮件、订阅邮件等)。 - 2、**确认**:直接回复确认邮件,或点击邮件中的链接快速回复。主题和内容是任意的。 -- 3、**欢迎**:在完成上述步骤后,您会收到一个主题为 WELCOME to dev@hertzbeat.apache.org 的欢迎邮件,您已成功订阅 Apache HertzBeat 邮件列表。 +- 3、**欢迎**:在完成上述步骤后,您会收到一个主题为 WELCOME to 的欢迎邮件,您已成功订阅 Apache HertzBeat 邮件列表。 ## 发送纯文本邮件 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md index 3aa91296baf..3e79960877c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md @@ -107,7 +107,7 @@ ttt ``` 注意,投票将在今天一周后结束,即 -[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache投票指南](https://community.apache.org/newcommitter.html) ### 关闭投票模板 @@ -372,6 +372,7 @@ hi, i 
accept. Thanks for invitaion. ![](/img/docs/community/icla-content-2.png) 在PDF中需要填写的字段: + - `Full name` - `Public name` - `Postal Address` @@ -427,7 +428,8 @@ Thanks ! ### 将Apache账号和GitHub账号关联 -访问: https://whimsy.apache.org/roster/committer/${APACHE_ID} +访问: + - `${APACHE_ID}`替换成你的Apache ID 登录后可以看到这个内容`Link GitHub username to ASF id`,点击跳转 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md index d7e144bb52b..500627ec5f2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_pmc_member_process.md @@ -79,7 +79,7 @@ ${Work list}[1] ``` 注意,投票将在今天一周后结束,即 -[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache 参考投票指南](https://community.apache.org/newcommitter.html) ### Close Vote Template @@ -283,4 +283,3 @@ A PPMC member helps manage and guide the direction of the project. Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md index 8940571f71c..7ad649e09d1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/submit-code.md @@ -29,11 +29,13 @@ limitations under the License. ```shell git clone git@github.com:<您的账户名>/hertzbeat.git ``` + * 添加远程仓库地址,命名为 upstream ```shell git remote add upstream git@github.com:apache/hertzbeat.git ``` + * 查看仓库 ```shell @@ -47,12 +49,14 @@ limitations under the License. 
```shell git fetch upstream ``` + * 将远程仓库代码同步到本地仓库 ```shell git checkout origin/master git merge --no-ff upstream/master ``` + * **⚠️注意一定要新建分支开发特性 `git checkout -b feature-xxx`,不建议使用master分支直接开发** * 在本地修改代码后,提交到自己的仓库: **注意提交信息为英文,不包含特殊字符** @@ -61,8 +65,8 @@ limitations under the License. git commit -m '[docs]necessary instructions' git push ``` + * 将更改提交到远程仓库后,您可以在您的仓库页面上看到一个绿色的按钮“Compare & pull request”,点击它。 * 这会弹出新建 Pull Request 页面,您需要这里仔细填写信息(英文),描述和代码同样重要,然后点击“Create pull request”按钮。 * 然后社区的 Committers 将进行 CodeReview,并与您讨论一些细节(包括设计、实现、性能等),之后您可以根据建议直接在这个分支更新代码(无需新建PR)。当社区 Committer approve之后,提交将被合并到 master 分支。 * 最后,恭喜您,您已经成为 HertzBeat 的官方贡献者,您会被加在贡献者墙上,您可以联系社区获取贡献者证书! - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md index c4b43f355ac..0bac76f66a3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md @@ -4,12 +4,14 @@ title: 下载 Apache HertzBeat (incubating) sidebar_label: Download --- -> **这里是 Apache HertzBeat (incubating) 官方下载页面。** +> **这里是 Apache HertzBeat (incubating) 官方下载页面。** > **请再下方表中选择版本下载,推荐使用最新版本。** :::tip + - 验证下载版本,请使用相应的哈希(sha512)、签名和[项目发布KEYS](https://downloads.apache.org/incubator/hertzbeat/KEYS)。 - 检查哈希和签名的方法参考 [如何验证](https://www.apache.org/dyn/closer.cgi#verify)。 + ::: ## 最新版本 @@ -26,8 +28,8 @@ sidebar_label: Download > Apache HertzBeat 为每个版本制作了 Docker 镜像. 你可以从 [Docker Hub](https://hub.docker.com/r/apache/hertzbeat) 拉取使用. 
-- HertzBeat https://hub.docker.com/r/apache/hertzbeat -- HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector +- HertzBeat +- HertzBeat Collector ## 归档版本 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md index 29d5478158a..94e2ad54899 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md @@ -143,4 +143,3 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md index a2e61c704fe..d79888149e9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ai_config.md @@ -23,15 +23,15 @@ keywords: [人工智能 AI] |---------|-----------------------------------------------------|-----------------------------------------------------------------| | type | zhiPu(必须和示例完全相同) | 无 | | model | glm-4-0520、glm-4 、glm-4-air、glm-4-airx、 glm-4-flash | 无 | -| api-key | xxxxx.xxxxxx | https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys | +| api-key | xxxxx.xxxxxx | | #### 阿里巴巴AI | 参数名称 | 示例 | 链接 | |---------|----------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------| | type | alibabaAi(必须和示例完全相同) | 无 | -| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | 
https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction?spm=a2c4g.11186623.0.0.4e0246c1RQFKMH | -| api-key | xxxxxxxxxxx | https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.i10 | +| model | qwen-turbo、qwen-plus、qwen-max、qwen-max-0428、qwen-max-0403、qwen-max-0107、qwen-max-longcontext | | +| api-key | xxxxxxxxxxx | | #### 月之暗面AI @@ -39,18 +39,18 @@ keywords: [人工智能 AI] |---------|-------------------------------------------------|-----------------------------------------------| | type | kimiAi(必须和示例完全相同) | 无 | | model | moonshot-v1-8k、moonshot-v1-32k、moonshot-v1-128k | 无 | -| api-key | xxxxxxxxxxx | https://platform.moonshot.cn/console/api-keys | +| api-key | xxxxxxxxxxx | | #### 科大讯飞AI -快速入门:https://www.xfyun.cn/doc/platform/quickguide.html +快速入门: | 参数名称 | 示例 | 链接 | |------------|--------------------------------------------------|---------------------------------------| | type | sparkDesk (must be exactly the same as example) | | | model | general、generalv2、generalv3、generalv3.5、4.0Ultra | | -| api-key | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | -| api-secret | xxxxxxxxxxx | https://console.xfyun.cn/services/cbm | +| api-key | xxxxxxxxxxx | | +| api-secret | xxxxxxxxxxx | | | 模型版本 | 模型类型(application.yml的model参数) | |-----------------|-------------------------------| @@ -59,4 +59,3 @@ keywords: [人工智能 AI] | Spark Pro | generalv3 | | Spark V2.0 | generalv2 | | Spark Lite(免费版) | general | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md index 52367155d89..a7f77f7f5b6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/airflow.md @@ -36,4 +36,3 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] |-------------|------|---------------| | value | 无 | Airflow版本 | | git_version | 
无 | Airflow git版本 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md index 9d0ee3b088f..ba6b49bc58a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md @@ -17,16 +17,16 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 2. **【保存机器人的WebHook地址access_token值】** -> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` +> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` > 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】** ![email](/img/docs/help/alert-notice-9.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md index 9694126d0dd..bb3c6287cd4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md @@ -63,8 +63,8 @@ keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告 1. Discord 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 +> 请排查在告警中心是否已有触发的告警信息 +> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 > 请排查机器人是否被 Discord聊天服务器正确赋权 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md index d4dc218c591..0f53b58e71d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md @@ -13,14 +13,14 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] ![email](/img/docs/help/alert-notice-1.png) -2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** +2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** ![email](/img/docs/help/alert-notice-2.png) ![email](/img/docs/help/alert-notice-3.png) -3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -32,7 +32,7 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] 2. 云环境TanCloud无法接收到邮件通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确邮箱,是否已配置告警策略关联 > 请查询邮箱的垃圾箱里是否把告警邮件拦截 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md index 604eff34fdc..5a6e95d7067 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md @@ -13,14 +13,14 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -28,7 +28,7 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 1. 飞书群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md index c81f5608674..5c5c38c56be 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md @@ -31,7 +31,7 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 1. Slack 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md index 73f434a8e8a..d6bca9843a4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md @@ -29,7 +29,7 @@ keywords: [ 告警华为云SMN通知, 开源告警系统, 开源监控告警系 5. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md index df609e66b50..dfb1aa48d8a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md @@ -60,8 +60,8 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] 1. Telegram 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 +> 请排查在告警中心是否已有触发的告警信息 +> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 > UserId 应为消息接收对象的UserId 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md index 022cd50f07e..272c59cfd4c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md @@ -13,9 +13,9 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/alert-notice-5.png) -2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -60,7 +60,7 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 1. WebHook回调未生效 -> 请查看告警中心是否已经产生此条告警信息 +> 请查看告警中心是否已经产生此条告警信息 > 请排查配置的WebHook回调地址是否正确 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md index e0dbabf1a70..5c73ffee2a6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md @@ -15,16 +15,16 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** ![email](/img/docs/help/alert-notice-7.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -32,7 +32,7 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 1. 企业微信群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md index 391005c080c..abf262d52bd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/almalinux.md @@ -105,4 +105,3 @@ keywords: [开源监控系统, 开源操作系统监控, AlmaLinux操作系统 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md index 89f3cd701bc..c1df34f880a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/api.md @@ -34,4 +34,3 @@ keywords: [开源监控系统, 开源网站监控, HTTP API监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md index 3d0654db3b5..02a93f751c5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/centos.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, CentOS操作系统监 | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md index 955c87b4e4f..1f0e1f0e6c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/clickhouse.md @@ -93,4 +93,3 @@ keywords: [开源监控系统, 开源数据库监控, Clickhouse数据库监控] | MarkCacheBytes | 无 | StorageMergeTree 的 marks 的缓存大小 | | MarkCacheFiles | 无 | StorageMergeTree 的 marks 的缓存文件数量 | | 
MaxPartCountForPartition | 无 | partitions 中最大的活跃数据块的数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md index 6b353bafd0b..983787f3b1e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md @@ -99,4 +99,3 @@ keywords: [开源监控系统, 操作系统监控, Debian监控] - 内存占用率:% - CPU占用率:% - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md index ea4a376c049..12cb13b8422 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dm.md @@ -46,4 +46,3 @@ keywords: [开源监控系统, 开源数据库监控, 达梦数据库监控] | dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | | dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | | dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md index 303ac47444f..386ec0e91e1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md @@ -70,4 +70,3 @@ keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] | Section0 | 无 | DNS查询的附加信息。 | > Additional 指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md index c546b46fd2c..0b81365780b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/docker.md @@ -99,4 +99,3 @@ firewall-cmd --reload | cpu_delta | 无 | Docker容器已经使用的CPU数量 | | number_cpus | 无 | Docker容器可以使用的CPU数量 | | cpu_usage | 无 | Docker容器CPU使用率 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md index 10a66aa6853..78afde77138 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/doris_fe.md @@ -129,4 +129,3 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库FE监控] | committed | 无 | 已提交 | | visible | 无 | 可见 | | aborted | 无 | 已中止/已撤销 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md index 8c2f1e290e4..1abcb732289 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md @@ -99,4 +99,3 @@ management: | dynamic | 无 | 是否动态线程池 | | run_timeout_count | 无 | 运行超时任务数 | | queue_timeout_count | 无 | 等待超时任务数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md index a0b3082cc1a..0e872084c2e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/elasticsearch.md @@ -61,4 +61,3 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] | disk_free | GB | 磁盘剩余容量 | | disk_total | GB | 磁盘总容量 | | disk_used_percent | % | 磁盘使用率 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md index 6c894671cc6..4c7324a3f5d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/euleros.md @@ -105,4 +105,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md index 177c41874fb..79dfd76fd7e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink.md @@ -33,4 +33,3 @@ keywords: [开源监控系统, 开源 Flink 监控] | task_total | 个 | 任务总数 | | jobs_running | 个 | 正在运行的任务数 | | jobs_failed | 个 | 已经失败的任务数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md index a9baa1eeadd..36e2508a52b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/flink_on_yarn.md @@ -141,4 +141,3 @@ keywords: [开源监控系统, 开源 Flink On Yarn 监控] | Status.JVM.Memory.Heap.Max | MB | JVM 堆内存的最大容量 | | Status.Flink.Memory.Managed.Total | MB | Flink 管理的内存总量 | | Status.Shuffle.Netty.UsedMemory | MB | Netty Shuffle 使用的内存 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md index 01313bd7ae6..e2890cc3b9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/freebsd.md @@ -85,4 +85,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md index d421b6a78eb..ac1e1621c24 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ftp.md @@ -32,4 +32,3 @@ keywords: [ 开源监控系统, 开源FTP服务器监控工具, 监控FTP指标 |------|------|------------------| | 
活动状态 | 无 | 检查目录是否存在,且具有访问权限 | | 响应时间 | ms | 连接FTP响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md index 9d39da7c9e4..54553c1200c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/fullsite.md @@ -5,8 +5,8 @@ sidebar_label: 全站监控 keywords: [开源监控系统, 开源网站监控, SiteMap监控] --- -> 对网站的全部页面监测是否可用 -> 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 +> 对网站的全部页面监测是否可用 +> 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 > 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 ### 配置参数 @@ -32,4 +32,3 @@ keywords: [开源监控系统, 开源网站监控, SiteMap监控] | statusCode | 无 | 请求此网页的响应HTTP状态码 | | responseTime | ms毫秒 | 网站响应时间 | | errorMsg | 无 | 请求此网站反馈的错误信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md index 48dc239b69a..0b5bfada4e7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md @@ -9,7 +9,7 @@ sidebar_label: 帮助入门 ## 🔬 监控服务 -> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 +> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 > 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 ### 应用服务监控 @@ -99,7 +99,7 @@ sidebar_label: 帮助入门 ## 💡 告警服务 -> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 +> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 > 告警服务的定位是阈值准确及时触发,告警通知及时可达。 ### 告警中心 @@ -115,8 +115,8 @@ sidebar_label: 帮助入门 ### 告警通知 -> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 -> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 +> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 +> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 > 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。  👉 [配置邮箱通知](alert_email)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md index bda83b006e4..186baede498 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hadoop.md @@ -87,4 +87,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md index e732bf45fd6..6912712607f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md @@ -57,4 +57,3 @@ keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] | receivedBytes | MB | 集群接收数据量 | | sentBytes | MB | 集群发送数据量(MB) | | clusterRequests | 无 | 集群总请求数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md index 1c1cfdf1802..2452e34a469 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md @@ -91,4 +91,3 @@ keywords: [开源监控系统, 开源数据库监控, RegionServer监控] | MemHeapMaxM | 无 | 集群负载均衡次数 | | MemMaxM | 无 | RPC句柄数 | | GcCount | MB | 集群接收数据量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md index db494acbb8e..5fe981ddaff 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md @@ -54,4 +54,3 @@ keywords: [大数据监控系统, 
分布式文件系统监控, Apache HDFS DataN | 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | StartTime | | 启动时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md index 66343c11cd2..46d69e5e2b4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md @@ -90,4 +90,3 @@ keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS NameN | ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | | ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | | ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md index 8396a870f11..bdf08d7b0ba 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hive.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | 内存池初始内存 | MB | 内存池请求的初始内存量。 | | 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | | 内存池内存使用量 | MB | 内存池已使用内存量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md index 6bd76f639e8..2d42a0170ee 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/huawei_switch.md @@ -51,4 +51,3 @@ keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] | ifOutErrors | 无 | 对于面向数据包的接口,该节点表示由于错误而无法发送的数据包数量。对于面向字符或固定长度接口,该节点表示由于错误而无法传输的传输单元的数量。这种计数器的值可能在管理系统的重新初始化时会不连续,其他时间如ifCounterDiscontinuityTime的值。 | | ifAdminStatus | 无 | 接口的理想状态。 testing(3)状态表示没有可操作的数据包通过。 当受管系统初始化时,全部接口开始于ifAdminStatus在down(2)状态。由于明确的管理动作或被管理的系统保留的每个配置信息,ifAdminStatus然后被更改为Up(1)或testing(3)状态(或保留在down(2)状态)。 | | ifOperStatus | 无 | 
当前接口的操作状态。testing(3)状态表示没有可操作的数据包可以通过。如果ifAdminStatus是down(2),则ifOperStatus应该是down(2)。 如果ifAdminStatus是改为up(1),则ifOperStatus应该更改为up(1)。如果接口准备好传输,接收网络流量; 它应该改为dormant(5)。如果接口正在等待外部动作(如串行线路等待传入连接); 它应该保持在down(2)状态,并且只有当有故障阻止它变成up(1)状态。 它应该留在notPresent(6)状态如果接口缺少(通常为硬件)组件。 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md index bb802791dda..11c9a91749a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hugegraph.md @@ -138,4 +138,3 @@ keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] | garbage_collector_g1_old_generation_count | 无 | 表示G1垃圾收集器老年代垃圾收集的次数 | | garbage_collector_g1_old_generation_time | 无 | 表示G1垃圾收集器老年代垃圾收集的总时间 | | garbage_collector_time_unit | 无 | 表示垃圾收集时间的单位(如毫秒、秒等) | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md index d913d0ff9f9..aba2eef0f21 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/imap.md @@ -45,4 +45,3 @@ keywords: [开源监控系统, 开源网络监控, 邮箱信息监控] | 邮件总数 | | 该文件夹下所有邮件数量 | | 最近收到邮件总数 | | 该文件夹下最近收到邮件数量 | | 未读邮件总数 | | 该文件夹下未读邮件数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md index 00ff0b7f679..1b3bda87fe3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb.md @@ -63,4 +63,3 @@ keywords: [开源监控系统, 开源数据库监控, InfluxDB 数据库监控] |--------|------|--------| | result | 无 | 结果 | | org | 无 | 组织标识符 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md index 97469a71932..5cb8d241015 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/influxdb_promql.md @@ -59,4 +59,3 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | | value | 无 | 指标值 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md index fceb485f05b..8bb3bbb25e0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md @@ -41,7 +41,7 @@ predefinedMetrics: - FILE ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 3. 在 HertzBeat 添加对应 IoTDB 监控即可。 @@ -118,4 +118,3 @@ predefinedMetrics: |------------|------|-------------| | name | 无 | 名称 | | connection | 无 | thrift当前连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md index 745a4f70a88..f17a60a9b9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md @@ -6,20 +6,20 @@ sidebar_label: 常见问题 ### 监控常见问题 -1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** +1. **页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名** > 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http -2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** +2. **网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK** > 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 
安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 +> docker安装默认启用无此问题 +> 详见 4. 配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 请参考下面几点排查问题: @@ -32,35 +32,34 @@ sidebar_label: 常见问题 ### Docker部署常见问题 -1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** +1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP +> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` -2. **按照流程部署,访问 http://ip:1157/ 无界面** +2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 +> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 -1. **按照流程部署,访问 http://ip:1157/ 无界面** +1. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md index b60a5882b9f..31e297703fc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md @@ -92,4 +92,3 @@ java -jar $JETTY_HOME/start.jar --add-module=jmx-remote | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md index f046b3ef6a0..3d9e96e55e1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jvm.md @@ -17,7 +17,7 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] 应用启动时添加JVM参数 ⚠️注意可自定义暴露端口,对外IP -参考文档: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#remote +参考文档: ```shell -Djava.rmi.server.hostname=对外ip地址 @@ -78,4 +78,3 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md index 3cb4d74132c..a79bb0e91c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md @@ -17,7 +17,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 
修改 Kafka 启动脚本 -修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` +修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` 在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 ```shell @@ -93,4 +93,3 @@ export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management. | FifteenMinuteRate | 无 | 十五分钟处理率 | > 其他指标见文知意,欢迎贡献一起优化文档。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md index e0e5ecf7e50..f75ee33ba77 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka_promql.md @@ -54,4 +54,3 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL 1. kafka启用了JMX监控,可以使用 [Kafka](kafka) 监控; 2. kafka集群部署kafka_exporter暴露的监控指标,可以参考 [Prometheus任务](prometheus) 配置Prometheus采集任务监控kafka。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md index aa242d93a6b..4f0363f621d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md @@ -13,7 +13,7 @@ keywords: [开源监控系统, 开源Kubernetes监控] 参考获取token步骤 -#### 方式一: +#### 方式一 1. 
创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### 方式二: +#### 方式二 ``` kubectl create serviceaccount cluster-admin @@ -96,4 +96,3 @@ kubectl create token --duration=1000h cluster-admin | cluster_ip | 无 | cluster ip | | selector | 无 | tag selector匹配 | | creation_time | 无 | 创建时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md index 4a69c04495e..abd87de1ef8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/linux.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, Linux操作系统监控 | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md index 291c8eb8bf6..e960d620649 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mariadb.md @@ -9,7 +9,7 @@ keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] ### 注意,必须添加 MYSQL jdbc 驱动 jar -- 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.1.0.jar. https://mvnrepository.com/artifact/com.mysql/mysql-connector-j/8.1.0 +- 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.1.0.jar. - 将此 jar 包拷贝放入 HertzBeat 的安装目录下的 `ext-lib` 目录下. 
- 重启 HertzBeat 服务。 @@ -57,4 +57,3 @@ keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] | innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | | innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | | innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md index db88c1ac5fc..1066d3934dc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/memcached.md @@ -30,7 +30,7 @@ STAT version 1.4.15 ... ``` -**帮助文档: https://www.runoob.com/memcached/memcached-stats.html** +**帮助文档: ** ### 配置参数 @@ -65,4 +65,3 @@ STAT version 1.4.15 | cmd_flush | 无 | Flush 命令请求数 | | get_misses | 无 | Get 命令未命中次数 | | delete_misses | 无 | Delete 命令未命中次数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md index 8c54174b54a..46d602fac9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md @@ -93,4 +93,3 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] | pageSize | 无 | 内存页大小 | | numPages | 无 | 内存页数量 | | maxOpenFiles | 无 | 系统中允许打开的最大文件数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md index 01167c2fc7b..8747eb4660d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb_atlas.md @@ -75,4 +75,3 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB Atlas 数据库 | Storage Size | Bytes | 使用存储空间大小 | | Indexes | 无 | 索引数 | | Index Size | Bytes | 索引大小 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md index 4d47823d43b..46046f095c9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mysql.md @@ -9,7 +9,7 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ### 注意,必须添加 MYSQL jdbc 驱动 jar -- 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.1.0.jar. https://mvnrepository.com/artifact/com.mysql/mysql-connector-j/8.1.0 +- 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.1.0.jar. - 将此 jar 包拷贝放入 HertzBeat 的安装目录下的 `ext-lib` 目录下. - 重启 HertzBeat 服务。 @@ -57,4 +57,3 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] | innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | | innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | | innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md index 84b432f4651..0b9b96b6099 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md @@ -92,4 +92,3 @@ management.endpoints.web.exposure.include=* | nacos_monitor{name='configListenSize'} | 无 | 监听的配置数 | | nacos_client_request_seconds_count | 无 | 请求的次数,包括多种(url,方法,code) | | nacos_client_request_seconds_sum | 秒 | 请求的总耗时,包括多种(url,方法,code) | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md index ded4a06ad2f..9faed580e1b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md @@ -14,23 +14,23 @@ keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGr nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 ``` -### +### **1、通过 stats 和 rocksdb stats 接口获取可用参数。** 1.1、如果只需要获取 
nebulaGraph_stats,需要确保可以访问 stats,否则会出现错误。 -默认端口是 19669,访问地址为 http://ip:19669/stats +默认端口是 19669,访问地址为 1.2、如果需要获取 rocksdb stats 的附加参数,需要确保可以访问 rocksdb stats,否则会报错。 首次连接 NebulaGraph 时,必须先注册 Storage 服务,以便正确查询数据。 -**有帮助文档:https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** +**有帮助文档:** -**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** +**** -默认端口是 19779,访问地址为:http://ip:19779/rocksdb_stats +默认端口是 19779,访问地址为: ### 配置参数 @@ -51,7 +51,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB #### 指标集:nebulaGraph_stats 指标太多,相关链接如下 -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** +**** | 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------------------------------------------|------|--------| @@ -114,11 +114,10 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB #### 指标集:rocksdb_stats 指标太多,相关链接如下 -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** +**** | 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------|------|------------------------| | rocksdb.backup.read.bytes | | 备份 RocksDB 数据库期间读取的字节数 | | rocksdb.backup.write.bytes | | 指标名称 | | ... | | ... 
| - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md index 252f5f47d8a..7fe8792d29d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph_cluster.md @@ -89,4 +89,3 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, | version | 无 | 版本 | > 如果需要自定义监控模板采集NebulaGraph集群的数据,请参考: [NGQL自定义监控](../advanced/extend-ngql.md) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md index 82908df358b..8c81c5a82c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md @@ -45,8 +45,8 @@ server { location /nginx-status { stub_status on; access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } ``` @@ -93,8 +93,8 @@ http { server { location /req-status { req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } } @@ -108,7 +108,7 @@ nginx -s reload 4. 
在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 -**参考文档: https://blog.csdn.net/weixin_55985097/article/details/116722309** +**参考文档: ** **⚠️注意监控模块的端点路径为 `/nginx-status` `/req-status`** @@ -151,4 +151,3 @@ nginx -s reload | 总请求数 | | 总请求数 | | 当前并发连接数 | | 当前并发连接数 | | 当前带宽 | kb | 当前带宽 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md index 735ab741b4d..a160f2501e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md @@ -39,4 +39,3 @@ NTP监控的中文文档如下: | 层级 | | NTP服务器的层级,表示其与参考时钟的距离。 | | 参考ID | | 指示NTP服务器使用的参考时钟或时间源的标识符。 | | 精度 | | NTP服务器时钟的精度,表示其准确性。 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md index 0af3ca3d17b..a67a0c1c820 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/openai.md @@ -12,9 +12,9 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] > 1. 打开 Chrome 浏览器的网络请求界面 > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` -> 2. 访问 https://platform.openai.com/usage -> 3. 找到 https://api.openai.com/dashboard/billing/usage 请求 -> 4. 找到请求头中 Authorization 字段,并复制 `Bearer ` 之后的内容。例如: `sess-123456` +> 2. 访问 +> 3. 找到 请求 +> 4. 
找到请求头中 Authorization 字段,并复制 `Bearer` 之后的内容。例如: `sess-123456` ### 注意事项 @@ -81,4 +81,3 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] | 税务ID | 无 | 税务ID | | 结算地址 | 无 | 结算地址 | | 业务地址 | 无 | 业务地址 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md index 8bf21d7debb..964fc909c33 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opengauss.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, OpenGauss数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md index f32e2b070ae..2f1e00a9e39 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/opensuse.md @@ -105,4 +105,3 @@ keywords: [开源监控系统, 开源操作系统监控, OpenSUSE操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md index 71f0db0bf95..042c8d8b831 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/oracle.md @@ -67,4 +67,3 @@ keywords: [开源监控系统, 开源数据库监控, Oracle数据库监控] | qps | QPS | I/O Requests per Second 每秒IO请求数量 | | tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | | mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md index 401e86f9382..59ac237ed34 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ping.md @@ -31,7 +31,6 @@ keywords: [开源监控系统, 开源网络监控, 网络PING监控] 1. 安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - +> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 +> docker安装默认启用无此问题 +> 详见 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md index c4bf36a4cfb..19fde18ed84 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/plugin.md @@ -29,4 +29,3 @@ sidebar_label: 自定义插件 ![plugin-4.png](/img/docs/help/plugin-4.png) 6. 然后重启`HertzBeat`,即可实现自定义告警后处理策略。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md index 4c58cc4a308..7a55a98df3e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pop3.md @@ -44,4 +44,3 @@ keywords: [开源监控工具,开源Java监控工具,监控POP3指标] |-------|------|-----------| | 邮件数量 | | 邮件数量 | | 邮箱总大小 | kb | 邮箱中邮件的总大小 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md index dd0b19aac82..0b73299aa53 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/port.md @@ -26,4 +26,3 @@ keywords: [开源监控系统, 开源网络监控, TCP 端口可用性监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md index 59adae7da81..12485e62ffa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/postgresql.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, PostgreSQL数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md index 592e840b463..64b785fde73 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prestodb.md @@ -72,4 +72,3 @@ keywords: [ 开源监控系统, 开源数据库监控, Presto数据库监控 ] | state | 无 | 状态 | | self | 无 | 自身 | | lastHeartbeat | 无 | 最后心跳时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md index 2eda0726d27..cd21bece380 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/process.md @@ -85,4 +85,3 @@ keywords: [开源监控系统, 操作系统进程监控, 进程监控] - read_bytes(进程从磁盘实际读取的字节数) - write_bytes(进程写入到磁盘的实际字节数) - cancelled_write_bytes(进程写入到磁盘的实际字节数) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md index 571da45aac0..a9feebfe623 100755 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/prometheus.md @@ -39,4 +39,3 @@ keywords: [ 开源监控系统, Prometheus协议监控 ] - 端点路径:`/actuator/prometheus` 其余设置保持默认。 - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md index 1c12244997b..f37070d8604 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/pulsar.md @@ -70,4 +70,3 @@ Broker端消息发布延迟 #### 指标集合:pulsar_metadata_store_ops_latency_ms Broker端元数据存储操作延迟 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md index 89c728162c9..2210a2452e0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md @@ -18,7 +18,7 @@ keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间 rabbitmq-plugins enable rabbitmq_management ``` -2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 +2. 浏览器访问 ,默认账户密码 `guest/guest`. 成功登录即开启成功。 3. 
在 HertzBeat 添加对应 RabbitMQ 监控即可,参数使用 Management 的 IP 端口,默认账户密码。 @@ -123,4 +123,3 @@ rabbitmq-plugins enable rabbitmq_management | message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md index e0b8ae48cf4..1c4c6b5167a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redhat.md @@ -105,4 +105,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md index 58248fb0b45..0a0c9f77a65 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis.md @@ -237,4 +237,3 @@ keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] | cmdstat_lpop | 无 | lpop命令的统计信息 | | cmdstat_rpop | 无 | rpop命令的统计信息 | | cmdstat_llen | 无 | llen命令的统计信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md index ed684ef1a35..e5aed34ba3f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md @@ -85,6 +85,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 external: name: hertzbeat-redis-cluster ``` + 2. 查看所有容器的 IP 地址,搭建 Redis 集群时需要用到这些. 
```bash @@ -132,6 +133,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 } }, ``` + 3. 进入容器, 然后构建集群. ```bash @@ -148,6 +150,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 192.168.117.2:6379 \ --cluster-replicas 1 ``` + 4. 最终的效果. 添加监控节点时填入所需要的参数. @@ -158,7 +161,6 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 ![](/img/docs/help/redis-cluster-view.png) - ### Configuration Parameters +### Configuration Parameters 查看 [REDIS](https://hertzbeat.apache.org/docs/help/redis) 文档. - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md index 84cc24fc976..65ca5d96613 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rocketmq.md @@ -46,4 +46,3 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 | Consume_type | 无 | 消费类型 | | Consume_tps | 无 | 消费TPS | | Delay | 无 | 延迟 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md index 55923468da8..35dcfae06ef 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rockylinux.md @@ -105,4 +105,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md index 1149ed4bdd9..87bb81b7800 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md @@ -127,4 +127,3 @@ shenyu: |-------|------|-------------| | state | 无 | 线程状态 | | value | 无 | 对应线程状态的线程数量 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md index 5755437e80e..73e9af9ee13 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/smtp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit 通过 SMTP 的 hello 命令确定服务器是否可用 ``` -> 详见 https://datatracker.ietf.org/doc/html/rfc821#page-13 +> 详见 **协议使用:SMTP** @@ -38,4 +38,3 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit | 响应状态 | | 响应状态 | | SMTP 服务器标语 | | SMTP 服务器的标语 | | helo 命令返回信息 | | helo 命令返回的响应信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md index a0695849705..aaba0dd9841 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/spring_gateway.md @@ -87,4 +87,3 @@ management: | 匹配规则 | 无 | 路由匹配规则 | | 资源标识符 | 无 | 服务资源标识符 | | 优先级 | 无 | 此路由的优先级 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md index e66d4237a13..d39b67d3efd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot2.md @@ -94,4 +94,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ |----------|------|-----------| | space | 无 | 内存空间名称 | | mem_used | MB | 此空间占用内存大小 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md index 56a63068b17..58f1942cf0d 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/springboot3.md @@ -89,4 +89,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ | 指标名称 | 指标单位 | 指标帮助描述 | |--------|------|-----------------| | status | 无 | 服务健康状态: UP,Down | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md index 22a5a50ddd8..847a7775adc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md @@ -57,8 +57,8 @@ keywords: [开源监控系统, 开源数据库监控, SqlServer数据库监控] 1. SSL连接问题修复 -jdk版本:jdk11 -问题描述:SQL Server2019使用SA用户连接报错 +jdk版本:jdk11 +问题描述:SQL Server2019使用SA用户连接报错 错误信息: ```text diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md index 73957e31fb8..e15de6e3e97 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ssl_cert.md @@ -31,4 +31,3 @@ keywords: [开源监控系统, 开源网站监控, SSL证书监控监控] | start_timestamp | ms毫秒 | 有效期开始时间戳 | | end_time | 无 | 过期时间 | | end_timestamp | ms毫秒 | 过期时间戳 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md index 2dc77cd8f6f..6d21a834525 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/status.md @@ -17,9 +17,9 @@ keywords: [开源监控系统, 开源网站监控, 状态页面] |------|----------------------------------------|---------------------------------------------------------------------------------------------------| | 组织名称 | 组织的名称 | HertzBeat | | 组织介绍 | 组织的详细介绍 | Apache HertzBeat 
(incubating) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 | -| 网站链接 | 组织网站的 URL,便于访问者获取更多信息 | https://hertzbeat.apache.org/ | -| 标志图片 | 组织官方标志或 Logo 的图片文件路径或 URL,建议使用 .svg 格式 | https://hertzbeat.apache.org/zh-cn/img/hertzbeat-logo.svg | -| 反馈地址 | 接收问题反馈的地址 | https://github.com/apache/hertzbeat/issues | +| 网站链接 | 组织网站的 URL,便于访问者获取更多信息 | | +| 标志图片 | 组织官方标志或 Logo 的图片文件路径或 URL,建议使用 .svg 格式 | | +| 反馈地址 | 接收问题反馈的地址 | | | 主题颜色 | 状态页面的主色调。 | 在页面中点击选择 | 填写完组织信息后,点击 `确定`。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md index fe5eef718ef..b0ea82505ca 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tidb.md @@ -44,4 +44,3 @@ keywords: [开源监控系统, 开源数据库监控, TiDB数据库监控] | max_connections | 无 | 该变量表示 TiDB 中同时允许的最大客户端连接数,用于资源控制。默认情况下,该变量值为 0 表示不限制客户端连接数。当本变量的值大于 0 且客户端连接数到达此值时,TiDB 服务端将会拒绝新的客户端连接。 | | datadir | 无 | 数据存储的位置,位置可以是本地路径 /tmp/tidb。如果数据存储在 TiKV 上,则可以是指向 PD 服务器的路径。变量值的格式为 ${pd-ip}:${pd-port},表示 TiDB 在启动时连接到的 PD 服务器。 | | port | 无 | 使用 MySQL 协议时 tidb-server 监听的端口。 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md index 8b5e6c8aca9..1a6b02b45b2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md @@ -62,4 +62,3 @@ ${FORMATTER [{ + | - } ]} - `${time+1h+15s+30s}` 计算当前时间一小时15分钟30秒之后的时间,并格式化为 `HH:mm:ss` 2. 
复杂表达式模板(如果内置的格式化器无法满足需要,可以组合使用多个表达式) - `${@year}年${@month}月${@day}日`,获取当前日期并按照 yyyy年MM月dd日格式返回 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md index b366ee3c2ac..e1f112777f7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/tomcat.md @@ -72,4 +72,4 @@ keywords: [开源监控系统, 开源网站监控, Tomcat监控] CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` -参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 +参考: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md index 3ec51e5464a..4425f1c2c06 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ubuntu.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, Ubuntu监控] | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md index ee2f388873b..dde32b91e4d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/udp_port.md @@ -29,4 +29,3 @@ keywords: [开源监控系统, 开源网络监控, UDP 端口可用性监控] | 指标名称 | 指标单位 | 指标帮助描述 | |------|---------|--------| | 响应时间 | 毫秒 (ms) | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md index 8efe5262612..7403f255aec 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/website.md @@ -27,4 +27,3 @@ keywords: [开源监控系统, 开源网站监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md index 3bd02f3ce18..b4dfc13d701 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/websocket.md @@ -31,4 +31,3 @@ keywords: [ 开源监控系统, Websocket监控 ] | statusMessage | 无 | 状态消息 | | connection | 无 | 表示连接方式 | | upgrade | 无 | 升级后的协议 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md index 41447469e61..0b1791435f4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/windows.md @@ -8,10 +8,10 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 > 通过SNMP协议对Windows操作系统的通用性能指标进行采集监控。 > 注意⚠️ Windows服务器需开启SNMP服务 -参考资料: -[什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) +参考资料: +[什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) +[什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) +[Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) [Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### 配置参数 @@ -41,4 +41,3 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 | services | 个数 | 当前服务数量 | | processes | 
个数 | 当前进程数量 | | responseTime | ms | 采集响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md index c35a0226876..6694aff14fe 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/yarn.md @@ -81,4 +81,3 @@ keywords: [大数据监控系统, Apache Yarn监控, 资源管理器监控] | 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | StartTime | | 启动时间戳 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md index 476498549aa..db6043f104a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md @@ -117,4 +117,3 @@ Complete! | user_name | 无 | 用户名 | | user_home | 无 | 用户主目录 | | user_dir | 无 | 用户当前目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md index e22cea0502b..2e8e643ea6d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md @@ -32,13 +32,13 @@ slug: / > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ----- +---- ### 强大的监控模版 > 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 -HertzBeat 自身并没有去创造一种采集数据协议让监控对端来适配它。而是充分使用了现有的生态,`SNMP协议`采集网络交换机路由器信息,`JMX规范`采集JAVA应用信息,`JDBC规范`采集数据集信息,`SSH`直连执行脚本获取回显信息,`HTTP+(JsonPath | prometheus等)`解析API接口信息,`IPMI协议`采集服务器信息等等。 +HertzBeat 自身并没有去创造一种采集数据协议让监控对端来适配它。而是充分使用了现有的生态,`SNMP协议`采集网络交换机路由器信息,`JMX规范`采集JAVA应用信息,`JDBC规范`采集数据集信息,`SSH`直连执行脚本获取回显信息,`HTTP+(JsonPath | prometheus等)`解析API接口信息,`IPMI协议`采集服务器信息等等。 HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标数据。 
![hertzbeat](/img/blog/multi-protocol.png) @@ -92,21 +92,22 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 强大自定义功能 -> 由前面的**监控模版**介绍,大概清楚了 `HertzBeat` 拥有的强大自定义功能。 -> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 +> 由前面的**监控模版**介绍,大概清楚了 `HertzBeat` 拥有的强大自定义功能。 +> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 > 模版里面包含各个协议的使用配置,环境变量,指标转换,指标计算,单位转换,指标采集等一系列功能,帮助用户能采集到自己想要的监控指标。 ![hertzbeat](/img/docs/custom-arch.png) ### 无需 Agent -> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 `agent` 的安装部署调试升级了。 -> 每台主机得装个 `agent`,为了监控不同应用中间件可能还得装几个对应的 `agent`,监控数量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 +> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 `agent` 的安装部署调试升级了。 +> 每台主机得装个 `agent`,为了监控不同应用中间件可能还得装几个对应的 `agent`,监控数量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 > `agent` 的版本是否与主应用兼容, `agent` 与主应用的通讯调试, `agent` 的同步升级等等等等,这些全是头大的点。 -`HertzBeat` 的原理就是使用不同的协议去直连对端系统,采用 `PULL` 的形式去拉取采集数据,无需用户在对端主机上部署安装 `Agent` | `Exporter` 等。 -- 比如监控 `linux操作系统`, 在 `HertzBeat` 端输入IP端口账户密码或密钥即可。 -- 比如监控 `mysql数据库`, 在 `HertzBeat` 端输入IP端口账户密码即可。 +`HertzBeat` 的原理就是使用不同的协议去直连对端系统,采用 `PULL` 的形式去拉取采集数据,无需用户在对端主机上部署安装 `Agent` | `Exporter` 等。 + +- 比如监控 `linux操作系统`, 在 `HertzBeat` 端输入IP端口账户密码或密钥即可。 +- 比如监控 `mysql数据库`, 在 `HertzBeat` 端输入IP端口账户密码即可。 **密码等敏感信息全链路加密** ### 高性能集群 @@ -152,11 +153,11 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 --- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ------ +----- ## 即刻体验一波 -Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` +Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` 浏览器访问 `http://localhost:1157` 默认账户密码 `admin/hertzbeat` ### 登陆页面 @@ -301,6 +302,6 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 **还有更多强大的功能快去探索呀。Have Fun!** ------ +----- -**Github: https://github.com/apache/hertzbeat** +**Github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md index 
0e01e014901..910499fe860 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md @@ -17,4 +17,3 @@ Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) ![logo](/img/hertzbeat-brand.svg) Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md index ce89d825b7e..46d24c91b5d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/account-modify.md @@ -6,9 +6,9 @@ sidebar_label: 更新账户和密钥 ## 更新账户 -Apache HertzBeat (incubating) 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat -若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 -修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 +Apache HertzBeat (incubating) 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat +若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 +修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 配置文件内容参考如下 ```yaml @@ -157,4 +157,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** +**重启 HertzBeat 浏览器访问 即可探索使用 HertzBeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md index 01380784169..95bedddc350 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md @@ -10,8 +10,8 @@ sidebar_label: 常见参数配置 ### 配置HertzBeat的配置文件 -修改位于 `hertzbeat/config/application.yml` 的配置文件 
-注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 +修改位于 `hertzbeat/config/application.yml` 的配置文件 +注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 1. 配置短信发送服务器 @@ -74,4 +74,3 @@ warehouse: port: 6379 password: 123456 ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md index 2bee426c1ab..06ae9bc2f29 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md @@ -9,7 +9,7 @@ sidebar_label: Docker Compose方式安装 ::: :::note -需您的环境中已经拥有 Docker 环境 和 Docker Compose 环境 ,若未安装请参考 [Docker官网文档](https://docs.docker.com/compose/install/) +需您的环境中已经拥有 Docker 环境 和 Docker Compose 环境 ,若未安装请参考 [Docker官网文档](https://docs.docker.com/compose/install/) 执行命令 `docker compose version` 检查是否拥有 Docker Compose 环境。 ::: @@ -20,21 +20,23 @@ sidebar_label: Docker Compose方式安装 2. 选择使用 HertzBeat + PostgreSQL + VictoriaMetrics 方案 :::tip -- `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` 解压后包含多个部署方案,这里我们推荐选择 `hertzbeat-postgresql-victoria-metrics` 方案。 + +- `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` 解压后包含多个部署方案,这里我们推荐选择 `hertzbeat-postgresql-victoria-metrics` 方案。 - 其它部署方式请详细阅读各个部署方案的 README.md 文件, MySQL 方案需要自行准备 MySQL 驱动包。 + ::: - 解压脚本包 ``` -$ tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz +tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz ``` - 进入解压目录, 选择 `HertzBeat + PostgreSQL + VictoriaMetrics` 一键部署 ``` -$ cd apache-hertzbeat-1.6.0-incubating-docker-compose -$ cd hertzbeat-postgresql-victoria-metrics +cd apache-hertzbeat-1.6.0-incubating-docker-compose +cd hertzbeat-postgresql-victoria-metrics ``` - 一键启动 @@ -53,14 +55,13 @@ docker-compose up -d docker-compose ps ``` -4. 开始探索 HertzBeat - 浏览器访问 http://ip:1157/ 即可开始探索使用,默认账户密码 admin/hertzbeat。 +4. 
开始探索 HertzBeat + 浏览器访问 即可开始探索使用,默认账户密码 admin/hertzbeat。 **HAVE FUN** ----- +---- ### 部署常见问题 **最多的问题就是容器网络问题,请先提前排查** - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md index d89816f8a12..caf412441d7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md @@ -41,14 +41,16 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ - `apache/hertzbeat` : 使用[官方应用镜像](https://hub.docker.com/r/apache/hertzbeat)来启动容器, 若网络超时可用`quay.io/tancloud/hertzbeat`代替。 :::tip + - 标记为可选的参数,非必填项,若不需要则删除。 -- 此将容器的 1157,1158 端口映射到宿主机的 1157,1158 端口上。若宿主机该端口已被占用,则需修改主机映射端口。 -- 挂载文件时,前面参数为你自定义本地文件地址,后面参数为容器内文件地址。挂载时请确保你本地已有此文件。 -- 可执行```docker update --restart=always hertzbeat```配置容器自动重启。 +- 此将容器的 1157,1158 端口映射到宿主机的 1157,1158 端口上。若宿主机该端口已被占用,则需修改主机映射端口。 +- 挂载文件时,前面参数为你自定义本地文件地址,后面参数为容器内文件地址。挂载时请确保你本地已有此文件。 +- 可执行```docker update --restart=always hertzbeat```配置容器自动重启。 + ::: 2. 开始探索 HertzBeat - 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 ### 部署 HertzBeat Collector 集群(可选) @@ -82,30 +84,32 @@ $ docker run -d \ - `apache/hertzbeat-collector` : 使用[官方应用镜像](https://hub.docker.com/r/apache/hertzbeat-collector)来启动容器, 若网络超时可用`quay.io/tancloud/hertzbeat-collector`代替。 :::tip + - `MANAGER_HOST=127.0.0.1` 中的 `127.0.0.1` 需被替换为 HertzBeat Server 对外 IP 地址。 - 标记为可选的参数,非必填项,若不需要则删除。 - 挂载文件时,前面参数为你自定义本地文件地址,后面参数为容器内文件地址。挂载时请确保你本地已有此文件。 -- 可执行```docker update --restart=always hertzbeat-collector```配置容器自动重启。 +- 可执行```docker update --restart=always hertzbeat-collector```配置容器自动重启。 + ::: 2. 开始探索 HertzBeat Collector - 浏览器访问 http://ip:1157/ 即可开始探索使用,默认账户密码 admin/hertzbeat。 + 浏览器访问 即可开始探索使用,默认账户密码 admin/hertzbeat。 **HAVE FUN** ----- +---- ### Docker 方式部署常见问题 **最多的问题就是网络问题,请先提前排查** -1. 
MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败 +1. MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP +> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` -2. 按照流程部署,访问 http://ip:1157/ 无界面 +2. 按照流程部署,访问 无界面 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 @@ -128,8 +132,9 @@ $ docker run -d \ > 此文件是HertzBeat的配置文件,用于配置HertzBeat的各种参数,如数据库连接信息,时序数据库配置等。 -下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml -下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) +下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml +下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + - 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数 - 若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - 若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.victoria-metrics`参数 具体步骤参见 [使用victoria-metrics存储指标数据](victoria-metrics-init) @@ -138,8 +143,8 @@ $ docker run -d \ > 此文件是HertzBeat的用户配置文件,用于配置HertzBeat的用户信息,如账户密码等。 -HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat -若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 -下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml -下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) +HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat +若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 +下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml +下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) 具体修改步骤参考 [配置修改账户密码](account-modify) diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md index 6f946707cab..5928c7b826a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md @@ -16,7 +16,8 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### 通过Docker方式安装GreptimeDB > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -75,4 +76,3 @@ warehouse: 1. 时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md index 82b833d459c..c21d02e9e6b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** ### 1. 直接使用华为云服务 GaussDB For Influx @@ -24,7 +24,8 @@ InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海 ### 2. 通过Docker方式安装InfluxDB > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -> 1. 下载安装Docker环境 +> +> 1. 
下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -41,13 +42,13 @@ $ docker run -p 8086:8086 \ influxdb:1.8 ``` -`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 +`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -74,4 +75,3 @@ warehouse: 1. 时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md index 859e2ba39d0..2132e24b010 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md @@ -28,6 +28,7 @@ Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据 $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker安装IoTDB ```shell @@ -122,4 +123,3 @@ warehouse: > iot-db enable是否设置为true > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md index 1b7154a2fcb..e05bfde2a29 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md @@ -12,7 +12,7 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) ### 通过Docker方式安装MYSQL -1. 
下载安装Docker环境 +1. 下载安装Docker环境 Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后请于终端检查Docker版本输出是否正常。 @@ -20,6 +20,7 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker安装MYSQl ``` @@ -31,29 +32,29 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) mysql:5.7 ``` - `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 + `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 ### 数据库创建 -1. 进入MYSQL或使用客户端连接MYSQL服务 +1. 进入MYSQL或使用客户端连接MYSQL服务 `mysql -uroot -p123456` -2. 创建名称为hertzbeat的数据库 +2. 创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 查看hertzbeat数据库是否创建成功 `show databases;` ### 添加 MYSQL jdbc 驱动 jar -- 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.0.25.jar. https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip +- 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.0.25.jar. - 将此 jar 包拷贝放入 HertzBeat 的安装目录下的 `ext-lib` 目录下. 
### 修改hertzbeat的配置文件application.yml切换数据源 - 配置 HertzBeat 的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 + 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) 需修改部分原参数: @@ -101,4 +102,4 @@ spring: - 通过docker启动时,建议修改host为宿主机的外网IP地址,包括mysql连接字符串。 -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md index 86c08ce67dd..ea90a2ed9d5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md @@ -5,7 +5,7 @@ sidebar_label: 安装包方式安装 --- :::tip -Apache HertzBeat (incubating) 支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 +Apache HertzBeat (incubating) 支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 安装包方式依赖 Java 运行环境,需您的环境中已经拥有 Java17 环境,若未安装请参考 [官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) ::: @@ -20,11 +20,11 @@ Apache HertzBeat (incubating) 支持在Linux Windows Mac系统安装运行,CPU 解压安装包到主机 eg: /opt/hertzbeat ``` -$ tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz +tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz ``` :::tip -位于 `config/application.yml` 的配置文件,您可以根据需求修改配置文件来配置外部依赖的服务,如数据库,时序数据库等参数。 +位于 `config/application.yml` 的配置文件,您可以根据需求修改配置文件来配置外部依赖的服务,如数据库,时序数据库等参数。 HertzBeat 启动时默认全使用内部服务,但生产环境建议切换为外部数据库服务。 ::: @@ -35,7 +35,7 @@ HertzBeat 启动时默认全使用内部服务,但生产环境建议切换为 3. 
配置账户文件(可选) -HertzBeat 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat +HertzBeat 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat 若需要新增删除修改账户或密码,可以通过修改位于 `config/sureness.yml` 的配置文件实现,具体参考 - [配置修改账户密码](account-modify) @@ -45,16 +45,16 @@ HertzBeat 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat 执行位于安装目录 bin 下的启动脚本 startup.sh, windows 环境下为 startup.bat ``` -$ ./startup.sh +./startup.sh ``` -5. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 +5. 开始探索HertzBeat + 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 ### 部署 HertzBeat Collector 集群(可选) :::note -HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数据发送到 HertzBeat Server。 +HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数据发送到 HertzBeat Server。 通过部署多个 HertzBeat Collector 可以实现数据的高可用,负载均衡和云边协同。 ::: @@ -69,7 +69,7 @@ HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数 解压安装包到主机 eg: /opt/hertzbeat-collector ``` -$ tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz +tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz ``` 配置采集器的配置文件 `config/application.yml` 里面的 HertzBeat Server 连接 IP, 端口, 采集器名称(需保证唯一性)等参数。 @@ -98,15 +98,15 @@ collector: 执行位于安装目录 hertzbeat-collector/bin/ 下的启动脚本 startup.sh, windows 环境下为 startup.bat ``` -$ ./startup.sh +./startup.sh ``` 4. 开始探索 HertzBeat Collector - 浏览器访问 http://ip:1157/ 即可开始探索使用,默认账户密码 admin/hertzbeat。 + 浏览器访问 即可开始探索使用,默认账户密码 admin/hertzbeat。 **HAVE FUN** ----- +---- ### 安装包部署常见问题 @@ -114,9 +114,9 @@ $ ./startup.sh 1. 启动失败,需您提前准备JAVA运行环境 -安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) -要求:JAVA17环境 -下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) +安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) +要求:JAVA17环境 +下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) 安装后命令行检查是否成功安装 ``` @@ -127,10 +127,9 @@ Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) ``` -2. 按照流程部署,访问 http://ip:1157/ 无界面 +2. 
按照流程部署,访问 无界面 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md index 331f021c747..18b33658f0e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/postgresql-change.md @@ -12,17 +12,18 @@ PostgreSQL 是一个功能强大,开源的关系型数据库管理系统(RDB ### 通过 Docker 方式安装 PostgreSQL -1. 下载安装 Docker 环境 +1. 下载安装 Docker 环境 Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。安装完毕后请于终端检查 Docker 版本输出是否正常。 ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker 安装 PostgreSQL ```shell - $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 + docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` 使用 ```$ docker ps``` 查看数据库是否启动成功 @@ -35,11 +36,13 @@ PostgreSQL 是一个功能强大,开源的关系型数据库管理系统(RDB su - postgres psql ``` + 2. 创建名称为 hertzbeat 的数据库 ```sql CREATE DATABASE hertzbeat; ``` + 3. 查看 hertzbeat 数据库是否创建成功 ```sql @@ -49,9 +52,9 @@ PostgreSQL 是一个功能强大,开源的关系型数据库管理系统(RDB ### 修改 hertzbeat 的配置文件 application.yml 切换数据源 1. 
配置 HertzBeat 的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️ docker 容器方式需要将 application.yml 文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的 `spring.database` 数据源参数,IP 端口账户密码驱动 + 替换里面的 `spring.database` 数据源参数,IP 端口账户密码驱动 ⚠️注意 `application.yml` 文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) ```yaml @@ -95,4 +98,4 @@ spring: level: SEVERE ``` -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md index 064230770a6..570d21dbc35 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md @@ -41,7 +41,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 1. 下载您系统环境对应的安装包`hertzbeat-xx.tar.gz` [Download Page](https://hertzbeat.apache.org/docs/download) 2. 配置 HertzBeat 的配置文件 `hertzbeat/config/application.yml`(可选) -3. 部署启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` +3. 部署启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` 4. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 5. 
部署采集器集群(可选) - 下载您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [Download Page](https://hertzbeat.apache.org/docs/download) @@ -58,7 +58,8 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` + + - 启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md index 1eb90ccaa83..b18881b7b93 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md @@ -12,7 +12,7 @@ sidebar_label: SSL证书过期监控使用案例 Apache HertzBeat (incubating) 一个拥有强大自定义监控能力,无需Agent的实时监控工具。网站监测,PING连通性,端口可用性,数据库,操作系统,中间件,API监控,阈值告警,告警通知(邮件微信钉钉飞书)。 -github: https://github.com/apache/hertzbeat +github: #### 安装 HertzBeat @@ -32,7 +32,7 @@ github: https://github.com/apache/hertzbeat 2. 配置监控网站 -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 +> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 > 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 ![](/img/docs/start/ssl_2.png) @@ -77,8 +77,8 @@ github: https://github.com/apache/hertzbeat 钉钉微信飞书等token配置可以参考帮助文档 -https://hertzbeat.apache.org/docs/help/alert_dingtalk -https://tancloud.cn/docs/help/alert_dingtalk + + > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 @@ -88,8 +88,8 @@ https://tancloud.cn/docs/help/alert_dingtalk ---- -#### 完! +#### 完 监控SSL证书的实践就到这里,当然对hertzbeat来说这个功能只是冰山一角,如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md index 9837a5a5b99..d85ca355bd9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有TDengine环境,可直接跳到创建数据库实例那一步。 @@ -18,7 +18,8 @@ TDengine是一款开源物联网时序型数据库,我们用其存储采集到 ### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -37,7 +38,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ ``` `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 -`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 +`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 使用```$ docker ps```查看数据库是否启动成功 ### 创建数据库实例 @@ -47,8 +48,9 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 1. 进入数据库Docker容器 ``` - $ docker exec -it tdengine /bin/bash + docker exec -it tdengine /bin/bash ``` + 2. 修改账户密码 > 建议您修改密码。TDengine默认的账户密码是 root/taosdata @@ -80,6 +82,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ taos> show databases; taos> use hertzbeat; ``` + 5. 退出TDengine CLI ``` @@ -93,7 +96,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -134,4 +137,3 @@ warehouse: > td-engine enable是否设置为true > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md index 4beebcd5045..253fe909107 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决方案和时序数据库,兼容 Prometheus 生态。推荐版本(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 @@ -18,7 +18,8 @@ VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决 ### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -41,8 +42,8 @@ $ docker run -d -p 8428:8428 \ 3. 
在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 配置HertzBeat的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** @@ -68,4 +69,3 @@ warehouse: 1. 时序数据库是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md index 219620a230c..521e528a299 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/template.md @@ -6,7 +6,7 @@ sidebar_label: 监控模版 > Apache HertzBeat (incubating) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
这是它的架构原理: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md index cdd62f2209a..c0acbf1ea87 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md @@ -43,7 +43,7 @@ sidebar_label: 系统默认解析方式 ``` 样例: -查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 +查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 若只有一台虚拟机,其单层格式为: ```json @@ -157,4 +157,3 @@ metrics: # 这里使用HertzBeat默认解析 parseType: default ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md index b8699c93dcc..9317fdbfc21 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md @@ -61,7 +61,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为`hertzbeat`的自定义监控类型,其使用HTTP协议采集指标数据。 @@ -206,12 +206,12 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! 如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md index 93379199b19..bbcaa5299d0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md @@ -196,7 +196,7 @@ metrics: ``` -**此时,重启hertzbeat系统,在系统页面上添加 `hertzbeat_token` 类型监控,配置输入参数,`content-type`填`application/json` , `请求Body`填账户密码json如下: ** +**此时,重启hertzbeat系统,在系统页面上添加 `hertzbeat_token` 类型监控,配置输入参数,`content-type`填`application/json` , `请求Body`填账户密码json如下:** ```json { @@ -378,12 +378,12 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! 如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md index 71a6b3f116e..5ce2aad2738 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md @@ -61,7 +61,7 @@ sidebar_label: JsonPath解析方式 #### 样例 -查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 +查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 接口返回的原始数据如下: ```json @@ -175,4 +175,3 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md index c12ea3539fe..467921638da 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md @@ -10,10 +10,10 @@ sidebar_label: HTTP协议自定义监控 【**HTTP接口调用**】->【**响应校验**】->【**响应数据解析**】->【**默认方式解析|JsonPath脚本解析 | XmlPath解析(todo) | Prometheus解析**】->【**指标数据提取**】 -由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 +由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 -**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) +**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) **JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) ### 自定义步骤 @@ -22,13 +22,13 @@ HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数, 
![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 @@ -203,4 +203,3 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md index 9bcd5cded08..9dd3e547b5b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md @@ -21,7 +21,7 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book limit 1; 这里指标字段就能和响应数据一一映射为一行采集数据。 @@ -30,7 +30,7 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book; 这里指标字段就能和响应数据一一映射为多行采集数据。 @@ -39,9 +39,9 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 -例如: -查询字段:one tow three four -查询SQL:select key, value from book; +例如: +查询字段:one tow three four +查询SQL:select key, value from book; SQL响应数据: | key | value | @@ -59,13 +59,13 @@ SQL响应数据: ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_sql的自定义监控类型,其使用JDBC协议采集指标数据。 @@ -243,4 +243,3 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: 
^_^url^_^ ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md index 032f09f4f14..0e7694f76ce 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md @@ -4,7 +4,7 @@ title: JMX协议自定义监控 sidebar_label: JMX协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 > JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 ### JMX协议采集流程 @@ -23,13 +23,13 @@ sidebar_label: JMX协议自定义监控 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下监控模版的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 @@ -236,4 +236,3 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md index a59d9b1898a..9f7ae8ee1b1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md @@ -168,4 +168,3 @@ metrics: parseType: website ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md index 387d67c5987..1172b263c2e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md @@ -4,7 +4,7 @@ title: SNMP协议自定义监控 sidebar_label: SNMP协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 > SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 ### SNMP协议采集流程 @@ -23,13 +23,13 @@ sidebar_label: SNMP协议自定义监控 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 @@ -207,4 +207,3 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md index 0c4fa9cb9ab..0300c14b31d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md @@ -4,7 +4,7 @@ title: SSH协议自定义监控 sidebar_label: SSH协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 > SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 ### SSH协议采集流程 @@ -21,12 +21,12 @@ SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 > 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 -例如: -需要查询Linux的指标 hostname-主机名称,uptime-启动时间 -主机名称原始查询命令:`hostname` -启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` -则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): -`hostname; uptime | awk -F "," '{print $1}'` +例如: +需要查询Linux的指标 hostname-主机名称,uptime-启动时间 +主机名称原始查询命令:`hostname` +启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` +则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): 
+`hostname; uptime | awk -F "," '{print $1}'` 终端响应的数据为: ``` @@ -34,8 +34,8 @@ tombook 14:00:15 up 72 days ``` -则最后采集到的指标数据一一映射为: -hostname值为 `tombook` +则最后采集到的指标数据一一映射为: +hostname值为 `tombook` uptime值为 `14:00:15 up 72 days` 这里指标字段就能和响应数据一一映射为一行采集数据。 @@ -44,8 +44,8 @@ uptime值为 `14:00:15 up 72 days` > 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 -例如: -查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 +例如: +查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 内存指标原始查询命令为:`free -m`, 控制台响应: ```shell @@ -55,7 +55,7 @@ Swap: 8191 33 8158 ``` 在hertzbeat中multiRow格式解析需要响应数据列名称和指标值一一映射,则对应的查询SHELL脚本为: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` +`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` 控制台响应为: ```shell @@ -71,13 +71,13 @@ total used free buff_cache available ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 @@ -216,4 +216,3 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md index ff411818bdc..273fb4b4406 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md @@ -239,7 +239,6 @@ metrics: ---- -#### 完! 
+#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数 `headers,params` 等,我们可以像用postman一样去定义它,可玩性也非常高! - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md index 29d5478158a..94e2ad54899 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md @@ -143,4 +143,3 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md index 52367155d89..a7f77f7f5b6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md @@ -36,4 +36,3 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] |-------------|------|---------------| | value | 无 | Airflow版本 | | git_version | 无 | Airflow git版本 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md index 9d0ee3b088f..ba6b49bc58a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md @@ -17,16 +17,16 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 2. 
**【保存机器人的WebHook地址access_token值】** -> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` +> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` > 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】** ![email](/img/docs/help/alert-notice-9.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md index 9694126d0dd..bb3c6287cd4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md @@ -63,8 +63,8 @@ keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告 1. Discord 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 +> 请排查在告警中心是否已有触发的告警信息 +> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 > 请排查机器人是否被 Discord聊天服务器正确赋权 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md index d4dc218c591..0f53b58e71d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md @@ -13,14 +13,14 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] ![email](/img/docs/help/alert-notice-1.png) -2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** +2. 
**【获取验证码】-> 【输入邮箱验证码】-> 【确定】** ![email](/img/docs/help/alert-notice-2.png) ![email](/img/docs/help/alert-notice-3.png) -3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -32,7 +32,7 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] 2. 云环境TanCloud无法接收到邮件通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确邮箱,是否已配置告警策略关联 > 请查询邮箱的垃圾箱里是否把告警邮件拦截 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md index 604eff34fdc..5a6e95d7067 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md @@ -13,14 +13,14 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -28,7 +28,7 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 1. 飞书群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md index c81f5608674..5c5c38c56be 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md @@ -31,7 +31,7 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 1. Slack 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md index 73f434a8e8a..d6bca9843a4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md @@ -29,7 +29,7 @@ keywords: [ 告警华为云SMN通知, 开源告警系统, 开源监控告警系 5. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md index df609e66b50..dfb1aa48d8a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md @@ -60,8 +60,8 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] 1. Telegram 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 +> 请排查在告警中心是否已有触发的告警信息 +> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 > UserId 应为消息接收对象的UserId 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md index 2f8bda93e2b..9a68175fad7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md @@ -14,23 +14,23 @@ sidebar_label: 阈值告警配置 如上图: -**指标对象**:选择我们需要配置阈值的监控指标对象 例如:网站监控类型下的 -> summary指标集合下的 -> responseTime响应时间指标 -**阈值触发表达式**:根据此表达式来计算判断是否触发阈值,表达式环境变量和操作符见页面提示,例如:设置响应时间大于50触发告警,表达式为 `responseTime > 50`。阈值表达式详细帮助见 [阈值表达式帮助](alert_threshold_expr) +**指标对象**:选择我们需要配置阈值的监控指标对象 例如:网站监控类型下的 -> summary指标集合下的 -> responseTime响应时间指标 +**阈值触发表达式**:根据此表达式来计算判断是否触发阈值,表达式环境变量和操作符见页面提示,例如:设置响应时间大于50触发告警,表达式为 `responseTime > 50`。阈值表达式详细帮助见 [阈值表达式帮助](alert_threshold_expr) **告警级别**:触发阈值的告警级别,从低到高依次为:警告-warning,严重-critical,紧急-emergency -**触发次数**:设置触发阈值多少次之后才会真正的触发告警 -**通知模版**:告警触发后发送的通知信息模版,模版环境变量见页面提示,例如:`${app}.${metrics}.${metric}指标的值为${responseTime},大于50触发告警` -**全局默认**: 设置此阈值是否对全局的此类指标都应用有效,默认否。新增阈值后还需将阈值与监控对象关联,这样阈值才会对此监控生效。 +**触发次数**:设置触发阈值多少次之后才会真正的触发告警 +**通知模版**:告警触发后发送的通知信息模版,模版环境变量见页面提示,例如:`${app}.${metrics}.${metric}指标的值为${responseTime},大于50触发告警` +**全局默认**: 设置此阈值是否对全局的此类指标都应用有效,默认否。新增阈值后还需将阈值与监控对象关联,这样阈值才会对此监控生效。 **启用告警**:此告警阈值配置开启生效或关闭 -2. ** 阈值关联监控⚠️ 【告警配置】-> 【将刚设置的阈值】-> 【配置关联监控】-> 【配置后确定】** +2. **阈值关联监控⚠️ 【告警配置】-> 【将刚设置的阈值】-> 【配置关联监控】-> 【配置后确定】** -> ** 注意⚠️ 新增阈值后还需将阈值与监控对象关联(即设置此阈值对哪些监控有效),这样阈值才会对此监控生效 **。 +> **注意⚠️ 新增阈值后还需将阈值与监控对象关联(即设置此阈值对哪些监控有效),这样阈值才会对此监控生效**。 ![threshold](/img/docs/help/alert-threshold-2.png) ![threshold](/img/docs/help/alert-threshold-3.png) -**阈值告警配置完毕,已经被成功触发的告警信息可以在【告警中心】看到。** +**阈值告警配置完毕,已经被成功触发的告警信息可以在【告警中心】看到。** **若需要将告警信息邮件,微信,钉钉飞书通知给相关人员,可以在【告警通知】配置。** 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md index a3c5fe9a1c2..6d15d14ea6e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md @@ -23,32 +23,32 @@ equals(str1,str2) || ``` -丰富的操作符让我们可以很自由的定义表达式。 +丰富的操作符让我们可以很自由的定义表达式。 注意⚠️ 字符串的相等请用 `equals(str1,str2)` 数字类型的相等判断请用== 或 != #### 表达式函数库列表 -参考: https://www.yuque.com/boyan-avfmj/aviatorscript/ashevw +参考: #### 支持的环境变量 > 环境变量即指标值等支持的变量,用于在表达式中,阈值计算判断时会将变量替换成实际值进行计算 -非固定环境变量:这些变量会根据我们选择的监控指标对象而动态变化,例如我们选择了**网站监控的响应时间指标**,则环境变量就有 `responseTime - 此为响应时间变量` +非固定环境变量:这些变量会根据我们选择的监控指标对象而动态变化,例如我们选择了**网站监控的响应时间指标**,则环境变量就有 `responseTime - 此为响应时间变量` 如果我们想设置**网站监控的响应时间大于400时**触发告警,则表达式为 `responseTime>400` -固定环境变量(不常用):`instance : 所属行实例值` +固定环境变量(不常用):`instance : 所属行实例值` 此变量主要用于计算多实例时,比如采集到c盘d盘的`usage`(`usage为非固定环境变量`),我们只想设置**c盘的usage大于80**时告警,则表达式为 `equals(instance,"c")&&usage>80` #### 表达式设置案例 -1. 网站监控->响应时间大于等于400ms时触发告警 +1. 网站监控->响应时间大于等于400ms时触发告警 `responseTime>=400` -2. API监控->响应时间大于3000ms时触发告警 +2. API监控->响应时间大于3000ms时触发告警 `responseTime>3000` 3. 全站监控->URL(instance)路径为 `https://baidu.com/book/3` 的响应时间大于200ms时触发告警 `equals(instance,"https://baidu.com/book/3")&&responseTime>200` -4. MYSQL监控->status指标->threads_running(运行线程数)指标大于7时触发告警 +4. MYSQL监控->status指标->threads_running(运行线程数)指标大于7时触发告警 `threads_running>7` 若遇到问题可以通过交流群ISSUE交流反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md index 022cd50f07e..272c59cfd4c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md @@ -13,9 +13,9 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/alert-notice-5.png) -2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -60,7 +60,7 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 1. WebHook回调未生效 -> 请查看告警中心是否已经产生此条告警信息 +> 请查看告警中心是否已经产生此条告警信息 > 请排查配置的WebHook回调地址是否正确 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md index e0dbabf1a70..5c73ffee2a6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md @@ -15,16 +15,16 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** ![email](/img/docs/help/alert-notice-7.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -32,7 +32,7 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 1. 企业微信群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md index 88f0e690223..0390259fc70 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md @@ -32,4 +32,3 @@ keywords: [开源监控系统, 开源网站监控, HTTP API监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md index 3d0654db3b5..02a93f751c5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, CentOS操作系统监 | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md index ea4a376c049..12cb13b8422 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md @@ -46,4 +46,3 @@ keywords: [开源监控系统, 开源数据库监控, 达梦数据库监控] | dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | | dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | | dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md index c546b46fd2c..0b81365780b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md @@ -99,4 +99,3 @@ firewall-cmd --reload | cpu_delta | 无 | Docker容器已经使用的CPU数量 | | number_cpus | 无 | Docker容器可以使用的CPU数量 | | cpu_usage | 无 | Docker容器CPU使用率 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md index 8c2f1e290e4..1abcb732289 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md @@ -99,4 +99,3 @@ management: | dynamic | 无 | 是否动态线程池 | | run_timeout_count | 无 | 运行超时任务数 | | queue_timeout_count | 无 | 等待超时任务数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md index 9d39da7c9e4..54553c1200c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md @@ -5,8 +5,8 @@ sidebar_label: 全站监控 keywords: [开源监控系统, 开源网站监控, SiteMap监控] --- -> 对网站的全部页面监测是否可用 -> 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 +> 对网站的全部页面监测是否可用 +> 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 > 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 ### 配置参数 @@ -32,4 +32,3 @@ keywords: [开源监控系统, 开源网站监控, SiteMap监控] | statusCode | 无 | 请求此网页的响应HTTP状态码 | | responseTime | ms毫秒 | 网站响应时间 | | errorMsg | 无 | 请求此网站反馈的错误信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md index 0670d75a984..da07e912f00 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md @@ -9,7 +9,7 @@ sidebar_label: 帮助入门 ## 🔬 监控服务 -> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 +> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 > 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 ### 应用服务监控 @@ -59,7 +59,7 @@ sidebar_label: 帮助入门 ## 💡 告警服务 -> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 +> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 > 告警服务的定位是阈值准确及时触发,告警通知及时可达。 ### 告警中心 @@ -75,8 +75,8 @@ sidebar_label: 帮助入门 ### 告警通知 -> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 -> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 +> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 +> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 > 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。  👉 [配置邮箱通知](alert_email)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md index bda83b006e4..186baede498 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md @@ -87,4 +87,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md index 3b41d3979c6..6e1efde0991 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | 内存池初始内存 | MB | 内存池请求的初始内存量。 | | 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | | 内存池内存使用量 | MB | 内存池已使用内存量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md index fceb485f05b..8bb3bbb25e0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md @@ -41,7 +41,7 @@ predefinedMetrics: - FILE ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 3. 
在 HertzBeat 添加对应 IoTDB 监控即可。 @@ -118,4 +118,3 @@ predefinedMetrics: |------------|------|-------------| | name | 无 | 名称 | | connection | 无 | thrift当前连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md index c62c6f9448a..3d06e0346d2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md @@ -6,20 +6,20 @@ sidebar_label: 常见问题 ### 监控常见问题 -1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** +1. **页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名** > 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http -2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** +2. **网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK** > 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 +> docker安装默认启用无此问题 +> 详见 4. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖服务TDengine时序数据库] @@ -36,35 +36,34 @@ sidebar_label: 常见问题 ### Docker部署常见问题 -1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** +1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP +> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` -2. **按照流程部署,访问 http://ip:1157/ 无界面** +2. 
**按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 +> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 -1. **按照流程部署,访问 http://ip:1157/ 无界面** +1. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md index b60a5882b9f..31e297703fc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md @@ -92,4 +92,3 @@ java -jar $JETTY_HOME/start.jar --add-module=jmx-remote | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md index f046b3ef6a0..3d9e96e55e1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md @@ -17,7 +17,7 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] 应用启动时添加JVM参数 ⚠️注意可自定义暴露端口,对外IP -参考文档: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#remote +参考文档: ```shell -Djava.rmi.server.hostname=对外ip地址 @@ -78,4 +78,3 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md index 3cb4d74132c..a79bb0e91c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md @@ -17,7 +17,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 修改 Kafka 启动脚本 -修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` +修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` 在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 ```shell @@ -93,4 +93,3 @@ export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management. | FifteenMinuteRate | 无 | 十五分钟处理率 | > 其他指标见文知意,欢迎贡献一起优化文档。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md index aa242d93a6b..4f0363f621d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md @@ -13,7 +13,7 @@ keywords: [开源监控系统, 开源Kubernetes监控] 参考获取token步骤 -#### 方式一: +#### 方式一 1. 
创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### 方式二: +#### 方式二 ``` kubectl create serviceaccount cluster-admin @@ -96,4 +96,3 @@ kubectl create token --duration=1000h cluster-admin | cluster_ip | 无 | cluster ip | | selector | 无 | tag selector匹配 | | creation_time | 无 | 创建时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md index 4a69c04495e..abd87de1ef8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, Linux操作系统监控 | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md index 2490e3630dd..4690b5500ef 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md @@ -51,4 +51,3 @@ keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] | innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | | innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | | innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md index 920da021e6b..f3c1ddfab55 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance 
with 1.4 You need to use the stats command to view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. @@ -32,7 +32,7 @@ STAT version 1.4.15 ... ``` -**There is help_doc: https://www.runoob.com/memcached/memcached-stats.html** +**There is help_doc: ** ### Configuration parameter @@ -67,4 +67,3 @@ STAT version 1.4.15 | cmd_flush | | Flush command request count | | get_misses | | Get command misses | | delete_misses | | Delete command misses | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md index c5deaab27a2..47087c88f34 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md @@ -51,4 +51,3 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] | innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | | innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | | innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md index ded4a06ad2f..9faed580e1b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md @@ -14,23 +14,23 @@ keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGr nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 ``` -### +### **1、通过 stats 和 rocksdb stats 接口获取可用参数。** 1.1、如果只需要获取 nebulaGraph_stats,需要确保可以访问 stats,否则会出现错误。 -默认端口是 19669,访问地址为 http://ip:19669/stats +默认端口是 19669,访问地址为 1.2、如果需要获取 rocksdb stats 的附加参数,需要确保可以访问 rocksdb stats,否则会报错。 首次连接 NebulaGraph 时,必须先注册 Storage 服务,以便正确查询数据。 
-**有帮助文档:https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** +**有帮助文档:** -**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** +**** -默认端口是 19779,访问地址为:http://ip:19779/rocksdb_stats +默认端口是 19779,访问地址为: ### 配置参数 @@ -51,7 +51,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB #### 指标集:nebulaGraph_stats 指标太多,相关链接如下 -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** +**** | 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------------------------------------------|------|--------| @@ -114,11 +114,10 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB #### 指标集:rocksdb_stats 指标太多,相关链接如下 -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** +**** | 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------|------|------------------------| | rocksdb.backup.read.bytes | | 备份 RocksDB 数据库期间读取的字节数 | | rocksdb.backup.write.bytes | | 指标名称 | | ... | | ... | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md index 82908df358b..8c81c5a82c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md @@ -45,8 +45,8 @@ server { location /nginx-status { stub_status on; access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } ``` @@ -93,8 +93,8 @@ http { server { location /req-status { req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } } @@ -108,7 +108,7 @@ nginx -s reload 4. 
在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 -**参考文档: https://blog.csdn.net/weixin_55985097/article/details/116722309** +**参考文档: ** **⚠️注意监控模块的端点路径为 `/nginx-status` `/req-status`** @@ -151,4 +151,3 @@ nginx -s reload | 总请求数 | | 总请求数 | | 当前并发连接数 | | 当前并发连接数 | | 当前带宽 | kb | 当前带宽 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md index 735ab741b4d..a160f2501e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md @@ -39,4 +39,3 @@ NTP监控的中文文档如下: | 层级 | | NTP服务器的层级,表示其与参考时钟的距离。 | | 参考ID | | 指示NTP服务器使用的参考时钟或时间源的标识符。 | | 精度 | | NTP服务器时钟的精度,表示其准确性。 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md index 8bf21d7debb..964fc909c33 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, OpenGauss数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md index 7ffdfa219ff..d6bb80eb98d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md @@ -61,4 +61,3 @@ keywords: [开源监控系统, 开源数据库监控, Oracle数据库监控] | qps | QPS | I/O Requests per Second 每秒IO请求数量 | | tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | | mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md index 401e86f9382..59ac237ed34 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md @@ -31,7 +31,6 @@ keywords: [开源监控系统, 开源网络监控, 网络PING监控] 1. 安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - +> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 +> docker安装默认启用无此问题 +> 详见 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md index 4c58cc4a308..7a55a98df3e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md @@ -44,4 +44,3 @@ keywords: [开源监控工具,开源Java监控工具,监控POP3指标] |-------|------|-----------| | 邮件数量 | | 邮件数量 | | 邮箱总大小 | kb | 邮箱中邮件的总大小 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md index 88dc6360cd3..e774ac53254 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md @@ -26,4 +26,3 @@ keywords: [开源监控系统, 开源网络监控, 端口可用性监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md index 59adae7da81..12485e62ffa 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, PostgreSQL数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md index 89c728162c9..2210a2452e0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md @@ -18,7 +18,7 @@ keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间 rabbitmq-plugins enable rabbitmq_management ``` -2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 +2. 浏览器访问 ,默认账户密码 `guest/guest`. 成功登录即开启成功。 3. 在 HertzBeat 添加对应 RabbitMQ 监控即可,参数使用 Management 的 IP 端口,默认账户密码。 @@ -123,4 +123,3 @@ rabbitmq-plugins enable rabbitmq_management | message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md index 58248fb0b45..0a0c9f77a65 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md @@ -237,4 +237,3 @@ keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] | cmdstat_lpop | 无 | lpop命令的统计信息 | | cmdstat_rpop | 无 | rpop命令的统计信息 | | cmdstat_llen | 无 | llen命令的统计信息 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md index 1149ed4bdd9..87bb81b7800 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md @@ -127,4 +127,3 @@ shenyu: |-------|------|-------------| | state | 无 | 线程状态 | | value | 无 | 对应线程状态的线程数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md index 5755437e80e..73e9af9ee13 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit 通过 SMTP 的 hello 命令确定服务器是否可用 ``` -> 详见 https://datatracker.ietf.org/doc/html/rfc821#page-13 +> 详见 **协议使用:SMTP** @@ -38,4 +38,3 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit | 响应状态 | | 响应状态 | | SMTP 服务器标语 | | SMTP 服务器的标语 | | helo 命令返回信息 | | helo 命令返回的响应信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md index a0695849705..aaba0dd9841 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md @@ -87,4 +87,3 @@ management: | 匹配规则 | 无 | 路由匹配规则 | | 资源标识符 | 无 | 服务资源标识符 | | 优先级 | 无 | 此路由的优先级 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md index e66d4237a13..d39b67d3efd 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md @@ -94,4 +94,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ |----------|------|-----------| | space | 无 | 内存空间名称 | | mem_used | MB | 此空间占用内存大小 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md index 22a5a50ddd8..847a7775adc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md @@ -57,8 +57,8 @@ keywords: [开源监控系统, 开源数据库监控, SqlServer数据库监控] 1. SSL连接问题修复 -jdk版本:jdk11 -问题描述:SQL Server2019使用SA用户连接报错 +jdk版本:jdk11 +问题描述:SQL Server2019使用SA用户连接报错 错误信息: ```text diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md index 73957e31fb8..e15de6e3e97 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md @@ -31,4 +31,3 @@ keywords: [开源监控系统, 开源网站监控, SSL证书监控监控] | start_timestamp | ms毫秒 | 有效期开始时间戳 | | end_time | 无 | 过期时间 | | end_timestamp | ms毫秒 | 过期时间戳 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md index b366ee3c2ac..e1f112777f7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md @@ -72,4 +72,4 @@ keywords: [开源监控系统, 开源网站监控, Tomcat监控] CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 
-Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` -参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 +参考: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md index 3ec51e5464a..4425f1c2c06 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, Ubuntu监控] | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md index 8efe5262612..7403f255aec 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md @@ -27,4 +27,3 @@ keywords: [开源监控系统, 开源网站监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md index 41447469e61..0b1791435f4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md @@ -8,10 +8,10 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 > 通过SNMP协议对Windows操作系统的通用性能指标进行采集监控。 > 注意⚠️ Windows服务器需开启SNMP服务 -参考资料: -[什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) +参考资料: 
+[什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) +[什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) +[Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) [Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### 配置参数 @@ -41,4 +41,3 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 | services | 个数 | 当前服务数量 | | processes | 个数 | 当前进程数量 | | responseTime | ms | 采集响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md index 14d50c3c90d..9752c22bc4e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md @@ -97,4 +97,3 @@ Complete! | zk_max_latency | ms | 最大延时 | | zk_ephemerals_count | 个 | 临时节点数 | | zk_min_latency | ms | 最小延时 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md index aa9097c814c..e1184fcc2ab 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md @@ -32,13 +32,13 @@ slug: / > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ----- +---- ### 强大的监控模版 > 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 -HertzBeat 自身并没有去创造一种采集数据协议让监控对端来适配它。而是充分使用了现有的生态,`SNMP协议`采集网络交换机路由器信息,`JMX规范`采集JAVA应用信息,`JDBC规范`采集数据集信息,`SSH`直连执行脚本获取回显信息,`HTTP+(JsonPath | prometheus等)`解析API接口信息,`IPMI协议`采集服务器信息等等。 +HertzBeat 自身并没有去创造一种采集数据协议让监控对端来适配它。而是充分使用了现有的生态,`SNMP协议`采集网络交换机路由器信息,`JMX规范`采集JAVA应用信息,`JDBC规范`采集数据集信息,`SSH`直连执行脚本获取回显信息,`HTTP+(JsonPath | prometheus等)`解析API接口信息,`IPMI协议`采集服务器信息等等。 HertzBeat 
使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标数据。 ![hertzbeat](/img/blog/multi-protocol.png) @@ -92,21 +92,22 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 强大自定义功能 -> 由前面的**监控模版**介绍,大概清楚了 `HertzBeat` 拥有的强大自定义功能。 -> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 +> 由前面的**监控模版**介绍,大概清楚了 `HertzBeat` 拥有的强大自定义功能。 +> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 > 模版里面包含各个协议的使用配置,环境变量,指标转换,指标计算,单位转换,指标采集等一系列功能,帮助用户能采集到自己想要的监控指标。 ![hertzbeat](/img/docs/custom-arch.png) ### 无需 Agent -> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 `agent` 的安装部署调试升级了。 -> 每台主机得装个 `agent`,为了监控不同应用中间件可能还得装几个对应的 `agent`,监控数量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 +> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 `agent` 的安装部署调试升级了。 +> 每台主机得装个 `agent`,为了监控不同应用中间件可能还得装几个对应的 `agent`,监控数量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 > `agent` 的版本是否与主应用兼容, `agent` 与主应用的通讯调试, `agent` 的同步升级等等等等,这些全是头大的点。 -`HertzBeat` 的原理就是使用不同的协议去直连对端系统,采用 `PULL` 的形式去拉取采集数据,无需用户在对端主机上部署安装 `Agent` | `Exporter` 等。 -- 比如监控 `linux操作系统`, 在 `HertzBeat` 端输入IP端口账户密码或密钥即可。 -- 比如监控 `mysql数据库`, 在 `HertzBeat` 端输入IP端口账户密码即可。 +`HertzBeat` 的原理就是使用不同的协议去直连对端系统,采用 `PULL` 的形式去拉取采集数据,无需用户在对端主机上部署安装 `Agent` | `Exporter` 等。 + +- 比如监控 `linux操作系统`, 在 `HertzBeat` 端输入IP端口账户密码或密钥即可。 +- 比如监控 `mysql数据库`, 在 `HertzBeat` 端输入IP端口账户密码即可。 **密码等敏感信息全链路加密** ### 高性能集群 @@ -152,11 +153,11 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 --- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ------ +----- ## 即刻体验一波 -Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` +Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` 浏览器访问 `http://localhost:1157` 默认账户密码 `admin/hertzbeat` ### 登陆页面 @@ -301,8 +302,8 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 **还有更多强大的功能快去探索呀。Have Fun!** ------ +----- -**官网: https://hertzbeat.com/** -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**官网: ** +**Github: ** +**Gitee: ** diff 
--git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md index d36fb538d92..6913149bab1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md @@ -30,7 +30,7 @@ sidebar_label: 贡献者指南 ### 让 HertzBeat 运行起来 -> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 +> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 > 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 - 后端启动 @@ -130,6 +130,7 @@ git pull upstream master - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** 提供监控管理,系统管理基础服务 > 提供对监控的管理,监控应用配置的管理,系统用户租户后台管理等。 +> > - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 > 使用通用协议远程采集获取对端指标数据。 > - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 @@ -138,4 +139,3 @@ git pull upstream master > 告警计算触发,任务状态联动,告警配置,告警通知。 > - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 > 监控告警系统可视化控制台前端 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md index 5fa3a9c1929..9492da3f9ca 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md @@ -21,7 +21,7 @@ sidebar_label: 开发者们
zqr10159
Logic

📖 💻🎨 vinci
vinci

💻 📖 🎨 淞筱
淞筱

💻 📖 🎨 - 东风
东风

💻 🎨 📖 + 东风
东风

💻 🎨 📖 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md index 9f1d408fc79..bc2c4f50c96 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md @@ -20,4 +20,4 @@ HuaweiCloud 华为云将面向开源软件工具链与环境、开源应用构 开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx for HuaweiCloud”。 -感兴趣的开发者可以查看:华为云开源项目仓库 https://gitee.com/HuaweiCloudDeveloper/huaweicloud-cloud-native-plugins-kits 了解更多。 +感兴趣的开发者可以查看:华为云开源项目仓库 了解更多。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md index ff350e763e0..3cdc25e6196 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md @@ -21,17 +21,17 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 ## 🎡 介绍 -> [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 +> [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 > 更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
> -> `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 > 当然我们也提供了对应的 **[SAAS版本监控云](https://console.tancloud.cn)**,中小团队和个人无需再为了监控自己的网站资源,而去部署学习一套繁琐的监控系统,**[登录即可免费开始](https://console.tancloud.cn)**。 ----- +---- ![hertzbeat](/img/home/1.png) @@ -75,19 +75,20 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](../start/tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](../start/iotdb-init) -4. 配置用户配置文件(可选,自定义配置用户密码) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 +4. 配置用户配置文件(可选,自定义配置用户密码) + HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 具体参考 [配置修改账户密码](../start/account-modify) 5. 部署启动 执行位于安装目录/opt/hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat ``` - $ ./startup.sh + ./startup.sh ``` + 6. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 **HAVE FUN** @@ -95,11 +96,11 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 **最多的问题就是网络问题,请先提前排查** -1. **按照流程部署,访问 http://ip:1157/ 无界面** +1. **按照流程部署,访问 无界面** 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**监控历史图表长时间都一直无数据** @@ -107,4 +108,3 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 > 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 > 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md index 502cfb8429e..8b712eb562f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md @@ -21,4 +21,3 @@ sidebar_label: 相关资源 ![logo](/img/hertzbeat-brand.svg) 下载: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-brand.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-brand.png) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-brand.png) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md index fcf927d68ed..269c63417e6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md @@ -8,6 +8,5 @@ sidebar_label: 赞助我们 planet -感谢[吉实信息(构建全新的微波+光交易网络)](https://www.flarespeed.com) 赞助服务器采集节点 +感谢[吉实信息(构建全新的微波+光交易网络)](https://www.flarespeed.com) 赞助服务器采集节点 感谢[蓝易云(全新智慧上云)](https://www.tsyvps.com/aff/BZBEGYLX) 赞助服务器采集节点 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md index 328447b586f..decef8a5b5b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md @@ -4,9 +4,9 @@ title: 配置修改账户密码 sidebar_label: 配置修改账户密码 --- -HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat -若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 -修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 +HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat +若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 +修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 配置文件内容参考 项目仓库[/script/sureness.yml](https://github.com/apache/hertzbeat/blob/master/script/sureness.yml) ```yaml @@ -125,4 +125,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** +**重启 HertzBeat 浏览器访问 即可探索使用 HertzBeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md index 01380784169..95bedddc350 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md @@ -10,8 +10,8 @@ sidebar_label: 常见参数配置 ### 配置HertzBeat的配置文件 -修改位于 `hertzbeat/config/application.yml` 的配置文件 -注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 +修改位于 `hertzbeat/config/application.yml` 的配置文件 +注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 1. 
配置短信发送服务器 @@ -74,4 +74,3 @@ warehouse: port: 6379 password: 123456 ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md index 3eb90180a89..1a042ae672e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md @@ -6,7 +6,7 @@ sidebar_label: Docker方式部署 > 推荐使用Docker部署HertzBeat -1. 下载安装Docker环境 +1. 下载安装Docker环境 Docker 工具自身的下载请参考以下资料: [Docker官网文档](https://docs.docker.com/get-docker/) [菜鸟教程-Docker教程](https://www.runoob.com/docker/docker-tutorial.html) @@ -16,21 +16,23 @@ sidebar_label: Docker方式部署 $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. 拉取HertzBeat Docker镜像 - 镜像版本TAG可查看 [dockerhub 官方镜像仓库](https://hub.docker.com/r/apache/hertzbeat/tags) + +2. 拉取HertzBeat Docker镜像 + 镜像版本TAG可查看 [dockerhub 官方镜像仓库](https://hub.docker.com/r/apache/hertzbeat/tags) 或者使用 [quay.io 镜像仓库](https://quay.io/repository/apache/hertzbeat) ```shell - $ docker pull apache/hertzbeat - $ docker pull apache/hertzbeat-collector + docker pull apache/hertzbeat + docker pull apache/hertzbeat-collector ``` 若网络超时或者使用 ```shell - $ docker pull quay.io/tancloud/hertzbeat - $ docker pull quay.io/tancloud/hertzbeat-collector + docker pull quay.io/tancloud/hertzbeat + docker pull quay.io/tancloud/hertzbeat-collector ``` + 3. 部署HertzBeat您可能需要掌握的几条命令 ```shell @@ -46,18 +48,19 @@ sidebar_label: Docker方式部署 ctrl+d或者 $ exit ``` -4. 挂载并配置HertzBeat的配置文件(可选) - 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml + +4. 
挂载并配置HertzBeat的配置文件(可选) + 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 或 [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) - 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数 - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) -5. 挂载并配置HertzBeat用户配置文件,自定义用户密码(可选) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 - 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml - 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) 或 [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) +5. 挂载并配置HertzBeat用户配置文件,自定义用户密码(可选) + HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 + 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml + 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) 或 [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) 具体修改步骤参考 [配置修改账户密码](account-modify) 6. 启动HertzBeat Docker容器 @@ -88,12 +91,13 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ - `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 ```shell - $ docker update --restart=always hertzbeat + docker update --restart=always hertzbeat ``` + - `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** 7. 
开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 8. 部署采集器集群(可选) @@ -107,6 +111,7 @@ $ docker run -d \ ``` 这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 + - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 - `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 @@ -124,13 +129,13 @@ $ docker run -d \ **最多的问题就是网络问题,请先提前排查** -1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** +1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP +> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` -2. **按照流程部署,访问 http://ip:1157/ 无界面** +2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 @@ -139,7 +144,7 @@ $ docker run -d \ 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. 
**监控历史图表长时间都一直无数据** @@ -159,4 +164,3 @@ $ docker run -d \ > iot-db 或td-engine enable 是否设置为true > 注意⚠️若hertzbeat和IotDB,TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md index 81d950a392c..908e03e0702 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md @@ -7,7 +7,7 @@ sidebar_label: 使用GreptimeDB存储指标数据(可选) HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) 我们推荐使用并长期支持VictoriaMetrics -GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. +GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** @@ -15,7 +15,8 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### 通过Docker方式安装GreptimeDB > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -41,7 +42,7 @@ $ docker run -p 4000-4004:4000-4004 \ ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -64,4 +65,3 @@ warehouse: 1. 时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md index 9f19a733b2e..fb144a25b08 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md @@ -9,7 +9,7 @@ HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始 InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** ### 1. 直接使用华为云服务 GaussDB For Influx @@ -23,7 +23,8 @@ InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海 ### 2. 通过Docker方式安装InfluxDB > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -40,13 +41,13 @@ $ docker run -p 8086:8086 \ influxdb:1.8 ``` -`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 +`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -73,4 +74,3 @@ warehouse: 1. 时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md index c26e8feb7b0..9a5e9e4c51e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md @@ -9,7 +9,7 @@ HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始 Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据的软件系统,我们使用其存储分析采集到的监控指标历史数据。支持V0.12 - V0.13版本,推荐使用V0.13.*版本。 -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有IoTDB环境,可直接跳到YML配置那一步。 @@ -17,7 +17,8 @@ Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据 ### 通过Docker方式安装IoTDB > 可参考官方网站[安装教程](https://iotdb.apache.org/zh/UserGuide/V0.13.x/QuickStart/WayToGetIoTDB.html) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -40,8 +41,8 @@ $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ 3. 
在hertzbeat的`application.yml`配置文件配置IoTDB数据库连接 - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 配置HertzBeat的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`** @@ -85,4 +86,3 @@ warehouse: > iot-db enable是否设置为true > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md index fdf25643f1d..ca028f18a63 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md @@ -12,7 +12,7 @@ MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默 ### 通过Docker方式安装MYSQL -1. 下载安装Docker环境 +1. 下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 @@ -20,6 +20,7 @@ MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默 $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker安装MYSQl ``` @@ -31,14 +32,14 @@ MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默 mysql:5.7 ``` - `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 + `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 ### 数据库创建 -1. 进入MYSQL或使用客户端连接MYSQL服务 +1. 进入MYSQL或使用客户端连接MYSQL服务 `mysql -uroot -p123456` -2. 创建名称为hertzbeat的数据库 +2. 创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 查看hertzbeat数据库是否创建成功 `show databases;` @@ -46,9 +47,9 @@ MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默 ### 修改hertzbeat的配置文件application.yml切换数据源 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 + 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) 需修改部分原参数: @@ -75,13 +76,13 @@ spring: 2. 通过docker启动时,需要修改host为宿主机的外网Ip,包括mysql连接字符串和redis。 -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** ### 常见问题 1. 缺少hibernate的mysql方言,导致启动异常 Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set -如果上述配置启动系统,出现` Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set`异常, +如果上述配置启动系统,出现`Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set`异常, 需要在`application.yml`文件中增加以下配置: ```yaml @@ -93,4 +94,3 @@ spring: hibernate: dialect: org.hibernate.dialect.MySQL5InnoDBDialect ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md index bbb44f546b3..09aa1e2de3f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md @@ -10,7 +10,7 @@ sidebar_label: 安装包方式部署 下载您系统环境对应的安装包 `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` - 从[GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) 仓库下载 - 从[Download](https://hertzbeat.apache.org/docs/download) 仓库下载 -2. 配置HertzBeat的配置文件(可选) +2. 
配置HertzBeat的配置文件(可选) 解压安装包到主机 eg: /opt/hertzbeat ``` @@ -19,26 +19,27 @@ sidebar_label: 安装包方式部署 $ unzip -o hertzbeat-xx.zip ``` - 修改位于 `hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 - 若需使用邮件发送告警,需替换`application.yml`里面的邮件服务器参数 - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - **强烈推荐** 以后我们将主要支持VictoriaMetrics作为时序数据库,若需使用时序数据库VictoriaMetrics来存储指标数据,需替换`application.yml`里面的`warehouse.store.victoria-metrics`参数 具体步骤参见 [使用VictoriaMetrics存储指标数据](victoria-metrics-init) - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) -3. 配置用户配置文件(可选,自定义配置用户密码) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过修改位于 `hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 +3. 配置用户配置文件(可选,自定义配置用户密码) + HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过修改位于 `hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 具体参考 [配置修改账户密码](account-modify) 4. 部署启动 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat ``` - $ ./startup.sh + ./startup.sh ``` + 5. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 6. 
部署采集器集群(可选) - 下载解压您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` @@ -54,7 +55,8 @@ sidebar_label: 安装包方式部署 manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` + + - 启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 **HAVE FUN** @@ -65,9 +67,9 @@ sidebar_label: 安装包方式部署 1. **若您使用的是不含JDK的安装包,需您提前准备JAVA运行环境** -安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) -要求:JAVA11环境 -下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) +安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) +要求:JAVA11环境 +下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) 安装后命令行检查是否成功安装 ``` @@ -78,16 +80,16 @@ Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) ``` -2. **按照流程部署,访问 http://ip:1157/ 无界面** +2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. 
**监控历史图表长时间都一直无数据** @@ -95,4 +97,3 @@ Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) > 一:时序数据库是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 > 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md index 6f2d7cdf7bc..f3b30ac066d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md @@ -12,7 +12,7 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM ### 通过Docker方式安装PostgreSQL -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -20,10 +20,11 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker安装 PostgreSQL ``` - $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 + docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` 使用```$ docker ps```查看数据库是否启动成功 @@ -38,7 +39,8 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM su - postgres psql ``` -2. 创建名称为hertzbeat的数据库 + +2. 创建名称为hertzbeat的数据库 `CREATE DATABASE hertzbeat;` 3. 查看hertzbeat数据库是否创建成功 `\l` @@ -46,9 +48,9 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM ### 修改hertzbeat的配置文件application.yml切换数据源 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 + 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) ```yaml @@ -84,4 +86,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md index f520ba5f0a4..21c956521a9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md @@ -41,7 +41,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 1. 下载您系统环境对应的安装包`hertzbeat-xx.tar.gz` [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) 2. 配置 HertzBeat 的配置文件 `hertzbeat/config/application.yml`(可选) -3. 部署启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` +3. 部署启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` 4. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 5. 
部署采集器集群 - 下载您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) @@ -58,7 +58,8 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` + + - 启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) @@ -94,10 +95,10 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ##### 安装Mysql(可选) -1. docker安装Mysql - ` $ docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7` +1. docker安装Mysql + `$ docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7` `-v /opt/data:/var/lib/mysql` - 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 -2. 创建名称为hertzbeat的数据库 +2. 创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 在hertzbeat的配置文件`application.yml`配置Mysql数据库替换H2内置数据库连接参数 @@ -105,7 +106,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ##### 安装TDengine(可选) -1. docker安装TDengine +1. docker安装TDengine `docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp --name tdengine tdengine/tdengine:3.0.4.0` 2. 创建名称为hertzbeat的数据库 3. 
在hertzbeat的配置文件`application.yml`配置tdengine连接 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md index fbcbab39578..835c4625638 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md @@ -12,10 +12,10 @@ sidebar_label: 使用案例 HertzBeat 一个拥有强大自定义监控能力,无需Agent的实时监控工具。网站监测,PING连通性,端口可用性,数据库,操作系统,中间件,API监控,阈值告警,告警通知(邮件微信钉钉飞书)。 -**官网: https://hertzbeat.com | https://tancloud.cn** +**官网: | ** -github: https://github.com/apache/hertzbeat -gitee: https://gitee.com/hertzbeat/hertzbeat +github: +gitee: #### 安装 HertzBeat @@ -37,7 +37,7 @@ gitee: https://gitee.com/hertzbeat/hertzbeat 2. 配置监控网站 -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 +> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 > 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 ![](/img/docs/start/ssl_2.png) @@ -82,8 +82,8 @@ gitee: https://gitee.com/hertzbeat/hertzbeat 钉钉微信飞书等token配置可以参考帮助文档 -https://hertzbeat.com/docs/help/alert_dingtalk -https://tancloud.cn/docs/help/alert_dingtalk + + > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 @@ -93,10 +93,10 @@ https://tancloud.cn/docs/help/alert_dingtalk ---- -#### 完! +#### 完 监控SSL证书的实践就到这里,当然对hertzbeat来说这个功能只是冰山一角,如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md index c09e1daf15e..757cb363256 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md @@ -9,7 +9,7 @@ HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始 TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有TDengine环境,可直接跳到创建数据库实例那一步。 @@ -17,7 +17,8 @@ TDengine是一款开源物联网时序型数据库,我们用其存储采集到 ### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -36,7 +37,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ ``` `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 -`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 +`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 使用```$ docker ps```查看数据库是否启动成功 ### 创建数据库实例 @@ -46,8 +47,9 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 1. 进入数据库Docker容器 ``` - $ docker exec -it tdengine /bin/bash + docker exec -it tdengine /bin/bash ``` + 2. 修改账户密码 > 建议您修改密码。TDengine默认的账户密码是 root/taosdata @@ -79,6 +81,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ taos> show databases; taos> use hertzbeat; ``` + 5. 退出TDengine CLI ``` @@ -92,7 +95,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -133,4 +136,3 @@ warehouse: > td-engine enable是否设置为true > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md index d1608031078..9b0bc7a6565 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md @@ -9,7 +9,7 @@ HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始 VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决方案和时序数据库,兼容 Prometheus 生态。推荐版本(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 @@ -17,7 +17,8 @@ VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决 ### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -40,8 +41,8 @@ $ docker run -d -p 8428:8428 \ 3. 
在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 配置HertzBeat的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** @@ -67,4 +68,3 @@ warehouse: 1. 时序数据库是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md index 16205107eaa..5002aa52f7d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md @@ -6,7 +6,7 @@ sidebar_label: 监控模版 > Hertzbeat 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
这是它的架构原理: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md index 79bed896359..9d092b935c8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-default.md @@ -43,7 +43,7 @@ sidebar_label: 系统默认解析方式 ``` 样例: -查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 +查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 若只有一台虚拟机,其单层格式为: ```json @@ -157,4 +157,3 @@ metrics: # 这里使用HertzBeat默认解析 parseType: default ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md index 03602131897..56e9b125e5f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md @@ -61,7 +61,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 **HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为`hertzbeat`的自定义监控类型,其使用HTTP协议采集指标数据。 @@ -206,10 +206,10 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! 如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md index bebcc99244a..1f8e81907d6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-token.md @@ -196,7 +196,7 @@ metrics: ``` -**此时,重启hertzbeat系统,在系统页面上添加 `hertzbeat_token` 类型监控,配置输入参数,`content-type`填`application/json` , `请求Body`填账户密码json如下: ** +**此时,重启hertzbeat系统,在系统页面上添加 `hertzbeat_token` 类型监控,配置输入参数,`content-type`填`application/json` , `请求Body`填账户密码json如下:** ```json { @@ -378,10 +378,10 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! 如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md index befd1db351f..8a2385951a2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-jsonpath.md @@ -61,7 +61,7 @@ sidebar_label: JsonPath解析方式 #### 样例 -查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 +查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 接口返回的原始数据如下: ```json @@ -175,4 +175,3 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md index 1a4a132c073..d6f0ded47c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http.md @@ -10,10 +10,10 @@ sidebar_label: HTTP协议自定义监控 【**HTTP接口调用**】->【**响应校验**】->【**响应数据解析**】->【**默认方式解析|JsonPath脚本解析 | XmlPath解析(todo) | Prometheus解析**】->【**指标数据提取**】 -由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 +由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 -**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) +**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) **JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) ### 自定义步骤 @@ -22,13 +22,13 @@ HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数, ![](/img/docs/advanced/extend-point-1.png) -------- +------- 
下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 @@ -204,4 +204,3 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md index bb946d8ce1c..cd24a177c63 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jdbc.md @@ -21,7 +21,7 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book limit 1; 这里指标字段就能和响应数据一一映射为一行采集数据。 @@ -30,7 +30,7 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: 查询的指标字段为:one tow three four 查询SQL:select one, tow, three, four from book; 这里指标字段就能和响应数据一一映射为多行采集数据。 @@ -39,9 +39,9 @@ SQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 -例如: -查询字段:one tow three four -查询SQL:select key, value from book; +例如: +查询字段:one tow three four +查询SQL:select key, value from book; SQL响应数据: | key | value | @@ -59,13 +59,13 @@ SQL响应数据: ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_sql的自定义监控类型,其使用JDBC协议采集指标数据。 @@ -243,4 +243,3 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md index 71bb06ba2b2..4c4a019c3ab 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-jmx.md @@ -4,7 +4,7 @@ title: JMX协议自定义监控 sidebar_label: JMX协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 > JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 ### JMX协议采集流程 @@ -23,13 +23,13 @@ sidebar_label: JMX协议自定义监控 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下监控模版的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 @@ -236,4 +236,3 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md index 34514b3f2bb..3788a1400e9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ngql.md @@ -4,7 +4,7 @@ title: NQGL自定义监控 sidebar_label: NGQL自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用NGQL自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用NGQL自定义指标监控。 > NGQL自定义监控可以让我们很方便的使用NGQL或者OpenCypher从NebulaGraph图数据库中查询指标数据,支持NebulaGraph 3.X版本。 ### 数据解析方式 @@ -21,6 +21,7 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 > `filterValue`: 过滤属性值(可选) 例如: + - online_meta_count#SHOW HOSTS 
META#Status#ONLINE 对 `SHOW HOSTS META` 返回的结果中统计滤Status==ONLINE的数量 - online_meta_count#SHOW HOSTS META## @@ -47,7 +48,8 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 > 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 -例如: +例如: + - 查询的指标字段为:a,b - 查询NGQL:match (v:metrics) return v.metrics.a as a,v.metrics.b as b; 这里指标字段就能和响应数据一一映射为多行采集数据。 @@ -71,13 +73,13 @@ NGQL查询回来的数据字段和我们需要的指标映射,就能获取对 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_ngql的自定义监控类型,其使用NGQL采集指标数据。 @@ -170,4 +172,3 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md index a59d9b1898a..9f7ae8ee1b1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-point.md @@ -168,4 +168,3 @@ metrics: parseType: website ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md index 387d67c5987..1172b263c2e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-snmp.md @@ -4,7 +4,7 @@ title: SNMP协议自定义监控 sidebar_label: SNMP协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 > SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 ### SNMP协议采集流程 @@ -23,13 
+23,13 @@ sidebar_label: SNMP协议自定义监控 ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 @@ -207,4 +207,3 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md index 0f643f153f8..451e2b3a540 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md @@ -4,7 +4,7 @@ title: SSH协议自定义监控 sidebar_label: SSH协议自定义监控 --- -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 +> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 > SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 ### SSH协议采集流程 @@ -21,12 +21,12 @@ SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 > 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 -例如: -需要查询Linux的指标 hostname-主机名称,uptime-启动时间 -主机名称原始查询命令:`hostname` -启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` -则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): -`hostname; uptime | awk -F "," '{print $1}'` +例如: +需要查询Linux的指标 hostname-主机名称,uptime-启动时间 +主机名称原始查询命令:`hostname` +启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` +则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): +`hostname; uptime | awk -F "," '{print $1}'` 终端响应的数据为: ``` @@ -34,8 +34,8 @@ tombook 14:00:15 up 72 days ``` -则最后采集到的指标数据一一映射为: -hostname值为 `tombook` +则最后采集到的指标数据一一映射为: +hostname值为 `tombook` uptime值为 `14:00:15 up 72 days` 这里指标字段就能和响应数据一一映射为一行采集数据。 @@ -44,8 +44,8 @@ uptime值为 `14:00:15 up 72 days` > 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 -例如: -查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 
free-空闲内存 buff-cache-缓存大小 available-可用内存 +例如: +查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 内存指标原始查询命令为:`free -m`, 控制台响应: ```shell @@ -55,7 +55,7 @@ Swap: 8191 33 8158 ``` 在hertzbeat中multiRow格式解析需要响应数据列名称和指标值一一映射,则对应的查询SHELL脚本为: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` +`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` 控制台响应为: ```shell @@ -71,13 +71,13 @@ total used free buff_cache available ![](/img/docs/advanced/extend-point-1.png) -------- +------- 下面详细介绍下文件的配置用法,请注意看使用注释。 ### 监控模版YML -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 +> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 > 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 @@ -216,4 +216,3 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md index 369210ee6be..a2b602f7844 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md @@ -239,7 +239,6 @@ metrics: ---- -#### 完! +#### 完 HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数 `headers,params` 等,我们可以像用postman一样去定义它,可玩性也非常高! 
- diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md index c30a850a3c2..b444d0a970c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_committer.md @@ -32,7 +32,7 @@ limitations under the License. Apache HertzBeat 社区努力追求基于功绩的原则。因此,一旦有人在 CoPDoC 的任何领域有了足够的贡献,他们就可以成为 Committer 的候选人,最终被投票选为 HertzBeat 的 Committer。成为 Apache HertzBeat 的 Committer 并不一定意味着你必须使用你的提交权限向代码库提交代码;它意味着你致力于 HertzBeat 项目并为我们社区的成功做出了积极的贡献。 -## Committer 的要求: +## Committer 的要求 没有成为 Committer 或 PPMC 成员的严格规则。新的 Committer 的候选人通常是积极的贡献者和社区成员。但是,如果能稍微明确一些规则,就可以在一定程度上消除贡献者的疑虑,使社区更加透明、合理和公平。 @@ -57,4 +57,3 @@ Committer 的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md index 39cf1da9123..ff56d4cb723 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/become_pmc_member.md @@ -32,7 +32,7 @@ limitations under the License. 
Apache HertzBeat 社区努力追求基于功绩的原则。因此,一旦有人在 CoPDoC 的任何领域有了足够的贡献,他们就可以成为 PMC 成员资格的候选人,最终被投票选为 HertzBeat 的 PMC 成员。成为 Apache HertzBeat 的 PMC 成员并不一定意味着您必须使用您的提交权限向代码库提交代码;它意味着您致力于 HertzBeat 项目并为我们社区的成功做出了积极的贡献。 -## PMC 成员的要求: +## PMC 成员的要求 没有成为 Committer 或 PPMC 成员的严格规则。新的 PMC 成员的候选人通常是积极的贡献者和社区成员。但是,如果能稍微明确一些规则,就可以在一定程度上消除贡献者的疑虑,使社区更加透明、合理和公平。 @@ -57,4 +57,3 @@ PMC 成员的候选人应该持续参与并为 HertzBeat 做出大量的贡献 - 对于拉取请求审查保持积极、有礼貌与尊重。 - 即使存在分歧,也要以专业和外交的态度参与技术路线图的讨论。 - 通过撰写文章或举办活动来推广项目。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md index 1cad8c3add1..042e33f0558 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -27,7 +27,7 @@ limitations under the License. - 新建 `PR` 后需要在 `PR` 页面的 Github Development 按钮处关联已存在的对应 `ISSUE`(若无建议新建对应ISSUE) - - 标题命名格式(英文,小写) + - 标题命名格式(英文,小写) `[feature/bugfix/doc/improve/refactor/bug/cleanup] title` 2. 添加描述信息 @@ -70,6 +70,7 @@ limitations under the License. ```java Cache publicKeyCache; ``` + 2. 变量的拼音缩写是禁止的(排除地名等名词),例如chengdu。 3. 推荐的变量名以 `类型` 结尾。 对于 `Collection/List` 类型的变量,取 `xxxx` (复数表示多个元素)或以 `xxxList` (特定类型)结束。 @@ -79,6 +80,7 @@ limitations under the License. Map idUserMap; Map userIdNameMap; ``` + 4. 通过其名称直观地知道变量的类型和含义。 方法名称应首先以动词开始,如下所示: @@ -114,6 +116,7 @@ limitations under the License. return resp; } ``` + - 正面示例: > 字符串提取为常量引用。 @@ -139,6 +142,7 @@ limitations under the License. return resp; } ``` + 2. 确保代码的可读性和直观性 - `annotation` 符号中的字符串不需要提取为常量。 @@ -198,6 +202,7 @@ public CurrentHashMap funName(); return; } ``` + - 正面示例: ```java @@ -221,11 +226,13 @@ public CurrentHashMap funName(); - 多余的行 一般来说,如果一个方法的代码行深度由于连续嵌套的 `if... 
else..` 超过了 `2+ Tabs`,那么应该考虑试图 + - `合并分支`, - `反转分支条件` - `提取私有方法` 以减少代码行深度并提高可读性,例如: + - 联合或将逻辑合并到下一级调用中 - 负面示例: @@ -262,6 +269,7 @@ if(expression2) { ...... } ``` + - 反转条件 - 负面示例: @@ -276,6 +284,7 @@ if(expression2) { } } ``` + - 正面示例: ```java @@ -289,6 +298,7 @@ if(expression2) { // ... } ``` + - 使用单一变量或方法减少复杂的条件表达式 - 负面示例: @@ -297,6 +307,7 @@ if(expression2) { ... } ``` + - 正面示例: ```java @@ -341,6 +352,7 @@ if(expression2) { ```java map.computeIfAbsent(key, x -> key.toLowerCase()) ``` + - 正面示例: ```java @@ -354,6 +366,7 @@ if(expression2) { ```java map.computeIfAbsent(key, k-> Loader.load(k)); ``` + - 正面示例: ```java @@ -383,6 +396,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -400,6 +414,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -417,6 +432,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -436,6 +452,7 @@ if(expression2) { return; } ``` + - 正面示例: ```java @@ -453,6 +470,7 @@ if(expression2) { ... } ``` + - 正面示例: ```java @@ -468,6 +486,7 @@ if(expression2) { ```java System.out.println(JobStatus.RUNNING.toString()); ``` + - 正面示例: ```java @@ -483,6 +502,7 @@ if(expression2) { ... 
} ``` + - 正面示例: ```java @@ -520,6 +540,7 @@ public void process(String input) { ```java log.info("Deploy cluster request " + deployRequest); ``` + - 正面示例 ```java @@ -539,6 +560,7 @@ public void process(String input) { List userList = getUsersByBatch(1000); LOG.debug("All users: {}", getAllUserIds(userList)); ``` + - 正面示例: 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: @@ -547,7 +569,7 @@ public void process(String input) { // 忽略声明行。 List userList = getUsersByBatch(1000); if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); } ``` @@ -559,13 +581,12 @@ public void process(String input) { ## 参考资料 -- https://site.mockito.org/ -- https://alibaba.github.io/p3c/ -- https://rules.sonarsource.com/java/ -- https://junit.org/junit5/ -- https://streampark.apache.org/ +- +- +- +- +- ``` ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md index 55993958797..8a353d72ba7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md @@ -47,7 +47,7 @@ limitations under the License. 
### 让 HertzBeat 运行起来 -> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 +> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 > 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 #### 后端启动 @@ -158,6 +158,7 @@ git pull upstream master - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** 提供监控管理,系统管理基础服务 > 提供对监控的管理,监控应用配置的管理,系统用户租户后台管理等。 +> > - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 > 使用通用协议远程采集获取对端指标数据。 > - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md index c9ed4b45859..2d440e18f01 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/development.md @@ -6,7 +6,7 @@ sidebar_label: 运行编译 ## 让 HertzBeat 运行起来 -> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 +> 让 HertzBeat 的代码在您的开发工具上运行起来,并且能够断点调试。 > 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 ### 后端启动 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md index 4cf56e0137b..7032d24688e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/document.md @@ -40,8 +40,8 @@ git clone git@github.com:/hertzbeat.git 1. 下载并安装 nodejs (版本 18.8.0) 2. 将代码克隆到本地 `git clone git@github.com:apache/hertzbeat.git` 3. 在`home`目录下运行 `npm install` 来安装所需的依赖库。 -4. 
在`home`目录下运行 `npm run start`,您可以访问 http://localhost:3000 查看站点的英文模式预览 -5. 在`home`目录下运行 `npm run start-zh-cn`,您可以访问 http://localhost:3000 查看站点的中文模式预览 +4. 在`home`目录下运行 `npm run start`,您可以访问 查看站点的英文模式预览 +5. 在`home`目录下运行 `npm run start-zh-cn`,您可以访问 查看站点的中文模式预览 6. 若要生成静态网站资源文件,请运行 `npm run build`。构建的静态资源位于 build 目录中。 ## 目录结构 @@ -93,4 +93,3 @@ css 和其他样式文件放在 `src/css` 目录中。 ### 页面内容修改 > 所有页面文档都可以通过底部的'编辑此页面'按钮直接跳转到相应的 github 资源修改页面。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md index 2cccecd681c..5eabbc561ed 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md @@ -22,6 +22,7 @@ sidebar_position: 4 ## 2. 准备发布 > 首先整理帐户信息以更好地了解操作过程,稍后会多次使用。 +> > - apache id: `muchunjin (APACHE LDAP 用户名)` > - apache passphrase: `APACHE LDAP 密钥` > - apache email: `muchunjin@apache.org` @@ -128,12 +129,12 @@ gpg: Total number processed: 1 gpg: unchanged: 1 ``` -或者进入 https://keyserver.ubuntu.com/ 网址,输入密钥的名称,然后点击'Search key' 按钮,查看是否有对应名称的密钥。 +或者进入 网址,输入密钥的名称,然后点击'Search key' 按钮,查看是否有对应名称的密钥。 #### 2.4 将 gpg 公钥添加到 Apache SVN 项目仓库的 KEYS 文件中 -- Apache HertzBeat Dev 分支 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat -- Apache HertzBeat Release 分支 https://dist.apache.org/repos/dist/release/incubator/hertzbeat +- Apache HertzBeat Dev 分支 +- Apache HertzBeat Release 分支 ##### 2.4.1 将公钥添加到dev分支的KEYS @@ -167,7 +168,7 @@ $ svn ci -m "add gpg key for muchunjin" ## 3. 
准备物料 & 发布 -#### 3.1 基于 master 分支,创建一个名为 release-${release_version}-rcx 的分支,例如 release-1.6.0-rc1。并基于 release-1.6.0-rc1 分支创建一个名为 v1.6.0-rc1 的标签,并将此标签设置为预发布。 +#### 3.1 基于 master 分支,创建一个名为 release-${release_version}-rcx 的分支,例如 release-1.6.0-rc1。并基于 release-1.6.0-rc1 分支创建一个名为 v1.6.0-rc1 的标签,并将此标签设置为预发布 ```shell git checkout master @@ -330,7 +331,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" - 检查 Apache SVN 提交结果 -> 在浏览器中访问 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/ , 检查是否有新的版本内容 +> 在浏览器中访问 , 检查是否有新的版本内容 ## 4. 进入社区投票阶段 @@ -338,7 +339,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" 发送社区投票邮件需要至少三个`+1`,且无`-1`。 -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`: <dev@hertzbeat.apache.org>
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: @@ -394,7 +395,7 @@ Thanks! 在72小时后,将统计投票结果,并发送投票结果邮件,如下所示。 -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`: <dev@hertzbeat.apache.org>
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -423,14 +424,14 @@ Best, ChunJin Mu ``` -邮件内容中的一项是`Vote thread`,在 https://lists.apache.org/list.html?dev@hertzbeat.apache.org 查看获取 +邮件内容中的一项是`Vote thread`,在 查看获取 #### 3.2 发送孵化社区投票邮件 发送孵化社区投票邮件需要至少三个`+1`,且无`-1`。 -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org、private@hertzbeat.apache.org
+> `Send to`: <general@incubator.apache.org>
+> `cc`: <dev@hertzbeat.apache.org>、<private@hertzbeat.apache.org>
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -484,7 +485,7 @@ ChunJin Mu 如果72小时后没有-1,回复邮件如下 -> `Send to`: general@incubator.apache.org
+> `Send to`: <general@incubator.apache.org>
> `Body`: ``` @@ -496,7 +497,7 @@ Chunjin Mu 然后将统计投票结果,并发送投票结果邮件,如下所示。 -> `Send to`: general@incubator.apache.org
+> `Send to`: <general@incubator.apache.org>
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -522,7 +523,7 @@ Best, ChunJin Mu ``` -邮件内容中的一项是`Vote thread`,在 https://lists.apache.org/list.html?general@incubator.apache.org 查看获取 +邮件内容中的一项是`Vote thread`,在 查看获取 等待一天,查看看导师是否有其他意见,如果没有,发送以下公告邮件 @@ -536,10 +537,10 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http #### 4.2 添加新版本下载地址到官网 -https://github.com/apache/hertzbeat/blob/master/home/docs/download.md -https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md + + -完成后打开官网地址 https://hertzbeat.apache.org/docs/download/ 查看是否有新版本的下载 +完成后打开官网地址 查看是否有新版本的下载 > 需要注意的是,下载链接可能需要一个小时后才会生效,请注意。 @@ -572,8 +573,8 @@ release note: xxx #### 4.4 发送新版本公告邮件 -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org
+> `Send to`: <general@incubator.apache.org>
+> `cc`: <dev@hertzbeat.apache.org>
> `Title`: [ANNOUNCE] Release Apache HertzBeat (incubating) 1.6.0
> `Body`: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md index 9904af1967b..2c8af78f0e8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md @@ -8,7 +8,7 @@ sidebar_position: 4 详细检查列表请参考官方的[check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) -在浏览器中可访问版本内容 https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/ +在浏览器中可访问版本内容 ## 1. 下载候选版本到本地 @@ -42,8 +42,8 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio #### 2.2.1 导入公钥 ```shell -$ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS -$ gpg --import KEYS # 导入KEYS到本地 +curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS +gpg --import KEYS # 导入KEYS到本地 ``` #### 2.2.2 信任公钥 @@ -121,7 +121,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz - [ ] 如果依赖的是Apache许可证并且存在`NOTICE`文件,那么这些`NOTICE`文件也需要加入到版本的`NOTICE`文件中 - [ ] ..... -参考: https://apache.org/legal/resolved.html +参考: ### 2.5. 源码编译验证 @@ -131,7 +131,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz cd apache-hertzbeat-${release_version}-incubating-src ``` -编译源码: https://hertzbeat.apache.org/docs/community/development/#build-hertzbeat-binary-package +编译源码: 进行如下检查: @@ -145,7 +145,7 @@ cd apache-hertzbeat-${release_version}-incubating-src - [ ] 能够正确编译 - [ ] ..... -参考: https://apache.org/legal/resolved.html +参考: ## 3. 
邮件回复 @@ -154,13 +154,13 @@ cd apache-hertzbeat-${release_version}-incubating-src :::caution 注意 回复的邮件一定要带上自己检查了那些项信息,仅仅回复`+1 approve`,是无效的。 -PPMC 在 dev@hertzbeat.apache.org HertzBeat 的社区投票时,请带上 binding 后缀,表示对 HertzBeat 社区中的投票具有约束性投票,方便统计投票结果。 +PPMC 在 HertzBeat 的社区投票时,请带上 binding 后缀,表示对 HertzBeat 社区中的投票具有约束性投票,方便统计投票结果。 -IPMC 在 general@incubator.apache.org incubator 社区投票,请带上 binding 后缀,表示对 incubator 社区中的投票具有约束性投票,方便统计投票结果。 +IPMC 在 incubator 社区投票,请带上 binding 后缀,表示对 incubator 社区中的投票具有约束性投票,方便统计投票结果。 ::: :::caution 注意 -如果在dev@hertzbeat.apache.org已经投过票,在incubator社区进行投票回复时,可以直接带过去,需要注意约束性 如: +如果在已经投过票,在incubator社区进行投票回复时,可以直接带过去,需要注意约束性 如: ```html //incubator社区 投票时,只有IPMC成员才具有约束性 binding,PPMC需要注意约束性的变化 @@ -195,6 +195,6 @@ I checked: 5. .... ``` ---- +--- This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md index 922cbfe9a6a..64d938005fd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/mailing_lists.md @@ -34,7 +34,7 @@ limitations under the License. | 列表名称 | 地址 | 订阅 | 退订 | 归档 | |-----------|--------------------------|-------------------------------------------------|---------------------------------------------------|-------------------------------------------------------------------| -| **开发者列表** | dev@hertzbeat.apache.org | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | +| **开发者列表** | | [订阅](mailto:dev-subscribe@hertzbeat.apache.org) | [退订](mailto:dev-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### 通知列表 @@ -42,16 +42,16 @@ limitations under the License. 
| 列表名称 | 地址 | 订阅 | 退订 | 归档 | |----------|------------------------------------|-----------------------------------------------------------|-------------------------------------------------------------|-----------------------------------------------------------------------------| -| **通知列表** | notifications@hertzbeat.apache.org | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| **通知列表** | | [订阅](mailto:notifications-subscribe@hertzbeat.apache.org) | [退订](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [归档](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## 订阅步骤 发送订阅邮件即可订阅。步骤如下: - 1、**订阅**:点击上表中的 **订阅** 按钮,它将重定向到您的邮件客户端。主题和内容是任意的。 - 之后,您会从 dev-help@hertzbeat.apache.org 收到确认邮件(如果没有收到,请确认电子邮件是否被自动分类为垃圾邮件、推广邮件、订阅邮件等)。 + 之后,您会从 收到确认邮件(如果没有收到,请确认电子邮件是否被自动分类为垃圾邮件、推广邮件、订阅邮件等)。 - 2、**确认**:直接回复确认邮件,或点击邮件中的链接快速回复。主题和内容是任意的。 -- 3、**欢迎**:在完成上述步骤后,您会收到一个主题为 WELCOME to dev@hertzbeat.apache.org 的欢迎邮件,您已成功订阅 Apache HertzBeat 邮件列表。 +- 3、**欢迎**:在完成上述步骤后,您会收到一个主题为 WELCOME to 的欢迎邮件,您已成功订阅 Apache HertzBeat 邮件列表。 ## 发送纯文本邮件 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md index 489215891e7..8be0582e157 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_committer_process.md @@ -80,7 +80,7 @@ ${Work list}[1] ``` 注意,投票将在今天一周后结束,即 -[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache投票指南](https://community.apache.org/newcommitter.html) 
### 关闭投票模板 @@ -242,4 +242,3 @@ you can now help fix that. A PPMC member will announce your election to the dev list soon. ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md index d7e144bb52b..500627ec5f2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/new_pmc_member_process.md @@ -79,7 +79,7 @@ ${Work list}[1] ``` 注意,投票将在今天一周后结束,即 -[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache 参考投票指南](https://community.apache.org/newcommitter.html) ### Close Vote Template @@ -283,4 +283,3 @@ A PPMC member helps manage and guide the direction of the project. Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md index 8940571f71c..7ad649e09d1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/submit-code.md @@ -29,11 +29,13 @@ limitations under the License. ```shell git clone git@github.com:<您的账户名>/hertzbeat.git ``` + * 添加远程仓库地址,命名为 upstream ```shell git remote add upstream git@github.com:apache/hertzbeat.git ``` + * 查看仓库 ```shell @@ -47,12 +49,14 @@ limitations under the License. 
```shell git fetch upstream ``` + * 将远程仓库代码同步到本地仓库 ```shell git checkout origin/master git merge --no-ff upstream/master ``` + * **⚠️注意一定要新建分支开发特性 `git checkout -b feature-xxx`,不建议使用master分支直接开发** * 在本地修改代码后,提交到自己的仓库: **注意提交信息为英文,不包含特殊字符** @@ -61,8 +65,8 @@ limitations under the License. git commit -m '[docs]necessary instructions' git push ``` + * 将更改提交到远程仓库后,您可以在您的仓库页面上看到一个绿色的按钮“Compare & pull request”,点击它。 * 这会弹出新建 Pull Request 页面,您需要这里仔细填写信息(英文),描述和代码同样重要,然后点击“Create pull request”按钮。 * 然后社区的 Committers 将进行 CodeReview,并与您讨论一些细节(包括设计、实现、性能等),之后您可以根据建议直接在这个分支更新代码(无需新建PR)。当社区 Committer approve之后,提交将被合并到 master 分支。 * 最后,恭喜您,您已经成为 HertzBeat 的官方贡献者,您会被加在贡献者墙上,您可以联系社区获取贡献者证书! - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md index 11455bfa3d1..51a2fadb805 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/download.md @@ -4,12 +4,14 @@ title: 下载 Apache HertzBeat (incubating) sidebar_label: Download --- -> **这里是 Apache HertzBeat (incubating) 官方下载页面。** +> **这里是 Apache HertzBeat (incubating) 官方下载页面。** > **请再下方表中选择版本下载,推荐使用最新版本。** :::tip + - 验证下载版本,请使用相应的哈希(sha512)、签名和[项目发布KEYS](https://downloads.apache.org/incubator/hertzbeat/KEYS)。 - 检查哈希和签名的方法参考 [如何验证](https://www.apache.org/dyn/closer.cgi#verify)。 + ::: ## 最新版本 @@ -30,6 +32,5 @@ sidebar_label: Download > Apache HertzBeat 为每个版本制作了 Docker 镜像. 你可以从 [Docker Hub](https://hub.docker.com/r/apache/hertzbeat) 拉取使用. 
-- HertzBeat https://hub.docker.com/r/apache/hertzbeat -- HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector - +- HertzBeat +- HertzBeat Collector diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md index 29d5478158a..94e2ad54899 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md @@ -143,4 +143,3 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md index 52367155d89..a7f77f7f5b6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/airflow.md @@ -36,4 +36,3 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] |-------------|------|---------------| | value | 无 | Airflow版本 | | git_version | 无 | Airflow git版本 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md index 9d0ee3b088f..ba6b49bc58a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md @@ -17,16 +17,16 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 2. 
**【保存机器人的WebHook地址access_token值】** -> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` +> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` > 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】** ![email](/img/docs/help/alert-notice-9.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md index 9694126d0dd..bb3c6287cd4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md @@ -63,8 +63,8 @@ keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告 1. Discord 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 +> 请排查在告警中心是否已有触发的告警信息 +> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 > 请排查机器人是否被 Discord聊天服务器正确赋权 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md index d4dc218c591..0f53b58e71d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md @@ -13,14 +13,14 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] ![email](/img/docs/help/alert-notice-1.png) -2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** +2. 
**【获取验证码】-> 【输入邮箱验证码】-> 【确定】** ![email](/img/docs/help/alert-notice-2.png) ![email](/img/docs/help/alert-notice-3.png) -3. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -32,7 +32,7 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] 2. 云环境TanCloud无法接收到邮件通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确邮箱,是否已配置告警策略关联 > 请查询邮箱的垃圾箱里是否把告警邮件拦截 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md index 604eff34fdc..5a6e95d7067 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md @@ -13,14 +13,14 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -28,7 +28,7 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 1. 飞书群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md index c81f5608674..5c5c38c56be 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md @@ -31,7 +31,7 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 1. Slack 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md index 73f434a8e8a..d6bca9843a4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md @@ -29,7 +29,7 @@ keywords: [ 告警华为云SMN通知, 开源告警系统, 开源监控告警系 5. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md index df609e66b50..dfb1aa48d8a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md @@ -60,8 +60,8 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] 1. Telegram 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 +> 请排查在告警中心是否已有触发的告警信息 +> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 > UserId 应为消息接收对象的UserId 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md index 022cd50f07e..272c59cfd4c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md @@ -13,9 +13,9 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 ![email](/img/docs/help/alert-notice-5.png) -2. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -60,7 +60,7 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 1. WebHook回调未生效 -> 请查看告警中心是否已经产生此条告警信息 +> 请查看告警中心是否已经产生此条告警信息 > 请排查配置的WebHook回调地址是否正确 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md index e0dbabf1a70..5c73ffee2a6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md @@ -15,16 +15,16 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** ![email](/img/docs/help/alert-notice-7.png) -4. ** 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +4. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> ** 注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人 **。 +> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 ![email](/img/docs/help/alert-notice-4.png) @@ -32,7 +32,7 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 1. 企业微信群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md index 391005c080c..abf262d52bd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/almalinux.md @@ -105,4 +105,3 @@ keywords: [开源监控系统, 开源操作系统监控, AlmaLinux操作系统 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md index 89f3cd701bc..c1df34f880a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/api.md @@ -34,4 +34,3 @@ keywords: [开源监控系统, 开源网站监控, HTTP API监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md index 3d0654db3b5..02a93f751c5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/centos.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, CentOS操作系统监 | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md index 955c87b4e4f..1f0e1f0e6c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/clickhouse.md @@ -93,4 +93,3 @@ keywords: [开源监控系统, 开源数据库监控, Clickhouse数据库监控] | MarkCacheBytes | 无 | StorageMergeTree 的 marks 的缓存大小 | | MarkCacheFiles | 无 | StorageMergeTree 的 marks 的缓存文件数量 | | MaxPartCountForPartition | 无 | partitions 中最大的活跃数据块的数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md index 6b353bafd0b..983787f3b1e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/debian.md @@ -99,4 +99,3 @@ keywords: [开源监控系统, 操作系统监控, Debian监控] - 内存占用率:% - CPU占用率:% - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md index ea4a376c049..12cb13b8422 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dm.md @@ -46,4 +46,3 @@ keywords: [开源监控系统, 开源数据库监控, 达梦数据库监控] | dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | | dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | | dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md index 303ac47444f..386ec0e91e1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md @@ -70,4 +70,3 @@ 
keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] | Section0 | 无 | DNS查询的附加信息。 | > Additional 指标集最多会采集10条响应记录,指标名称从 Section0 到 Section9。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md index c546b46fd2c..0b81365780b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/docker.md @@ -99,4 +99,3 @@ firewall-cmd --reload | cpu_delta | 无 | Docker容器已经使用的CPU数量 | | number_cpus | 无 | Docker容器可以使用的CPU数量 | | cpu_usage | 无 | Docker容器CPU使用率 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md index 10a66aa6853..78afde77138 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/doris_fe.md @@ -129,4 +129,3 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库FE监控] | committed | 无 | 已提交 | | visible | 无 | 可见 | | aborted | 无 | 已中止/已撤销 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md index 8c2f1e290e4..1abcb732289 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md @@ -99,4 +99,3 @@ management: | dynamic | 无 | 是否动态线程池 | | run_timeout_count | 无 | 运行超时任务数 | | queue_timeout_count | 无 | 等待超时任务数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md index a0b3082cc1a..0e872084c2e 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/elasticsearch.md @@ -61,4 +61,3 @@ keywords: [ 开源监控系统, 监控ElasticSearch ] | disk_free | GB | 磁盘剩余容量 | | disk_total | GB | 磁盘总容量 | | disk_used_percent | % | 磁盘使用率 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md index 6c894671cc6..4c7324a3f5d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/euleros.md @@ -105,4 +105,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, EulerOS操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md index 177c41874fb..79dfd76fd7e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/flink.md @@ -33,4 +33,3 @@ keywords: [开源监控系统, 开源 Flink 监控] | task_total | 个 | 任务总数 | | jobs_running | 个 | 正在运行的任务数 | | jobs_failed | 个 | 已经失败的任务数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md index 01313bd7ae6..e2890cc3b9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/freebsd.md @@ -85,4 +85,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, FreeBSD操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md index d421b6a78eb..ac1e1621c24 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ftp.md @@ -32,4 +32,3 @@ keywords: [ 开源监控系统, 开源FTP服务器监控工具, 监控FTP指标 |------|------|------------------| | 活动状态 | 无 | 检查目录是否存在,且具有访问权限 | | 响应时间 | ms | 连接FTP响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md index 9d39da7c9e4..54553c1200c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/fullsite.md @@ -5,8 +5,8 @@ sidebar_label: 全站监控 keywords: [开源监控系统, 开源网站监控, SiteMap监控] --- -> 对网站的全部页面监测是否可用 -> 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 +> 对网站的全部页面监测是否可用 +> 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 > 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 ### 配置参数 @@ -32,4 +32,3 @@ keywords: [开源监控系统, 开源网站监控, SiteMap监控] | statusCode | 无 | 请求此网页的响应HTTP状态码 | | responseTime | ms毫秒 | 网站响应时间 | | errorMsg | 无 | 请求此网站反馈的错误信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md index 48dc239b69a..0b5bfada4e7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/guide.md @@ -9,7 +9,7 @@ sidebar_label: 帮助入门 ## 🔬 监控服务 -> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 +> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 > 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 ### 应用服务监控 @@ -99,7 +99,7 @@ sidebar_label: 帮助入门 ## 💡 告警服务 -> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 +> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 > 告警服务的定位是阈值准确及时触发,告警通知及时可达。 ### 告警中心 @@ -115,8 +115,8 @@ 
sidebar_label: 帮助入门 ### 告警通知 -> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 -> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 +> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 +> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 > 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。  👉 [配置邮箱通知](alert_email)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md index bda83b006e4..186baede498 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hadoop.md @@ -87,4 +87,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md index e732bf45fd6..6912712607f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_master.md @@ -57,4 +57,3 @@ keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] | receivedBytes | MB | 集群接收数据量 | | sentBytes | MB | 集群发送数据量(MB) | | clusterRequests | 无 | 集群总请求数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md index 1c1cfdf1802..2452e34a469 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hbase_regionserver.md @@ -91,4 +91,3 @@ keywords: [开源监控系统, 开源数据库监控, RegionServer监控] | MemHeapMaxM | 无 | 集群负载均衡次数 | | MemMaxM | 无 | RPC句柄数 | | GcCount | MB | 集群接收数据量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md index db494acbb8e..5fe981ddaff 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_datanode.md @@ -54,4 +54,3 @@ keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS DataN | 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | StartTime | | 启动时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md index 66343c11cd2..46d69e5e2b4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hdfs_namenode.md @@ -90,4 +90,3 @@ keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS NameN | ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | | ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | | ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md index 3b41d3979c6..6e1efde0991 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hive.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | 内存池初始内存 | MB | 内存池请求的初始内存量。 | | 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | | 内存池内存使用量 | MB | 内存池已使用内存量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md index 6bd76f639e8..2d42a0170ee 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/huawei_switch.md @@ -51,4 +51,3 @@ keywords: [ 开源监控系统, 网络监控, 华为通用交换机监控 ] | ifOutErrors | 无 | 对于面向数据包的接口,该节点表示由于错误而无法发送的数据包数量。对于面向字符或固定长度接口,该节点表示由于错误而无法传输的传输单元的数量。这种计数器的值可能在管理系统的重新初始化时会不连续,其他时间如ifCounterDiscontinuityTime的值。 | | ifAdminStatus | 无 | 接口的理想状态。 
testing(3)状态表示没有可操作的数据包通过。 当受管系统初始化时,全部接口开始于ifAdminStatus在down(2)状态。由于明确的管理动作或被管理的系统保留的每个配置信息,ifAdminStatus然后被更改为Up(1)或testing(3)状态(或保留在down(2)状态)。 | | ifOperStatus | 无 | 当前接口的操作状态。testing(3)状态表示没有可操作的数据包可以通过。如果ifAdminStatus是down(2),则ifOperStatus应该是down(2)。 如果ifAdminStatus是改为up(1),则ifOperStatus应该更改为up(1)。如果接口准备好传输,接收网络流量; 它应该改为dormant(5)。如果接口正在等待外部动作(如串行线路等待传入连接); 它应该保持在down(2)状态,并且只有当有故障阻止它变成up(1)状态。 它应该留在notPresent(6)状态如果接口缺少(通常为硬件)组件。 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md index bb802791dda..11c9a91749a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/hugegraph.md @@ -138,4 +138,3 @@ keywords: [开源监控系统, 开源数据库监控, HugeGraph监控] | garbage_collector_g1_old_generation_count | 无 | 表示G1垃圾收集器老年代垃圾收集的次数 | | garbage_collector_g1_old_generation_time | 无 | 表示G1垃圾收集器老年代垃圾收集的总时间 | | garbage_collector_time_unit | 无 | 表示垃圾收集时间的单位(如毫秒、秒等) | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md index 00ff0b7f679..1b3bda87fe3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb.md @@ -63,4 +63,3 @@ keywords: [开源监控系统, 开源数据库监控, InfluxDB 数据库监控] |--------|------|--------| | result | 无 | 结果 | | org | 无 | 组织标识符 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md index 97469a71932..5cb8d241015 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/influxdb_promql.md @@ -59,4 +59,3 @@ keywords: [ 开源监控系统, InfluxDB监控,InfluxDB-PromQL监控 ] | instance | 无 | 指标所属实例 | | timestamp | 无 | 采集指标时间戳 | | value | 无 | 指标值 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md index fceb485f05b..8bb3bbb25e0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md @@ -41,7 +41,7 @@ predefinedMetrics: - FILE ``` -2. 重启 IoTDB, 打开浏览器或者用curl 访问 http://ip:9091/metrics, 就能看到metric数据了。 +2. 重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 3. 在 HertzBeat 添加对应 IoTDB 监控即可。 @@ -118,4 +118,3 @@ predefinedMetrics: |------------|------|-------------| | name | 无 | 名称 | | connection | 无 | thrift当前连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md index 745a4f70a88..f17a60a9b9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md @@ -6,20 +6,20 @@ sidebar_label: 常见问题 ### 监控常见问题 -1. ** 页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名 ** +1. **页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名** > 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http -2. ** 网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK ** +2. **网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK** > 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 
安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 +> docker安装默认启用无此问题 +> 详见 4. 配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 请参考下面几点排查问题: @@ -32,35 +32,34 @@ sidebar_label: 常见问题 ### Docker部署常见问题 -1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** +1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP +> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` -2. **按照流程部署,访问 http://ip:1157/ 无界面** +2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 +> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 -1. **按照流程部署,访问 http://ip:1157/ 无界面** +1. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md index b60a5882b9f..31e297703fc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md @@ -92,4 +92,3 @@ java -jar $JETTY_HOME/start.jar --add-module=jmx-remote | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md index f046b3ef6a0..3d9e96e55e1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jvm.md @@ -17,7 +17,7 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] 应用启动时添加JVM参数 ⚠️注意可自定义暴露端口,对外IP -参考文档: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#remote +参考文档: ```shell -Djava.rmi.server.hostname=对外ip地址 @@ -78,4 +78,3 @@ keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] | DaemonThreadCount | 个 | 守护进程数 | | CurrentThreadUserTime | ms | 使用时间 | | CurrentThreadCpuTime | ms | 使用CPU时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md index 3cb4d74132c..a79bb0e91c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md @@ -17,7 +17,7 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 
修改 Kafka 启动脚本 -修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` +修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` 在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 ```shell @@ -93,4 +93,3 @@ export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management. | FifteenMinuteRate | 无 | 十五分钟处理率 | > 其他指标见文知意,欢迎贡献一起优化文档。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md index e0e5ecf7e50..f75ee33ba77 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka_promql.md @@ -54,4 +54,3 @@ keywords: [ 开源监控系统,开源中间件监控, Kafka监控,Kafka-PromQL 1. kafka启用了JMX监控,可以使用 [Kafka](kafka) 监控; 2. kafka集群部署kafka_exporter暴露的监控指标,可以参考 [Prometheus任务](prometheus) 配置Prometheus采集任务监控kafka。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md index aa242d93a6b..4f0363f621d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md @@ -13,7 +13,7 @@ keywords: [开源监控系统, 开源Kubernetes监控] 参考获取token步骤 -#### 方式一: +#### 方式一 1. 
创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### 方式二: +#### 方式二 ``` kubectl create serviceaccount cluster-admin @@ -96,4 +96,3 @@ kubectl create token --duration=1000h cluster-admin | cluster_ip | 无 | cluster ip | | selector | 无 | tag selector匹配 | | creation_time | 无 | 创建时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md index 4a69c04495e..abd87de1ef8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/linux.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, Linux操作系统监控 | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md index 2490e3630dd..4690b5500ef 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mariadb.md @@ -51,4 +51,3 @@ keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] | innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | | innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | | innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md index db88c1ac5fc..1066d3934dc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/memcached.md @@ -30,7 +30,7 @@ STAT version 1.4.15 ... 
``` -**帮助文档: https://www.runoob.com/memcached/memcached-stats.html** +**帮助文档: ** ### 配置参数 @@ -65,4 +65,3 @@ STAT version 1.4.15 | cmd_flush | 无 | Flush 命令请求数 | | get_misses | 无 | Get 命令未命中次数 | | delete_misses | 无 | Delete 命令未命中次数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md index 8c54174b54a..46d602fac9f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mongodb.md @@ -93,4 +93,3 @@ keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] | pageSize | 无 | 内存页大小 | | numPages | 无 | 内存页数量 | | maxOpenFiles | 无 | 系统中允许打开的最大文件数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md index c5deaab27a2..47087c88f34 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/mysql.md @@ -51,4 +51,3 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] | innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | | innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | | innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md index 84b432f4651..0b9b96b6099 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md @@ -92,4 +92,3 @@ management.endpoints.web.exposure.include=* | nacos_monitor{name='configListenSize'} | 无 | 监听的配置数 | | nacos_client_request_seconds_count | 无 | 请求的次数,包括多种(url,方法,code) | | nacos_client_request_seconds_sum | 秒 | 请求的总耗时,包括多种(url,方法,code) | - diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md index ded4a06ad2f..9faed580e1b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph.md @@ -14,23 +14,23 @@ keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGr nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 ``` -### +### **1、通过 stats 和 rocksdb stats 接口获取可用参数。** 1.1、如果只需要获取 nebulaGraph_stats,需要确保可以访问 stats,否则会出现错误。 -默认端口是 19669,访问地址为 http://ip:19669/stats +默认端口是 19669,访问地址为 1.2、如果需要获取 rocksdb stats 的附加参数,需要确保可以访问 rocksdb stats,否则会报错。 首次连接 NebulaGraph 时,必须先注册 Storage 服务,以便正确查询数据。 -**有帮助文档:https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** +**有帮助文档:** -**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** +**** -默认端口是 19779,访问地址为:http://ip:19779/rocksdb_stats +默认端口是 19779,访问地址为: ### 配置参数 @@ -51,7 +51,7 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB #### 指标集:nebulaGraph_stats 指标太多,相关链接如下 -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** +**** | 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------------------------------------------|------|--------| @@ -114,11 +114,10 @@ nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB #### 指标集:rocksdb_stats 指标太多,相关链接如下 -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** +**** | 指标名称 | 指标单位 | 指标帮助描述 | |----------------------------|------|------------------------| | rocksdb.backup.read.bytes | | 备份 RocksDB 数据库期间读取的字节数 | | rocksdb.backup.write.bytes | | 指标名称 | | ... | | ... 
| - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md index 252f5f47d8a..7fe8792d29d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nebulagraph_cluster.md @@ -89,4 +89,3 @@ keywords: [ 开源监控系统, 开源数据库监控, 开源图数据库监控, | version | 无 | 版本 | > 如果需要自定义监控模板采集NebulaGraph集群的数据,请参考: [NGQL自定义监控](../advanced/extend-ngql.md) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md index 82908df358b..8c81c5a82c2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md @@ -45,8 +45,8 @@ server { location /nginx-status { stub_status on; access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } ``` @@ -93,8 +93,8 @@ http { server { location /req-status { req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } } @@ -108,7 +108,7 @@ nginx -s reload 4. 
在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 -**参考文档: https://blog.csdn.net/weixin_55985097/article/details/116722309** +**参考文档: ** **⚠️注意监控模块的端点路径为 `/nginx-status` `/req-status`** @@ -151,4 +151,3 @@ nginx -s reload | 总请求数 | | 总请求数 | | 当前并发连接数 | | 当前并发连接数 | | 当前带宽 | kb | 当前带宽 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md index 735ab741b4d..a160f2501e4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md @@ -39,4 +39,3 @@ NTP监控的中文文档如下: | 层级 | | NTP服务器的层级,表示其与参考时钟的距离。 | | 参考ID | | 指示NTP服务器使用的参考时钟或时间源的标识符。 | | 精度 | | NTP服务器时钟的精度,表示其准确性。 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md index 0af3ca3d17b..a67a0c1c820 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/openai.md @@ -12,9 +12,9 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] > 1. 打开 Chrome 浏览器的网络请求界面 > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` -> 2. 访问 https://platform.openai.com/usage -> 3. 找到 https://api.openai.com/dashboard/billing/usage 请求 -> 4. 找到请求头中 Authorization 字段,并复制 `Bearer ` 之后的内容。例如: `sess-123456` +> 2. 访问 +> 3. 找到 请求 +> 4. 
找到请求头中 Authorization 字段,并复制 `Bearer` 之后的内容。例如: `sess-123456` ### 注意事项 @@ -81,4 +81,3 @@ keywords: [开源监控系统, 开源网络监控, OpenAI账户监控] | 税务ID | 无 | 税务ID | | 结算地址 | 无 | 结算地址 | | 业务地址 | 无 | 业务地址 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md index 8bf21d7debb..964fc909c33 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opengauss.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, OpenGauss数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md index f32e2b070ae..2f1e00a9e39 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/opensuse.md @@ -105,4 +105,3 @@ keywords: [开源监控系统, 开源操作系统监控, OpenSUSE操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md index 7ffdfa219ff..d6bb80eb98d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/oracle.md @@ -61,4 +61,3 @@ keywords: [开源监控系统, 开源数据库监控, Oracle数据库监控] | qps | QPS | I/O Requests per Second 每秒IO请求数量 | | tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | | mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md 
index 401e86f9382..59ac237ed34 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ping.md @@ -31,7 +31,6 @@ keywords: [开源监控系统, 开源网络监控, 网络PING监控] 1. 安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - +> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 +> docker安装默认启用无此问题 +> 详见 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md index 36a6bba9fe7..fea6eae1085 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/plugin.md @@ -25,4 +25,3 @@ sidebar_label: 自定义插件 4. 将打包后的`jar`包,拷贝到安装目录下的`ext-lib`目录下(若为`docker`安装则先将`ext-lib`目录挂载出来,再拷贝到该目录下) ![plugin-4.png](/img/docs/help/plugin-4.png) 5. 
然后重启`HertzBeat`,即可实现自定义告警后处理策略。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md index 4c58cc4a308..7a55a98df3e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pop3.md @@ -44,4 +44,3 @@ keywords: [开源监控工具,开源Java监控工具,监控POP3指标] |-------|------|-----------| | 邮件数量 | | 邮件数量 | | 邮箱总大小 | kb | 邮箱中邮件的总大小 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md index dd0b19aac82..0b73299aa53 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/port.md @@ -26,4 +26,3 @@ keywords: [开源监控系统, 开源网络监控, TCP 端口可用性监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md index 59adae7da81..12485e62ffa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/postgresql.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, PostgreSQL数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md index 2eda0726d27..cd21bece380 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/process.md @@ -85,4 +85,3 @@ keywords: [开源监控系统, 
操作系统进程监控, 进程监控] - read_bytes(进程从磁盘实际读取的字节数) - write_bytes(进程写入到磁盘的实际字节数) - cancelled_write_bytes(进程写入到磁盘的实际字节数) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md index 571da45aac0..a9feebfe623 100755 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/prometheus.md @@ -39,4 +39,3 @@ keywords: [ 开源监控系统, Prometheus协议监控 ] - 端点路径:`/actuator/prometheus` 其余设置保持默认。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md index 1c12244997b..f37070d8604 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/pulsar.md @@ -70,4 +70,3 @@ Broker端消息发布延迟 #### 指标集合:pulsar_metadata_store_ops_latency_ms Broker端元数据存储操作延迟 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md index 89c728162c9..2210a2452e0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md @@ -18,7 +18,7 @@ keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间 rabbitmq-plugins enable rabbitmq_management ``` -2. 浏览器访问 http://ip:15672/ ,默认账户密码 `guest/guest`. 成功登录即开启成功。 +2. 浏览器访问 ,默认账户密码 `guest/guest`. 成功登录即开启成功。 3. 
在 HertzBeat 添加对应 RabbitMQ 监控即可,参数使用 Management 的 IP 端口,默认账户密码。 @@ -123,4 +123,3 @@ rabbitmq-plugins enable rabbitmq_management | message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md index e0b8ae48cf4..1c4c6b5167a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redhat.md @@ -105,4 +105,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, RedHat操作系统监 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md index 58248fb0b45..0a0c9f77a65 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/redis.md @@ -237,4 +237,3 @@ keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] | cmdstat_lpop | 无 | lpop命令的统计信息 | | cmdstat_rpop | 无 | rpop命令的统计信息 | | cmdstat_llen | 无 | llen命令的统计信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md index 84cc24fc976..65ca5d96613 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rocketmq.md @@ -46,4 +46,3 @@ keywords: [ 开源监控系统, 开源中间件监控, RocketMQ消息中间件 | Consume_type | 无 | 
消费类型 | | Consume_tps | 无 | 消费TPS | | Delay | 无 | 延迟 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md index 55923468da8..35dcfae06ef 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rockylinux.md @@ -105,4 +105,3 @@ keywords: [ 开源监控系统, 开源操作系统监控, Rocky Linux操作系 | mem_usage | % | 内存占用率 | | cpu_usage | % | CPU占用率 | | command | 无 | 执行命令 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md index 1149ed4bdd9..87bb81b7800 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md @@ -127,4 +127,3 @@ shenyu: |-------|------|-------------| | state | 无 | 线程状态 | | value | 无 | 对应线程状态的线程数量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md index 5755437e80e..73e9af9ee13 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/smtp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit 通过 SMTP 的 hello 命令确定服务器是否可用 ``` -> 详见 https://datatracker.ietf.org/doc/html/rfc821#page-13 +> 详见 **协议使用:SMTP** @@ -38,4 +38,3 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit | 响应状态 | | 响应状态 | | SMTP 服务器标语 | | SMTP 服务器的标语 | | helo 命令返回信息 | | helo 命令返回的响应信息 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md index a0695849705..aaba0dd9841 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/spring_gateway.md @@ -87,4 +87,3 @@ management: | 匹配规则 | 无 | 路由匹配规则 | | 资源标识符 | 无 | 服务资源标识符 | | 优先级 | 无 | 此路由的优先级 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md index e66d4237a13..d39b67d3efd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot2.md @@ -94,4 +94,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ |----------|------|-----------| | space | 无 | 内存空间名称 | | mem_used | MB | 此空间占用内存大小 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md index 56a63068b17..58f1942cf0d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/springboot3.md @@ -89,4 +89,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ | 指标名称 | 指标单位 | 指标帮助描述 | |--------|------|-----------------| | status | 无 | 服务健康状态: UP,Down | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md index 22a5a50ddd8..847a7775adc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md @@ -57,8 +57,8 @@ keywords: [开源监控系统, 开源数据库监控, SqlServer数据库监控] 1. 
SSL连接问题修复 -jdk版本:jdk11 -问题描述:SQL Server2019使用SA用户连接报错 +jdk版本:jdk11 +问题描述:SQL Server2019使用SA用户连接报错 错误信息: ```text diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md index 73957e31fb8..e15de6e3e97 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ssl_cert.md @@ -31,4 +31,3 @@ keywords: [开源监控系统, 开源网站监控, SSL证书监控监控] | start_timestamp | ms毫秒 | 有效期开始时间戳 | | end_time | 无 | 过期时间 | | end_timestamp | ms毫秒 | 过期时间戳 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md index fe5eef718ef..b0ea82505ca 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tidb.md @@ -44,4 +44,3 @@ keywords: [开源监控系统, 开源数据库监控, TiDB数据库监控] | max_connections | 无 | 该变量表示 TiDB 中同时允许的最大客户端连接数,用于资源控制。默认情况下,该变量值为 0 表示不限制客户端连接数。当本变量的值大于 0 且客户端连接数到达此值时,TiDB 服务端将会拒绝新的客户端连接。 | | datadir | 无 | 数据存储的位置,位置可以是本地路径 /tmp/tidb。如果数据存储在 TiKV 上,则可以是指向 PD 服务器的路径。变量值的格式为 ${pd-ip}:${pd-port},表示 TiDB 在启动时连接到的 PD 服务器。 | | port | 无 | 使用 MySQL 协议时 tidb-server 监听的端口。 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md index 8b5e6c8aca9..1a6b02b45b2 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md @@ -62,4 +62,3 @@ ${FORMATTER [{ + | - } ]} - `${time+1h+15s+30s}` 计算当前时间一小时15分钟30秒之后的时间,并格式化为 `HH:mm:ss` 2. 
复杂表达式模板(如果内置的格式化器无法满足需要,可以组合使用多个表达式) - `${@year}年${@month}月${@day}日`,获取当前日期并按照 yyyy年MM月dd日格式返回 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md index b366ee3c2ac..e1f112777f7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/tomcat.md @@ -72,4 +72,4 @@ keywords: [开源监控系统, 开源网站监控, Tomcat监控] CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` -参考: https://blog.csdn.net/weixin_41924764/article/details/108694239 +参考: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md index 3ec51e5464a..4425f1c2c06 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ubuntu.md @@ -79,4 +79,3 @@ keywords: [开源监控系统, 开源操作系统监控, Ubuntu监控] | available | Mb | 可用磁盘大小 | | usage | % | 使用率 | | mounted | 无 | 挂载点目录 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md index ee2f388873b..dde32b91e4d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/udp_port.md @@ -29,4 +29,3 @@ keywords: [开源监控系统, 开源网络监控, UDP 端口可用性监控] | 指标名称 | 指标单位 | 指标帮助描述 | |------|---------|--------| | 响应时间 | 毫秒 (ms) | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md index 8efe5262612..7403f255aec 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/website.md @@ -27,4 +27,3 @@ keywords: [开源监控系统, 开源网站监控] | 指标名称 | 指标单位 | 指标帮助描述 | |--------------|------|--------| | responseTime | ms毫秒 | 网站响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md index 3bd02f3ce18..b4dfc13d701 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/websocket.md @@ -31,4 +31,3 @@ keywords: [ 开源监控系统, Websocket监控 ] | statusMessage | 无 | 状态消息 | | connection | 无 | 表示连接方式 | | upgrade | 无 | 升级后的协议 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md index 41447469e61..0b1791435f4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/windows.md @@ -8,10 +8,10 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 > 通过SNMP协议对Windows操作系统的通用性能指标进行采集监控。 > 注意⚠️ Windows服务器需开启SNMP服务 -参考资料: -[什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) +参考资料: +[什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) +[什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) +[Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) 
[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### 配置参数 @@ -41,4 +41,3 @@ keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监 | services | 个数 | 当前服务数量 | | processes | 个数 | 当前进程数量 | | responseTime | ms | 采集响应时间 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md index c35a0226876..6694aff14fe 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/yarn.md @@ -81,4 +81,3 @@ keywords: [大数据监控系统, Apache Yarn监控, 资源管理器监控] | 指标名称 | 指标单位 | 指标帮助描述 | |-----------|------|--------| | StartTime | | 启动时间戳 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md index 14d50c3c90d..9752c22bc4e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md @@ -97,4 +97,3 @@ Complete! 
| zk_max_latency | ms | 最大延时 | | zk_ephemerals_count | 个 | 临时节点数 | | zk_min_latency | ms | 最小延时 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md index 5c3f1f90feb..60c550fa547 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md @@ -32,13 +32,13 @@ slug: / > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ----- +---- ### 强大的监控模版 > 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 -HertzBeat 自身并没有去创造一种采集数据协议让监控对端来适配它。而是充分使用了现有的生态,`SNMP协议`采集网络交换机路由器信息,`JMX规范`采集JAVA应用信息,`JDBC规范`采集数据集信息,`SSH`直连执行脚本获取回显信息,`HTTP+(JsonPath | prometheus等)`解析API接口信息,`IPMI协议`采集服务器信息等等。 +HertzBeat 自身并没有去创造一种采集数据协议让监控对端来适配它。而是充分使用了现有的生态,`SNMP协议`采集网络交换机路由器信息,`JMX规范`采集JAVA应用信息,`JDBC规范`采集数据集信息,`SSH`直连执行脚本获取回显信息,`HTTP+(JsonPath | prometheus等)`解析API接口信息,`IPMI协议`采集服务器信息等等。 HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标数据。 ![hertzbeat](/img/blog/multi-protocol.png) @@ -92,21 +92,22 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 强大自定义功能 -> 由前面的**监控模版**介绍,大概清楚了 `HertzBeat` 拥有的强大自定义功能。 -> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 +> 由前面的**监控模版**介绍,大概清楚了 `HertzBeat` 拥有的强大自定义功能。 +> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 > 模版里面包含各个协议的使用配置,环境变量,指标转换,指标计算,单位转换,指标采集等一系列功能,帮助用户能采集到自己想要的监控指标。 ![hertzbeat](/img/docs/custom-arch.png) ### 无需 Agent -> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 `agent` 的安装部署调试升级了。 -> 每台主机得装个 `agent`,为了监控不同应用中间件可能还得装几个对应的 `agent`,监控数量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 +> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 `agent` 的安装部署调试升级了。 +> 每台主机得装个 `agent`,为了监控不同应用中间件可能还得装几个对应的 `agent`,监控数量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 > `agent` 的版本是否与主应用兼容, `agent` 与主应用的通讯调试, `agent` 的同步升级等等等等,这些全是头大的点。 -`HertzBeat` 的原理就是使用不同的协议去直连对端系统,采用 `PULL` 
的形式去拉取采集数据,无需用户在对端主机上部署安装 `Agent` | `Exporter` 等。 -- 比如监控 `linux操作系统`, 在 `HertzBeat` 端输入IP端口账户密码或密钥即可。 -- 比如监控 `mysql数据库`, 在 `HertzBeat` 端输入IP端口账户密码即可。 +`HertzBeat` 的原理就是使用不同的协议去直连对端系统,采用 `PULL` 的形式去拉取采集数据,无需用户在对端主机上部署安装 `Agent` | `Exporter` 等。 + +- 比如监控 `linux操作系统`, 在 `HertzBeat` 端输入IP端口账户密码或密钥即可。 +- 比如监控 `mysql数据库`, 在 `HertzBeat` 端输入IP端口账户密码即可。 **密码等敏感信息全链路加密** ### 高性能集群 @@ -152,11 +153,11 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 --- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ------ +----- ## 即刻体验一波 -Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` +Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` 浏览器访问 `http://localhost:1157` 默认账户密码 `admin/hertzbeat` ### 登陆页面 @@ -301,6 +302,6 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 **还有更多强大的功能快去探索呀。Have Fun!** ------ +----- -**Github: https://github.com/apache/hertzbeat** +**Github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md index 0e01e014901..910499fe860 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md @@ -17,4 +17,3 @@ Download: [SVG](/img/hertzbeat-logo.svg) [PNG](/img/hertzbeat-logo.png) ![logo](/img/hertzbeat-brand.svg) Download: [SVG](/img/hertzbeat-brand.svg) [PNG](/img/hertzbeat-brand.png) - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md index 7ad94c08b93..bcee1414029 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/account-modify.md @@ -6,9 +6,9 @@ sidebar_label: 更新账户和密钥 
## 更新账户 -Apache HertzBeat (incubating) 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat -若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 -修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 +Apache HertzBeat (incubating) 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat +若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 +修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 配置文件内容参考 项目仓库[/script/sureness.yml](https://github.com/apache/hertzbeat/blob/master/script/sureness.yml) ```yaml @@ -127,4 +127,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**重启 HertzBeat 浏览器访问 http://ip:1157/ 即可探索使用 HertzBeat** +**重启 HertzBeat 浏览器访问 即可探索使用 HertzBeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md index 01380784169..95bedddc350 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md @@ -10,8 +10,8 @@ sidebar_label: 常见参数配置 ### 配置HertzBeat的配置文件 -修改位于 `hertzbeat/config/application.yml` 的配置文件 -注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 +修改位于 `hertzbeat/config/application.yml` 的配置文件 +注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 1. 
配置短信发送服务器 @@ -74,4 +74,3 @@ warehouse: port: 6379 password: 123456 ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md index aa01b6f5d30..76efdf8f5d0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md @@ -6,7 +6,7 @@ sidebar_label: Docker方式部署 > 推荐使用 Docker 部署 Apache HertzBeat (incubating) -1. 下载安装Docker环境 +1. 下载安装Docker环境 Docker 工具自身的下载请参考以下资料: [Docker官网文档](https://docs.docker.com/get-docker/) [菜鸟教程-Docker教程](https://www.runoob.com/docker/docker-tutorial.html) @@ -16,21 +16,23 @@ sidebar_label: Docker方式部署 $ docker -v Docker version 20.10.12, build e91ed57 ``` -2. 拉取HertzBeat Docker镜像 - 镜像版本TAG可查看 [dockerhub 官方镜像仓库](https://hub.docker.com/r/apache/hertzbeat/tags) + +2. 拉取HertzBeat Docker镜像 + 镜像版本TAG可查看 [dockerhub 官方镜像仓库](https://hub.docker.com/r/apache/hertzbeat/tags) 或者使用 [quay.io 镜像仓库](https://quay.io/repository/apache/hertzbeat) ```shell - $ docker pull apache/hertzbeat - $ docker pull apache/hertzbeat-collector + docker pull apache/hertzbeat + docker pull apache/hertzbeat-collector ``` 若网络超时或者使用 ```shell - $ docker pull quay.io/tancloud/hertzbeat - $ docker pull quay.io/tancloud/hertzbeat-collector + docker pull quay.io/tancloud/hertzbeat + docker pull quay.io/tancloud/hertzbeat-collector ``` + 3. 部署HertzBeat您可能需要掌握的几条命令 ```shell @@ -46,18 +48,19 @@ sidebar_label: Docker方式部署 ctrl+d或者 $ exit ``` -4. 挂载并配置HertzBeat的配置文件(可选) - 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml + +4. 
挂载并配置HertzBeat的配置文件(可选) + 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数 - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) -5. 挂载并配置HertzBeat用户配置文件,自定义用户密码(可选) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 - 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml - 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) +5. 挂载并配置HertzBeat用户配置文件,自定义用户密码(可选) + HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 + 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml + 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) 具体修改步骤参考 [配置修改账户密码](account-modify) 6. 启动HertzBeat Docker容器 @@ -88,12 +91,13 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ - `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 ```shell - $ docker update --restart=always hertzbeat + docker update --restart=always hertzbeat ``` + - `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** 7. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 8. 
部署采集器集群(可选) @@ -107,6 +111,7 @@ $ docker run -d \ ``` 这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 + - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 - `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 @@ -124,13 +129,13 @@ $ docker run -d \ **最多的问题就是网络问题,请先提前排查** -1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** +1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP +> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` -2. **按照流程部署,访问 http://ip:1157/ 无界面** +2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 @@ -139,7 +144,7 @@ $ docker run -d \ 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. **监控历史图表长时间都一直无数据** @@ -159,4 +164,3 @@ $ docker run -d \ > iot-db 或td-engine enable 是否设置为true > 注意⚠️若hertzbeat和IotDB,TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md index 5bf5faaf76e..ed69d93bfba 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md @@ -8,7 +8,7 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 > 我们推荐使用并长期支持 VictoriaMetrics 作为存储。 -GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. 
+GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** @@ -16,7 +16,8 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### 通过Docker方式安装GreptimeDB > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -42,7 +43,7 @@ $ docker run -p 4000-4004:4000-4004 \ ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -65,4 +66,3 @@ warehouse: 1. 
时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md index ccff933ac1f..d1d4b3f241f 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** ### 1. 直接使用华为云服务 GaussDB For Influx @@ -24,7 +24,8 @@ InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海 ### 2. 通过Docker方式安装InfluxDB > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -41,13 +42,13 @@ $ docker run -p 8086:8086 \ influxdb:1.8 ``` -`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 +`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -74,4 +75,3 @@ warehouse: 1. 
时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md index b24eba892a2..c5286af5330 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md @@ -28,6 +28,7 @@ Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据 $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker安装IoTDB ```shell @@ -122,4 +123,3 @@ warehouse: > iot-db enable是否设置为true > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md index 1d122575d2c..874de9a0328 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md @@ -12,7 +12,7 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) ### 通过Docker方式安装MYSQL -1. 下载安装Docker环境 +1. 下载安装Docker环境 Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后请于终端检查Docker版本输出是否正常。 @@ -20,6 +20,7 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker安装MYSQl ``` @@ -31,14 +32,14 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) mysql:5.7 ``` - `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 + `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 使用```$ docker ps```查看数据库是否启动成功 ### 数据库创建 -1. 进入MYSQL或使用客户端连接MYSQL服务 +1. 进入MYSQL或使用客户端连接MYSQL服务 `mysql -uroot -p123456` -2. 创建名称为hertzbeat的数据库 +2. 
创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 查看hertzbeat数据库是否创建成功 `show databases;` @@ -46,9 +47,9 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) ### 修改hertzbeat的配置文件application.yml切换数据源 - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 + 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) 需修改部分原参数: @@ -75,15 +76,16 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) jpa: database: mysql ``` + - 通过docker启动时,建议修改host为宿主机的外网IP地址,包括mysql连接字符串和redis。 -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** ### 常见问题 1. 
缺少hibernate的mysql方言,导致启动异常 Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set -如果上述配置启动系统,出现` Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set`异常, +如果上述配置启动系统,出现`Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set`异常, 需要在`application.yml`文件中增加以下配置: ```yaml @@ -95,4 +97,3 @@ spring: hibernate: dialect: org.hibernate.dialect.MySQL5InnoDBDialect ``` - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md index 894c58f1e3b..e89cabd8b10 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md @@ -9,7 +9,7 @@ sidebar_label: 安装包方式部署 1. 下载HertzBeat安装包 下载您系统环境对应的安装包 `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` - [下载页面](/docs/download) -2. 配置HertzBeat的配置文件(可选) +2. 
配置HertzBeat的配置文件(可选) 解压安装包到主机 eg: /opt/hertzbeat ``` @@ -18,26 +18,27 @@ sidebar_label: 安装包方式部署 $ unzip -o hertzbeat-xx.zip ``` - 修改位于 `hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 - 若需使用邮件发送告警,需替换`application.yml`里面的邮件服务器参数 - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - **强烈推荐** 以后我们将主要支持VictoriaMetrics作为时序数据库,若需使用时序数据库VictoriaMetrics来存储指标数据,需替换`application.yml`里面的`warehouse.store.victoria-metrics`参数 具体步骤参见 [使用VictoriaMetrics存储指标数据](victoria-metrics-init) - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) -3. 配置用户配置文件(可选,自定义配置用户密码) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过修改位于 `hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 +3. 配置用户配置文件(可选,自定义配置用户密码) + HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过修改位于 `hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 具体参考 [配置修改账户密码](account-modify) 4. 部署启动 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat ``` - $ ./startup.sh + ./startup.sh ``` + 5. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 6. 
部署采集器集群(可选) - 下载解压您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [下载页面](/docs/download) - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` @@ -53,7 +54,8 @@ sidebar_label: 安装包方式部署 manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` + + - 启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 **HAVE FUN** @@ -64,9 +66,9 @@ sidebar_label: 安装包方式部署 1. **若您使用的是不含JDK的安装包,需您提前准备JAVA运行环境** -安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) -要求:JAVA17环境 -下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) +安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) +要求:JAVA17环境 +下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) 安装后命令行检查是否成功安装 ``` @@ -77,16 +79,16 @@ Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) ``` -2. **按照流程部署,访问 http://ip:1157/ 无界面** +2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 +> 一:排查配置的数据库账户密码是否正确,数据库是否创建 > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. 
**监控历史图表长时间都一直无数据** @@ -94,4 +96,3 @@ Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) > 一:时序数据库是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 > 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md index 6a48c741201..e3a6b66a41d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md @@ -12,7 +12,7 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM ### 通过Docker方式安装PostgreSQL -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -20,10 +20,11 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Docker安装 PostgreSQL ``` - $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 + docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` 使用```$ docker ps```查看数据库是否启动成功 @@ -38,7 +39,8 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM su - postgres psql ``` -2. 创建名称为hertzbeat的数据库 + +2. 创建名称为hertzbeat的数据库 `CREATE DATABASE hertzbeat;` 3. 查看hertzbeat数据库是否创建成功 `\l` @@ -46,9 +48,9 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM ### 修改hertzbeat的配置文件application.yml切换数据源 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 + 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://github.com/hertzbeat/hertzbeat/raw/master/script/application.yml) ```yaml @@ -84,4 +86,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**启动 HertzBeat 浏览器访问 http://ip:1157/ 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** +**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md index 918bfdea17a..85756d3df46 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md @@ -41,7 +41,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN 1. 下载您系统环境对应的安装包`hertzbeat-xx.tar.gz` [Download](https://hertzbeat.apache.org/docs/download) 2. 配置 HertzBeat 的配置文件 `hertzbeat/config/application.yml`(可选) -3. 部署启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` +3. 部署启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` 4. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 5. 
部署采集器集群(可选) - 下载您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [Download](https://hertzbeat.apache.org/docs/download) @@ -58,7 +58,8 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - 启动 `$ ./bin/startup.sh ` 或 `bin/startup.bat` + + - 启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) @@ -94,10 +95,10 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ##### 安装Mysql(可选) -1. docker安装Mysql - ` $ docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7` +1. docker安装Mysql + `$ docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7` `-v /opt/data:/var/lib/mysql` - 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 -2. 创建名称为hertzbeat的数据库 +2. 创建名称为hertzbeat的数据库 `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. 在hertzbeat的配置文件`application.yml`配置Mysql数据库替换H2内置数据库连接参数 @@ -105,7 +106,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN ##### 安装TDengine(可选) -1. docker安装TDengine +1. docker安装TDengine `docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp --name tdengine tdengine/tdengine:3.0.4.0` 2. 创建名称为hertzbeat的数据库 3. 
在hertzbeat的配置文件`application.yml`配置tdengine连接 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md index a6efb36482d..1a0bc9a39f6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md @@ -12,7 +12,7 @@ sidebar_label: 使用案例 Apache HertzBeat (incubating) 一个拥有强大自定义监控能力,无需Agent的实时监控工具。网站监测,PING连通性,端口可用性,数据库,操作系统,中间件,API监控,阈值告警,告警通知(邮件微信钉钉飞书)。 -github: https://github.com/apache/hertzbeat +github: #### 安装 HertzBeat @@ -32,7 +32,7 @@ github: https://github.com/apache/hertzbeat 2. 配置监控网站 -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 +> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 > 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 ![](/img/docs/start/ssl_2.png) @@ -77,8 +77,8 @@ github: https://github.com/apache/hertzbeat 钉钉微信飞书等token配置可以参考帮助文档 -https://hertzbeat.apache.org/docs/help/alert_dingtalk -https://tancloud.cn/docs/help/alert_dingtalk + + > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 @@ -88,8 +88,8 @@ https://tancloud.cn/docs/help/alert_dingtalk ---- -#### 完! +#### 完 监控SSL证书的实践就到这里,当然对hertzbeat来说这个功能只是冰山一角,如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
-**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md index 85c6db66eb2..3daaa5fa17d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有TDengine环境,可直接跳到创建数据库实例那一步。 @@ -18,7 +18,8 @@ TDengine是一款开源物联网时序型数据库,我们用其存储采集到 ### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -37,7 +38,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ ``` `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 -`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 +`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 使用```$ docker ps```查看数据库是否启动成功 ### 创建数据库实例 @@ -47,8 +48,9 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 1. 进入数据库Docker容器 ``` - $ docker exec -it tdengine /bin/bash + docker exec -it tdengine /bin/bash ``` + 2. 修改账户密码 > 建议您修改密码。TDengine默认的账户密码是 root/taosdata @@ -80,6 +82,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ taos> show databases; taos> use hertzbeat; ``` + 5. 退出TDengine CLI ``` @@ -93,7 +96,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** @@ -134,4 +137,3 @@ warehouse: > td-engine enable是否设置为true > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP > 可根据logs目录下启动日志排查 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md index 05355fe6559..1b07dd1c789 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating) 的历史数据存储依赖时序数据库,任 VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决方案和时序数据库,兼容 Prometheus 生态。推荐版本(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** +**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** **⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** > 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 @@ -18,7 +18,8 @@ VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决 ### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> 1. 下载安装Docker环境 +> +> 1. 下载安装Docker环境 > Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 > 安装完毕后终端查看Docker版本是否正常输出。 > @@ -41,8 +42,8 @@ $ docker run -d -p 8428:8428 \ 3. 
在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 配置HertzBeat的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** @@ -68,4 +69,3 @@ warehouse: 1. 时序数据库是否都需要配置,能不能都用 > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md index 219620a230c..521e528a299 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/template.md @@ -6,7 +6,7 @@ sidebar_label: 监控模版 > Apache HertzBeat (incubating) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
这是它的架构原理: diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md index 9ccb0e9454b..1030e382685 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md @@ -154,4 +154,3 @@ metrics: # Hertzbeat default parsing is used here parseType: default ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md index 43d0b6cd9c9..810556ea95d 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md @@ -55,7 +55,7 @@ As above, usually our background API interface will design such a general return } ``` -**This time we get the metric data such as `category`, `app`, `status`, `size`, `availableSize` under the app. ** +**This time we get the metric data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** ### Add custom monitoring template `hertzbeat` @@ -194,7 +194,7 @@ metrics: ``` -**The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type. ** +**The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type.** ![](/img/docs/advanced/extend-http-example-1.png) @@ -216,12 +216,12 @@ metrics: ---- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. 
-**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md index edd713acad9..d53e304500d 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md @@ -10,7 +10,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz ### Request process -【**Authentication information metrics (highest priority)**】【**HTTP interface carries account password call**】->【**Response data analysis**】->【**Analysis and issuance of TOKEN-accessToken as an metric **] -> [**Assign accessToken as a variable parameter to other collection index groups**] +【**Authentication information metrics (highest priority)**】【**HTTP interface carries account password call**】->【**Response data analysis**】->【**Analysis and issuance of TOKEN-accessToken as an metric**] -> [**Assign accessToken as a variable parameter to other collection index groups**] > Here we still use the hertzbeat monitoring example of Tutorial 1! The hertzbeat background interface not only supports the basic direct account password authentication used in Tutorial 1, but also supports token authentication. 
@@ -202,7 +202,7 @@ metrics: ``` -**At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows: ** +**At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows:** ```json { @@ -213,7 +213,7 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) -** After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page. ** +**After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page.** ![](/img/docs/advanced/extend-http-example-6.png) @@ -223,7 +223,7 @@ metrics: **Add an index group definition `summary` in `app-hertzbeat_token.yml`, which is the same as `summary` in Tutorial 1, and set the collection priority to 1** -**Set the authentication method in the HTTP protocol configuration of this index group to `Bearer Token`, assign the index `token` collected by the previous index group `auth` as a parameter, and use `^o^` as the internal replacement symbol, that is ` ^o^token^o^`. as follows:** +**Set the authentication method in the HTTP protocol configuration of this index group to `Bearer Token`, assign the index `token` collected by the previous index group `auth` as a parameter, and use `^o^` as the internal replacement symbol, that is `^o^token^o^`. as follows:** ```yaml - name: summary @@ -382,12 +382,12 @@ metrics: --- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! 
If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. -**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md index 86a49c06756..4e12fe86b57 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md @@ -61,7 +61,7 @@ Multilayer format:Set key value in the array #### Example -Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. +Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. The raw data returned by the interface is as follows: ```json @@ -172,4 +172,3 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http.md index 5c4735bd2cd..58094187429 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-http.md @@ -13,14 +13,14 @@ sidebar_label: HTTP Protocol Custom Monitoring It can be seen from the process that we define a monitoring type of HTTP protocol. We need to configure HTTP request parameters, configure which Metrics to obtain, and configure the parsing method and parsing script for response data. HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. -**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. 
For details, refer to [**System Default Parsing**](extend-http-default) +**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. For details, refer to [**System Default Parsing**](extend-http-default) **JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) ### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. @@ -202,4 +202,3 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md b/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md index 09e2c031e0c..1f766b08da0 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md @@ -21,7 +21,7 @@ We can obtain the corresponding Metric data through the data fields queried by S > Query a row of data, return the column name of the result set through query and map them to the queried field. -eg: +eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book limit 1; Here the Metric field and the response data can be mapped into a row of collected data one by one. @@ -30,7 +30,7 @@ Here the Metric field and the response data can be mapped into a row of collecte > Query multiple rows of data, return the column names of the result set and map them to the queried fields. 
-eg: +eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book; Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. @@ -39,9 +39,9 @@ Here the Metric field and the response data can be mapped into multiple rows of > Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. -eg: -queried fields:one two three four -query SQL:select key, value from book; +eg: +queried fields:one two three four +query SQL:select key, value from book; SQL response data: | key | value | @@ -57,7 +57,7 @@ Here by mapping the Metric field with the key of the response data, we can obta **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -241,4 +241,3 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md b/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md index d6d9efdb651..e354a4152fd 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md @@ -23,7 +23,7 @@ By configuring the monitoring template YML metrics `field`, `aliasFields`, `obje ![](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. 
@@ -192,4 +192,3 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-point.md b/home/versioned_docs/version-v1.4.x/advanced/extend-point.md index 314e3f1affa..3f02f6040f4 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-point.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-point.md @@ -11,7 +11,7 @@ sidebar_label: Custom Monitoring **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -130,4 +130,3 @@ metrics: parseType: website ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md b/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md index b3bb9173c87..3dae2b8b6dd 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md @@ -23,7 +23,7 @@ By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` p ![](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -169,4 +169,3 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md b/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md index 6db1d4a5675..2226a2a6334 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md @@ -21,12 +21,12 @@ We can obtain the corresponding Metric data through the data fields queried by t > Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. 
-eg: -Metrics of Linux to be queried hostname-host name,uptime-start time -Host name original query command:`hostname` -Start time original query command:`uptime | awk -F "," '{print $1}'` -Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): -`hostname; uptime | awk -F "," '{print $1}'` +eg: +Metrics of Linux to be queried hostname-host name,uptime-start time +Host name original query command:`hostname` +Start time original query command:`uptime | awk -F "," '{print $1}'` +Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): +`hostname; uptime | awk -F "," '{print $1}'` The data responded by the terminal is: ``` @@ -34,8 +34,8 @@ tombook 14:00:15 up 72 days ``` -At last collected Metric data is mapped one by one as: -hostname is `tombook` +At last collected Metric data is mapped one by one as: +hostname is `tombook` uptime is `14:00:15 up 72 days` Here the Metric field and the response data can be mapped into a row of collected data one by one @@ -44,8 +44,8 @@ Here the Metric field and the response data can be mapped into a row of collecte > Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. 
-eg: -Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory +eg: +Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory Memory metrics original query command:`free -m`, Console response: ```shell @@ -55,7 +55,7 @@ Swap: 8191 33 8158 ``` In hertzbeat multiRow format parsing requires a one-to-one mapping between the column name of the response data and the indicaotr value, so the corresponding query SHELL script is: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` +`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` Console response is: ```shell @@ -69,7 +69,7 @@ Here the Metric field and the response data can be mapped into collected data on **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -212,4 +212,3 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` - diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md b/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md index 363cb51e089..cd594f5a378 100644 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md +++ b/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md @@ -55,7 +55,7 @@ As above, usually our background API interface will design such a general return } ``` -**This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app. 
** +**This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** ### Add Monitoring Template Yml @@ -193,7 +193,7 @@ metrics: parseScript: '$.data.apps.*' ``` -**The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type. ** +**The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type.** ![](/img/docs/advanced/extend-http-example-1.png) @@ -215,12 +215,12 @@ metrics: ---- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. Thanks for the old iron support. Refill! -**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/help/activemq.md b/home/versioned_docs/version-v1.4.x/help/activemq.md index f24bc37fbbb..ef3cc911969 100644 --- a/home/versioned_docs/version-v1.4.x/help/activemq.md +++ b/home/versioned_docs/version-v1.4.x/help/activemq.md @@ -143,4 +143,3 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/airflow.md b/home/versioned_docs/version-v1.4.x/help/airflow.md index 52367155d89..a7f77f7f5b6 100644 --- a/home/versioned_docs/version-v1.4.x/help/airflow.md +++ b/home/versioned_docs/version-v1.4.x/help/airflow.md @@ -36,4 +36,3 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] |-------------|------|---------------| 
| value | 无 | Airflow版本 | | git_version | 无 | Airflow git版本 | - diff --git a/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md b/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md index b86ed662940..36e332d9b21 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md @@ -17,7 +17,7 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin 2. **【Save access_token value of the WebHook address of the robot】** -> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` +> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` > Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【Alarm notification】->【Add new recipient】 ->【Select DingDing robot notification method】->【Set DingDing robot ACCESS_TOKEN】-> 【Confirm】** diff --git a/home/versioned_docs/version-v1.4.x/help/alert_discord.md b/home/versioned_docs/version-v1.4.x/help/alert_discord.md index 7aa565c0acf..68296148f22 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_discord.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_discord.md @@ -61,8 +61,8 @@ keywords: [open source monitoring tool, open source alerter, open source Discord 1. 
Discord doesn't receive bot alert notifications -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured +> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured > Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_email.md b/home/versioned_docs/version-v1.4.x/help/alert_email.md index fb6dc7fa571..c507a970bae 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_email.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_email.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source alerter, open source email n ![email](/img/docs/help/alert-notice-1.png) -2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** +2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** ![email](/img/docs/help/alert-notice-2.png) ![email](/img/docs/help/alert-notice-3.png) @@ -32,7 +32,7 @@ keywords: [open source monitoring tool, open source alerter, open source email n 2. Cloud environment tancloud cannot receive email notification -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the mailbox is configured correctly and whether the alarm strategy association is configured. > Please check whether the warning email is blocked in the trash can of the mailbox. 
diff --git a/home/versioned_docs/version-v1.4.x/help/alert_feishu.md b/home/versioned_docs/version-v1.4.x/help/alert_feishu.md index 8f7e9391001..38f7c72cf03 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_feishu.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_feishu.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** @@ -28,7 +28,7 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 1. FeiShu group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_slack.md b/home/versioned_docs/version-v1.4.x/help/alert_slack.md index 5148432fe8b..26bde4ed2e5 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_slack.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_slack.md @@ -29,7 +29,7 @@ Refer to the official website document [Sending messages using Incoming Webhooks 1. 
Slack did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the alarm information has been triggered in the alarm center > Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_smn.md b/home/versioned_docs/version-v1.4.x/help/alert_smn.md index b4013b9f902..53774315561 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_smn.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_smn.md @@ -37,7 +37,7 @@ keywords: [ open source monitoring tool, open source alerter, open source Huawei 1. Huawei Cloud SMN did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the alarm information has been triggered in the alarm center > Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_telegram.md b/home/versioned_docs/version-v1.4.x/help/alert_telegram.md index cb60f266778..1689788f0f4 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_telegram.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_telegram.md @@ -58,8 +58,8 @@ Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token 1. 
Telegram did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured +> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured > UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_threshold.md b/home/versioned_docs/version-v1.4.x/help/alert_threshold.md index c62dee02704..f4b934fd27c 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_threshold.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_threshold.md @@ -14,12 +14,12 @@ sidebar_label: Threshold alarm configuration As shown above: -**Metric object**:Select the monitoring Metric object for which we need to configure the threshold. Eg:website monitoring type -> summary Metric set -> responseTime-response time Metric -**Threshold trigger expression**:Calculate and judge whether to trigger the threshold according to this expression. See the page prompts for expression environment variables and operators. Eg:set the response time greater than 50 to trigger an alarm, and the expression is `responseTime > 50`. For detailed help on threshold expression, see [Threshold expression help](alert_threshold_expr) +**Metric object**:Select the monitoring Metric object for which we need to configure the threshold. Eg:website monitoring type -> summary Metric set -> responseTime-response time Metric +**Threshold trigger expression**:Calculate and judge whether to trigger the threshold according to this expression. See the page prompts for expression environment variables and operators. 
Eg:set the response time greater than 50 to trigger an alarm, and the expression is `responseTime > 50`. For detailed help on threshold expression, see [Threshold expression help](alert_threshold_expr) **Alarm level**:The alarm level that triggers the threshold, from low to high: warning, critical, emergency. -**Trigger times**:How many times will the threshold be triggered before the alarm is really triggered. -**Notification template**:Notification information Template sent after alarm triggering, See page prompts for template environment variables, eg:`${app}.${metrics}.${metric} Metric's value is ${responseTime}, greater than 50 triggers an alarm` -**Global default**: Set whether this threshold is valid for such global Metrics, and the default is No. After adding a new threshold, you need to associate the threshold with the monitoring object, so that the threshold will take effect for this monitoring. +**Trigger times**:How many times will the threshold be triggered before the alarm is really triggered. +**Notification template**:Notification information Template sent after alarm triggering, See page prompts for template environment variables, eg:`${app}.${metrics}.${metric} Metric's value is ${responseTime}, greater than 50 triggers an alarm` +**Global default**: Set whether this threshold is valid for such global Metrics, and the default is No. After adding a new threshold, you need to associate the threshold with the monitoring object, so that the threshold will take effect for this monitoring. **Enable alarm**:This alarm threshold configuration is enabled or disabled. 2. 
**Threshold association monitoring⚠️ 【Alarm configuration】-> 【Threshold just set】-> 【Configure associated monitoring】-> 【Confirm after configuration】** @@ -30,7 +30,7 @@ As shown above: ![threshold](/img/docs/help/alert-threshold-3.png) -**After the threshold alarm is configured, the alarm information that has been successfully triggered can be seen in 【alarm center】.** +**After the threshold alarm is configured, the alarm information that has been successfully triggered can be seen in 【alarm center】.** **If you need to notify the relevant personnel of the alarm information by email, Wechat, DingDing and Feishu, it can be configured in 【alarm notification】.** Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md b/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md index 493d7fbce15..45d80f82764 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md @@ -23,28 +23,28 @@ equals(str1,str2) || ``` -Rich operators allow us to define expressions freely. +Rich operators allow us to define expressions freely. Note⚠️ For the equality of string, please use `equals(str1,str2)`, while for the equality judgment of number, please use == or != #### Supported environment variables > Environment variables, i.e. supported variables such as Metric values, are used in the expression. When the threshold value is calculated and judged, the variables will be replaced with actual values for calculation. -Non fixed environment variables:These variables will change dynamically according to the monitoring Metric object we choose. 
For example, if we choose **response time Metric of website monitoring**, the environment variables will have `responseTime - This is the response time variable` +Non fixed environment variables:These variables will change dynamically according to the monitoring Metric object we choose. For example, if we choose **response time Metric of website monitoring**, the environment variables will have `responseTime - This is the response time variable` If we want to set **when the response time of website monitoring is greater than 400** to trigger an alarm,the expression is `responseTime>400` -Fixed environment variables(Rarely used):`instance : Row instance value` +Fixed environment variables(Rarely used):`instance : Row instance value` This variable is mainly used to calculate multiple instances. For example, we collected `usage`(`usage is non fixed environment variables`) of disk C and disk D, but we only want to set the alarm when **the usage of C disk is greater than 80**. Then the expression is `equals(instance,"c")&&usage>80` #### Expression setting case -1. Website monitoring -> Trigger alarm when the response time is greater than or equal to 400ms +1. Website monitoring -> Trigger alarm when the response time is greater than or equal to 400ms `responseTime>=400` -2. API monitoring -> Trigger alarm when the response time is greater than 3000ms +2. API monitoring -> Trigger alarm when the response time is greater than 3000ms `responseTime>3000` -3. Entire site monitoring -> Trigger alarm when URL(instance) path is `https://baidu.com/book/3` and the response time is greater than 200ms +3. Entire site monitoring -> Trigger alarm when URL(instance) path is `https://baidu.com/book/3` and the response time is greater than 200ms `equals(instance,"https://baidu.com/book/3")&&responseTime>200` -4. MYSQL monitoring -> status Metric group -> Trigger alarm when hreads_running(number of running threads) Metric is greater than 7 +4. 
MYSQL monitoring -> status Metric group -> Trigger alarm when hreads_running(number of running threads) Metric is greater than 7 `threads_running>7` Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_wework.md b/home/versioned_docs/version-v1.4.x/help/alert_wework.md index ca14d5615fa..ce344200301 100644 --- a/home/versioned_docs/version-v1.4.x/help/alert_wework.md +++ b/home/versioned_docs/version-v1.4.x/help/alert_wework.md @@ -15,7 +15,7 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** @@ -32,7 +32,7 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 1. The enterprise wechat group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.4.x/help/api.md b/home/versioned_docs/version-v1.4.x/help/api.md index 136dd081926..958fb532639 100644 --- a/home/versioned_docs/version-v1.4.x/help/api.md +++ b/home/versioned_docs/version-v1.4.x/help/api.md @@ -32,4 +32,3 @@ keywords: [open source monitoring tool, monitoring http api] | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/versioned_docs/version-v1.4.x/help/centos.md b/home/versioned_docs/version-v1.4.x/help/centos.md index 60b770ebf96..858a1d2bb94 100644 --- a/home/versioned_docs/version-v1.4.x/help/centos.md +++ b/home/versioned_docs/version-v1.4.x/help/centos.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source os monitoring tool, monitori | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/versioned_docs/version-v1.4.x/help/dm.md b/home/versioned_docs/version-v1.4.x/help/dm.md index 82159bf2408..f8e031bfe20 100644 --- a/home/versioned_docs/version-v1.4.x/help/dm.md +++ b/home/versioned_docs/version-v1.4.x/help/dm.md @@ -46,4 +46,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | dm_sql_thd | None | Thread for writing dmsql dmserver | | dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | | dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | - diff --git a/home/versioned_docs/version-v1.4.x/help/docker.md b/home/versioned_docs/version-v1.4.x/help/docker.md index 0e3a1f0b428..63fe3b03a19 100644 --- a/home/versioned_docs/version-v1.4.x/help/docker.md +++ b/home/versioned_docs/version-v1.4.x/help/docker.md @@ -32,7 +32,7 @@ systemctl daemon-reload systemctl restart docker ``` -**Note: Remember to open the `2375` port number in the server console. 
** +**Note: Remember to open the `2375` port number in the server console.** **3. If the above method does not work:** @@ -99,4 +99,3 @@ firewall-cmd --reload | cpu_delta | None | The number of CPUs already used by the Docker container | | number_cpus | None | The number of CPUs that the Docker container can use | | cpu_usage | None | Docker container CPU usage | - diff --git a/home/versioned_docs/version-v1.4.x/help/doris_be.md b/home/versioned_docs/version-v1.4.x/help/doris_be.md index 8dcde7b549b..3e6fd37de03 100644 --- a/home/versioned_docs/version-v1.4.x/help/doris_be.md +++ b/home/versioned_docs/version-v1.4.x/help/doris_be.md @@ -168,4 +168,3 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库BE监控] | 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|------------------------------------------| | value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | - diff --git a/home/versioned_docs/version-v1.4.x/help/doris_fe.md b/home/versioned_docs/version-v1.4.x/help/doris_fe.md index 67c4de34042..23432ad2cbd 100644 --- a/home/versioned_docs/version-v1.4.x/help/doris_fe.md +++ b/home/versioned_docs/version-v1.4.x/help/doris_fe.md @@ -124,4 +124,3 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库FE监控] | committed | 无 | 已提交 | | visible | 无 | 可见 | | aborted | 无 | 已中止/已撤销 | - diff --git a/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md b/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md index fd36206bc6e..332767b2a39 100644 --- a/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md +++ b/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md @@ -99,4 +99,3 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo | dynamic | None | Dynamic thread pool or not | | run_timeout_count | None | Number of running timeout tasks | | queue_timeout_count | None | Number of tasks waiting for timeout | - diff --git a/home/versioned_docs/version-v1.4.x/help/fullsite.md b/home/versioned_docs/version-v1.4.x/help/fullsite.md index 6145f238bdc..bad94c4b751 100644 --- 
a/home/versioned_docs/version-v1.4.x/help/fullsite.md +++ b/home/versioned_docs/version-v1.4.x/help/fullsite.md @@ -5,8 +5,8 @@ sidebar_label: Full site Monitor keywords: [open source monitoring tool, open source website monitoring tool, monitoring sitemap metrics] --- -> Available or not to monitor all pages of the website. -> A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. +> Available or not to monitor all pages of the website. +> A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. > Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. ### Configuration parameter @@ -32,4 +32,3 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon | statusCode | none | Response HTTP status code for requesting the website | | responseTime | ms | Website response time | | errorMsg | none | Error message feedback after requesting the website | - diff --git a/home/versioned_docs/version-v1.4.x/help/guide.md b/home/versioned_docs/version-v1.4.x/help/guide.md index ee506b0d6cd..9d2e9dcbf6b 100644 --- a/home/versioned_docs/version-v1.4.x/help/guide.md +++ b/home/versioned_docs/version-v1.4.x/help/guide.md @@ -9,7 +9,7 @@ sidebar_label: Help Center ## 🔬 Monitoring services -> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. +> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. > Planned monitoring type:application service, database, operating system, cloud native, open source middleware. 
### Application service monitoring @@ -77,8 +77,8 @@ More details see 👉 [threshold alarm](alert_threshold)
### Alarm notification -> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) -> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. +> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) +> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. > After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver.  👉 [Configure Email Notification](alert_email)
@@ -86,6 +86,6 @@ More details see 👉 [threshold alarm](alert_threshold)
 👉 [Configure Slack Notification](alert_webhook)
 👉 [Configure Telegram Notification](alert_webhook)
 👉 [Configure WebHook Notification](alert_webhook)
- 👉 [Configure enterprise WeChat Robot Notification](alert_wework)
+ 👉 [Configure enterprise WeChat Robot Notification](alert_wework)
 👉 [Configure DingDing Robot Notification](alert_dingtalk)
 👉 [Configure FeiShu Robot Notification](alert_feishu)
diff --git a/home/versioned_docs/version-v1.4.x/help/hadoop.md b/home/versioned_docs/version-v1.4.x/help/hadoop.md index 56f19472277..e12a44807ea 100644 --- a/home/versioned_docs/version-v1.4.x/help/hadoop.md +++ b/home/versioned_docs/version-v1.4.x/help/hadoop.md @@ -87,4 +87,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/hive.md b/home/versioned_docs/version-v1.4.x/help/hive.md index 806969c2e7c..1293fbd3802 100644 --- a/home/versioned_docs/version-v1.4.x/help/hive.md +++ b/home/versioned_docs/version-v1.4.x/help/hive.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | init | MB | The initial amount of memory requested for the memory pool. | | max | MB | The maximum amount of memory that can be allocated for the memory pool. | | used | MB | The amount of memory currently being used by the memory pool. | - diff --git a/home/versioned_docs/version-v1.4.x/help/iotdb.md b/home/versioned_docs/version-v1.4.x/help/iotdb.md index bec827feb73..011b9cbec12 100644 --- a/home/versioned_docs/version-v1.4.x/help/iotdb.md +++ b/home/versioned_docs/version-v1.4.x/help/iotdb.md @@ -118,4 +118,3 @@ predefinedMetrics: |-------------|-------------|----------------------------------| | name | None | name | | connection | none | thrift current connection number | - diff --git a/home/versioned_docs/version-v1.4.x/help/issue.md b/home/versioned_docs/version-v1.4.x/help/issue.md index 6e20f392661..384387b45d6 100644 --- a/home/versioned_docs/version-v1.4.x/help/issue.md +++ b/home/versioned_docs/version-v1.4.x/help/issue.md @@ -17,9 +17,9 @@ sidebar_label: Common issues 3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. 
The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. +> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. +> See 4. If the history chart on the monitoring page is not displayed,popup [please configure dependency service on TDengine time series database] @@ -28,36 +28,35 @@ sidebar_label: Common issues ### Docker Deployment common issues -1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** +1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Beacuse the docker default network mode is Bridge mode which can't access loacl machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. +> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. 
`docker run -d --network host .....` -2. **According to the process deploy,visit http://ip:1157/ no interface** +2. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issuess: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. +> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. > two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. > >> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. +> one:Check whether database account and password configured is correct, the database is created. > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. ### Package Deployment common issues -1. **According to the process deploy,visit http://ip:1157/ no interface** +1. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issuess: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. 
+> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. +> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. +> one:Check whether database account and password configured is correct, the database is created. > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. - diff --git a/home/versioned_docs/version-v1.4.x/help/jetty.md b/home/versioned_docs/version-v1.4.x/help/jetty.md index 6e069553dba..ccec65b5559 100644 --- a/home/versioned_docs/version-v1.4.x/help/jetty.md +++ b/home/versioned_docs/version-v1.4.x/help/jetty.md @@ -92,4 +92,3 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/jvm.md b/home/versioned_docs/version-v1.4.x/help/jvm.md index 3b47e0e7a8a..477d9fbece1 100644 --- a/home/versioned_docs/version-v1.4.x/help/jvm.md +++ b/home/versioned_docs/version-v1.4.x/help/jvm.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source java jvm monitoring tool, mo 1. 
Add JVM `VM options` When Start Server ⚠️ customIP -Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#remote +Refer: ```shell -Djava.rmi.server.hostname=customIP @@ -74,4 +74,3 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/kafka.md b/home/versioned_docs/version-v1.4.x/help/kafka.md index f86913733b1..48d06b2037b 100644 --- a/home/versioned_docs/version-v1.4.x/help/kafka.md +++ b/home/versioned_docs/version-v1.4.x/help/kafka.md @@ -87,4 +87,3 @@ exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" | FiveMinuteRate | % | Five Minute Rate | | MeanRate | % | Mean Rate | | FifteenMinuteRate | % | Fifteen Minute Rate | - diff --git a/home/versioned_docs/version-v1.4.x/help/kubernetes.md b/home/versioned_docs/version-v1.4.x/help/kubernetes.md index 45adda576fc..3cb2336e768 100644 --- a/home/versioned_docs/version-v1.4.x/help/kubernetes.md +++ b/home/versioned_docs/version-v1.4.x/help/kubernetes.md @@ -13,7 +13,7 @@ If you want to monitor the information in 'Kubernetes', you need to obtain an au Refer to the steps to obtain token -#### method one: +#### method one 1. 
Create a service account and bind the default cluster-admin administrator cluster role @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### method two: +#### method two ```shell kubectl create serviceaccount cluster-admin @@ -93,4 +93,3 @@ kubectl create token --duration=1000h cluster-admin | cluster_ip | None | cluster ip | | selector | None | tag selector matches | | creation_time | None | Created | - diff --git a/home/versioned_docs/version-v1.4.x/help/linux.md b/home/versioned_docs/version-v1.4.x/help/linux.md index 6c22028114c..f5c77a72ca6 100644 --- a/home/versioned_docs/version-v1.4.x/help/linux.md +++ b/home/versioned_docs/version-v1.4.x/help/linux.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source linux monitoring tool, monit | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/versioned_docs/version-v1.4.x/help/mariadb.md b/home/versioned_docs/version-v1.4.x/help/mariadb.md index 374e6e6a081..8373b61cec3 100644 --- a/home/versioned_docs/version-v1.4.x/help/mariadb.md +++ b/home/versioned_docs/version-v1.4.x/help/mariadb.md @@ -51,4 +51,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | innodb_data_writes | none | innodb average number of writes from file per second | | innodb_data_read | KB | innodb average amount of data read per second | | innodb_data_written | KB | innodb average amount of data written per second | - diff --git a/home/versioned_docs/version-v1.4.x/help/memcached.md b/home/versioned_docs/version-v1.4.x/help/memcached.md index 920da021e6b..f3c1ddfab55 100644 --- a/home/versioned_docs/version-v1.4.x/help/memcached.md +++ b/home/versioned_docs/version-v1.4.x/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance with 1.4 You need to use the stats command to 
view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. @@ -32,7 +32,7 @@ STAT version 1.4.15 ... ``` -**There is help_doc: https://www.runoob.com/memcached/memcached-stats.html** +**There is help_doc: ** ### Configuration parameter @@ -67,4 +67,3 @@ STAT version 1.4.15 | cmd_flush | | Flush command request count | | get_misses | | Get command misses | | delete_misses | | Delete command misses | - diff --git a/home/versioned_docs/version-v1.4.x/help/mysql.md b/home/versioned_docs/version-v1.4.x/help/mysql.md index dca64b3f9f0..86922782e27 100644 --- a/home/versioned_docs/version-v1.4.x/help/mysql.md +++ b/home/versioned_docs/version-v1.4.x/help/mysql.md @@ -51,4 +51,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | innodb_data_writes | none | innodb average number of writes from file per second | | innodb_data_read | KB | innodb average amount of data read per second | | innodb_data_written | KB | innodb average amount of data written per second | - diff --git a/home/versioned_docs/version-v1.4.x/help/nebulagraph.md b/home/versioned_docs/version-v1.4.x/help/nebulagraph.md index c23e39c14fe..60ac139f827 100644 --- a/home/versioned_docs/version-v1.4.x/help/nebulagraph.md +++ b/home/versioned_docs/version-v1.4.x/help/nebulagraph.md @@ -14,13 +14,13 @@ The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. ``` -### +### **1、Obtain available parameters through the stats and rocksdb stats interfaces.** 1.1、 If you only need to get nebulaGraph_stats, you need to ensure that you have access to stats, or you'll get errors. 
-The default port is 19669 and the access address is http://ip:19669/stats +The default port is 19669 and the access address is 1.2、If you need to obtain additional parameters for rocksdb stats, you need to ensure that you have access to rocksdb stats, otherwise an error will be reported. @@ -28,11 +28,11 @@ stats, otherwise an error will be reported. Once you connect to NebulaGraph for the first time, you must first register your Storage service in order to properly query your data. -**There is help_doc: https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** +**There is help_doc: ** -**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** +**** -The default port is 19779 and the access address is:http://ip:19779/rocksdb_stats +The default port is 19779 and the access address is: ### Configuration parameter @@ -53,7 +53,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat #### Metrics Set:nebulaGraph_stats Too many indicators, related links are as follows -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** +**** | Metric name | Metric unit | Metric help description | |---------------------------------------|-------------|--------------------------------------------------------------| @@ -65,11 +65,10 @@ Too many indicators, related links are as follows #### Metrics Set:rocksdb_stats Too many indicators, related links are as follows -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** +**** | Metric name | Metric unit | Metric help description | |----------------------------|-------------|-------------------------------------------------------------| | rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. | | rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | | ... | | ... 
| - diff --git a/home/versioned_docs/version-v1.4.x/help/nginx.md b/home/versioned_docs/version-v1.4.x/help/nginx.md index f630e4d4d24..a5662be985f 100644 --- a/home/versioned_docs/version-v1.4.x/help/nginx.md +++ b/home/versioned_docs/version-v1.4.x/help/nginx.md @@ -46,8 +46,8 @@ server { location /nginx-status { stub_status on; access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } ``` @@ -93,8 +93,8 @@ http { server { location /req-status { req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } } @@ -109,7 +109,7 @@ nginx -s reload 4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. -**Refer Doc: https://github.com/zls0424/ngx_req_status** +**Refer Doc: ** **⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** @@ -152,4 +152,3 @@ nginx -s reload | requests | | Total requests | | active | | Current concurrent connections | | bandwidth | kb | Current bandwidth | - diff --git a/home/versioned_docs/version-v1.4.x/help/ntp.md b/home/versioned_docs/version-v1.4.x/help/ntp.md index 666f2a6b39a..fc7f7925ca6 100644 --- a/home/versioned_docs/version-v1.4.x/help/ntp.md +++ b/home/versioned_docs/version-v1.4.x/help/ntp.md @@ -35,4 +35,3 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito | stratum | | The stratumevel of the NTP server, indicating its distance from a reference clock). | | referenceId | | An identifier that indicates the reference clock or time source used by the NTP server). | | precision | | The precision of the NTP server's clock, indicating its accuracy). 
| - diff --git a/home/versioned_docs/version-v1.4.x/help/opengauss.md b/home/versioned_docs/version-v1.4.x/help/opengauss.md index 28171658951..3490bb8b003 100644 --- a/home/versioned_docs/version-v1.4.x/help/opengauss.md +++ b/home/versioned_docs/version-v1.4.x/help/opengauss.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/versioned_docs/version-v1.4.x/help/oracle.md b/home/versioned_docs/version-v1.4.x/help/oracle.md index 50d2f6422bc..978e6736620 100644 --- a/home/versioned_docs/version-v1.4.x/help/oracle.md +++ b/home/versioned_docs/version-v1.4.x/help/oracle.md @@ -61,4 +61,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | qps | QPS | I/O Requests per second | | tps | TPS | User transaction per second | | mbps | MBPS | I/O Megabytes per second | - diff --git a/home/versioned_docs/version-v1.4.x/help/ping.md b/home/versioned_docs/version-v1.4.x/help/ping.md index 7c894f488ff..bed89d53dcf 100644 --- a/home/versioned_docs/version-v1.4.x/help/ping.md +++ b/home/versioned_docs/version-v1.4.x/help/ping.md @@ -31,7 +31,6 @@ keywords: [open source monitoring tool, open source network monitoring tool, mon 1. Ping connectivity monitoring exception when installing hertzbeat for package deployment. The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. 
-> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - +> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. +> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. +> See diff --git a/home/versioned_docs/version-v1.4.x/help/pop3.md b/home/versioned_docs/version-v1.4.x/help/pop3.md index fffff2a494f..c73884a0afe 100644 --- a/home/versioned_docs/version-v1.4.x/help/pop3.md +++ b/home/versioned_docs/version-v1.4.x/help/pop3.md @@ -45,4 +45,3 @@ If you want to monitor information in 'POP3' with this monitoring type, you just |--------------|-------------|-----------------------------------------| | email_count | | Number of emails | | mailbox_size | kb | The total size of emails in the mailbox | - diff --git a/home/versioned_docs/version-v1.4.x/help/port.md b/home/versioned_docs/version-v1.4.x/help/port.md index 35ff99fcb57..8d58ac1f5c5 100644 --- a/home/versioned_docs/version-v1.4.x/help/port.md +++ b/home/versioned_docs/version-v1.4.x/help/port.md @@ -26,4 +26,3 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/versioned_docs/version-v1.4.x/help/postgresql.md b/home/versioned_docs/version-v1.4.x/help/postgresql.md index 57834a713bd..5191f7d325d 100644 --- a/home/versioned_docs/version-v1.4.x/help/postgresql.md +++ b/home/versioned_docs/version-v1.4.x/help/postgresql.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | 
Number of current client connections | - diff --git a/home/versioned_docs/version-v1.4.x/help/rabbitmq.md b/home/versioned_docs/version-v1.4.x/help/rabbitmq.md index 917ca63c3d3..e49d572ee72 100644 --- a/home/versioned_docs/version-v1.4.x/help/rabbitmq.md +++ b/home/versioned_docs/version-v1.4.x/help/rabbitmq.md @@ -18,7 +18,7 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo rabbitmq-plugins enable rabbitmq_management ``` -2. Access http://ip:15672/ with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. +2. Access with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. 3. Just add the corresponding RabbitMQ monitoring in HertzBeat, the parameters use the IP port of Management, and the default account password. @@ -123,4 +123,3 @@ rabbitmq-plugins enable rabbitmq_management | message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | - diff --git a/home/versioned_docs/version-v1.4.x/help/redis.md b/home/versioned_docs/version-v1.4.x/help/redis.md index 58248fb0b45..0a0c9f77a65 100644 --- a/home/versioned_docs/version-v1.4.x/help/redis.md +++ b/home/versioned_docs/version-v1.4.x/help/redis.md @@ -237,4 +237,3 @@ keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] | cmdstat_lpop | 无 | lpop命令的统计信息 | | cmdstat_rpop | 无 | rpop命令的统计信息 | | cmdstat_llen | 无 | llen命令的统计信息 | - diff --git a/home/versioned_docs/version-v1.4.x/help/shenyu.md b/home/versioned_docs/version-v1.4.x/help/shenyu.md index c7f12bbfaf0..aa4a43a8d5c 100644 --- a/home/versioned_docs/version-v1.4.x/help/shenyu.md +++ 
b/home/versioned_docs/version-v1.4.x/help/shenyu.md @@ -127,4 +127,3 @@ Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond |-------------|-------------|---------------------------------------------------------| | state | none | thread state | | value | None | The number of threads corresponding to the thread state | - diff --git a/home/versioned_docs/version-v1.4.x/help/smtp.md b/home/versioned_docs/version-v1.4.x/help/smtp.md index fedb17e0040..4be044bc090 100644 --- a/home/versioned_docs/version-v1.4.x/help/smtp.md +++ b/home/versioned_docs/version-v1.4.x/help/smtp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit Determine whether the server is available through the hello command in SMTP ``` -> see https://datatracker.ietf.org/doc/html/rfc821#page-13 +> see <https://datatracker.ietf.org/doc/html/rfc821#page-13> **Protocol Use:SMTP** @@ -38,4 +38,3 @@ Determine whether the server is available through the hello command in SMTP | response | | Response Status. | | smtpBanner | | Banner of SMTP server. | | heloInfo | | Response information returned by helo. | - diff --git a/home/versioned_docs/version-v1.4.x/help/spark.md b/home/versioned_docs/version-v1.4.x/help/spark.md index 41865300024..8bc045fc9a1 100644 --- a/home/versioned_docs/version-v1.4.x/help/spark.md +++ b/home/versioned_docs/version-v1.4.x/help/spark.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source java spark monitoring tool, 1. 
Add Spark `VM options` When Start Server ⚠️ customIP -Refer: https://spark.apache.org/docs/latest/spark-standalone.html +Refer: <https://spark.apache.org/docs/latest/spark-standalone.html> **监控配置spark的监控主要分为Master、Worker、driver、executor监控。Master和Worker的监控在spark集群运行时即可监控,Driver和Excutor的监控需要针对某一个app来进行监控。** **如果都要监控,需要根据以下步骤来配置** @@ -112,4 +112,3 @@ gement.jmxremote.port=8711 | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.4.x/help/spring_gateway.md b/home/versioned_docs/version-v1.4.x/help/spring_gateway.md index 66c5f0b4f29..7f27b7fe8ef 100644 --- a/home/versioned_docs/version-v1.4.x/help/spring_gateway.md +++ b/home/versioned_docs/version-v1.4.x/help/spring_gateway.md @@ -87,4 +87,3 @@ management: | predicate | None | This is a routing matching rule | | uri | None | This is a service resource identifier | | order | None | The priority of this route | - diff --git a/home/versioned_docs/version-v1.4.x/help/springboot2.md b/home/versioned_docs/version-v1.4.x/help/springboot2.md index 6452aff270e..08029dc23b5 100644 --- a/home/versioned_docs/version-v1.4.x/help/springboot2.md +++ b/home/versioned_docs/version-v1.4.x/help/springboot2.md @@ -93,4 +93,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ |-------------|-------------|--------------------------------------| | space | None | Memory space name | | mem_used | MB | This space occupies a memory size of | - diff --git a/home/versioned_docs/version-v1.4.x/help/sqlserver.md b/home/versioned_docs/version-v1.4.x/help/sqlserver.md index 71bd8ebdc83..06e19252ede 100644 --- a/home/versioned_docs/version-v1.4.x/help/sqlserver.md +++ b/home/versioned_docs/version-v1.4.x/help/sqlserver.md @@ -57,20 +57,20 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo 1. 
SSL connection problem fixed -jdk version: jdk11 -Description of the problem: SQL Server 2019 uses the SA user connection to report an error +jdk version: jdk11 +Description of the problem: SQL Server 2019 uses the SA user connection to report an error Error message: ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". ClientConnectionId:xxxxxxxxxxxxxxxxxxxx ``` -Screenshot of the problem: +Screenshot of the problem: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) -solution: +solution: Use advanced settings when adding `SqlServer` monitoring, customize JDBC URL, add parameter configuration after the spliced jdbc url, ```;encrypt=true;trustServerCertificate=true;```This parameter true means unconditionally trust the server returned any root certificate. 
Example: ```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` -Reference document: [microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building- failed-unable-to-find-valid-certification/ba-p/2591304) +Reference document: [microsoft pkix-path-building-failed-unable-to-find-valid-certification]( failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/versioned_docs/version-v1.4.x/help/ssl_cert.md b/home/versioned_docs/version-v1.4.x/help/ssl_cert.md index e7b60fc8a89..253485f8b1a 100644 --- a/home/versioned_docs/version-v1.4.x/help/ssl_cert.md +++ b/home/versioned_docs/version-v1.4.x/help/ssl_cert.md @@ -31,4 +31,3 @@ keywords: [open source monitoring tool, open source ssl cert monitoring tool, mo | start_timestamp | ms millisecond | Validity start timestamp | | end_time | None | Expiration time | | end_timestamp | ms milliseconds | expiration timestamp | - diff --git a/home/versioned_docs/version-v1.4.x/help/tomcat.md b/home/versioned_docs/version-v1.4.x/help/tomcat.md index 60591f85579..9f103dfe5be 100644 --- a/home/versioned_docs/version-v1.4.x/help/tomcat.md +++ b/home/versioned_docs/version-v1.4.x/help/tomcat.md @@ -71,4 +71,3 @@ keywords: [open source monitoring tool, open source tomcat monitoring tool, moni ```aidl CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` - diff --git a/home/versioned_docs/version-v1.4.x/help/ubuntu.md b/home/versioned_docs/version-v1.4.x/help/ubuntu.md index 8d3b65ce195..9de28efe095 100644 --- a/home/versioned_docs/version-v1.4.x/help/ubuntu.md +++ b/home/versioned_docs/version-v1.4.x/help/ubuntu.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source linux ubuntu monitoring 
tool | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/versioned_docs/version-v1.4.x/help/website.md b/home/versioned_docs/version-v1.4.x/help/website.md index afe86397c9e..1041755f156 100644 --- a/home/versioned_docs/version-v1.4.x/help/website.md +++ b/home/versioned_docs/version-v1.4.x/help/website.md @@ -27,4 +27,3 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/versioned_docs/version-v1.4.x/help/windows.md b/home/versioned_docs/version-v1.4.x/help/windows.md index e4be2bd6d96..99d305cbce5 100644 --- a/home/versioned_docs/version-v1.4.x/help/windows.md +++ b/home/versioned_docs/version-v1.4.x/help/windows.md @@ -8,10 +8,10 @@ keywords: [open source monitoring tool, open source windows monitoring tool, mon > Collect and monitor the general performance Metrics of Windows operating system through SNMP protocol. > Note⚠️ You need to start SNMP service for Windows server. 
-References: -[What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) +References: +[What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) +[What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) +[Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) [Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### Configuration parameter @@ -41,4 +41,3 @@ References: | services | number | Current number of services | | processes | number | Current number of processes | | responseTime | ms | Collection response time | - diff --git a/home/versioned_docs/version-v1.4.x/help/zookeeper.md b/home/versioned_docs/version-v1.4.x/help/zookeeper.md index f14b0bb8273..362edf8cff9 100644 --- a/home/versioned_docs/version-v1.4.x/help/zookeeper.md +++ b/home/versioned_docs/version-v1.4.x/help/zookeeper.md @@ -100,4 +100,3 @@ Complete! | zk_max_latency | ms | Max latency | | zk_ephemerals_count | number | Number of ephemeral nodes | | zk_min_latency | ms | Min latency | - diff --git a/home/versioned_docs/version-v1.4.x/introduce.md b/home/versioned_docs/version-v1.4.x/introduce.md index 60b8a623bf4..f968e1063c9 100644 --- a/home/versioned_docs/version-v1.4.x/introduce.md +++ b/home/versioned_docs/version-v1.4.x/introduce.md @@ -37,11 +37,12 @@ slug: / > HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. 
--- + ### Powerful Monitoring Templates > Before we discuss the customizable monitoring capabilities of HertzBeat, which we mentioned at the beginning, let's introduce the different monitoring templates of HertzBeat. And it is because of this monitoring template design that the advanced features come later. -HertzBeat itself did not create a data collection protocol for the monitoring client to adapt to. Instead, HertzBeat makes full use of the existing ecosystem, `SNMP protocol` to collect information from network switches and routers, `JMX specification` to collect information from Java applications, `JDBC specification` to collect information from datasets, `SSH` to directly connect to scripts to get the display information, `HTTP+ (JsonPath | prometheus, etc.) ` to parse the information from API interfaces, `IPMI protocol to collect server information, and so on. +HertzBeat itself did not create a data collection protocol for the monitoring client to adapt to. Instead, HertzBeat makes full use of the existing ecosystem, `SNMP protocol` to collect information from network switches and routers, `JMX specification` to collect information from Java applications, `JDBC specification` to collect information from datasets, `SSH` to directly connect to scripts to get the display information, `HTTP+ (JsonPath | prometheus, etc.)` to parse the information from API interfaces, `IPMI protocol to collect server information, and so on. HertzBeat uses these existing standard protocols or specifications, makes them abstractly configurable, and finally makes them all available in the form of YML format monitoring templates that can be written to create templates that use these protocols to collect any desired metrics data. 
![hertzbeat](/img/blog/multi-protocol.png) @@ -53,7 +54,7 @@ Do you believe that users can just write a monitoring template on the UI page, c **There are a lot of built-in monitoring templates for users to add directly on the page, one monitoring type corresponds to one YML monitoring template**. -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), +* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), @@ -62,7 +63,7 @@ Do you believe that users can just write a monitoring template on the UI page, c [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), 
[Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), +* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), @@ -71,13 +72,13 @@ Do you believe that users can just write a monitoring template on the UI page, c [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris 
FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), [NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), +* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian 
Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), +* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), @@ -85,31 +86,31 @@ Do you believe that users can just write a monitoring template on the UI page, c [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), 
[Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), +* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) +* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) -- And More Your Custom Template. -- Notified Support `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. +* And More Your Custom Template. +* Notified Support `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. 
### Powerful Customization -> From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. -> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. +> From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. +> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. > The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. ![hertzbeat](/img/docs/custom-arch.png) ### No Agent Required -> For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. -> You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. +> For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. +> You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. > The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. 
The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. -- For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. -- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. +* For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. +* For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. **Password and other sensitive information is encrypted on all links**. @@ -117,9 +118,9 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > When the number of monitors rises exponentially, the collection performance drops or the environment is unstable and prone to single point of failure of the collectors, then our collector clusters come into play. -- HertzBeat supports the deployment of collector clusters and the horizontal expansion of multiple collector clusters to exponentially increase the number of monitorable tasks and collection performance. -- Monitoring tasks are self-scheduled in the collector cluster, single collector hangs without sensing the failure to migrate the collection tasks, and the newly added collector nodes are automatically scheduled to share the collection pressure. -- It is very easy to switch between stand-alone mode and cluster mode without additional component deployment. +* HertzBeat supports the deployment of collector clusters and the horizontal expansion of multiple collector clusters to exponentially increase the number of monitorable tasks and collection performance. 
+* Monitoring tasks are self-scheduled in the collector cluster, single collector hangs without sensing the failure to migrate the collection tasks, and the newly added collector nodes are automatically scheduled to share the collection pressure. +* It is very easy to switch between stand-alone mode and cluster mode without additional component deployment. ![hertzbeat](/img/docs/cluster-arch.png) @@ -127,25 +128,25 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > Two locations, three centers, multi-cloud environments, multi-isolated networks, you may have heard of these scenarios. When there is a need for a unified monitoring system to monitor the IT resources of different isolated networks, this is where our Cloud Edge Collaboration comes in. -In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. +In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. `HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. ![hertzbeat](/img/docs/cluster-arch.png) ### Easy to Use -- Set **Monitoring+Alarm+Notification** All in one, no need to deploy multiple component services separately. -- Full UI interface operation, no matter adding new monitor, modifying monitor template, or alarm threshold notification, all can be done in WEB interface, no need to modify files or scripts or reboot. 
-- No Agent is needed, we only need to fill in the required IP, port, account, password and other parameters in the WEB interface. -- Customization friendly, only need a monitoring template YML, automatically generate monitoring management page, data chart page, threshold configuration for corresponding monitoring types. -- Threshold alarm notification friendly, based on the expression threshold configuration, a variety of alarm notification channels, support alarm silence, time label alarm level filtering and so on. +* Set **Monitoring+Alarm+Notification** All in one, no need to deploy multiple component services separately. +* Full UI interface operation, no matter adding new monitor, modifying monitor template, or alarm threshold notification, all can be done in WEB interface, no need to modify files or scripts or reboot. +* No Agent is needed, we only need to fill in the required IP, port, account, password and other parameters in the WEB interface. +* Customization friendly, only need a monitoring template YML, automatically generate monitoring management page, data chart page, threshold configuration for corresponding monitoring types. +* Threshold alarm notification friendly, based on the expression threshold configuration, a variety of alarm notification channels, support alarm silence, time label alarm level filtering and so on. ### Completely Open Source -- An open source collaboration product using the `Apache2` protocol, maintained by a free and open source community. -- No monitoring number `License`, monitoring type and other pseudo-open source restrictions . -- Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . -- Open source is not the same as free, dev based on HertzBeat must retain the logo, name, page footnotes, copyright, etc. +* An open source collaboration product using the `Apache2` protocol, maintained by a free and open source community. 
+* No monitoring number `License`, monitoring type and other pseudo-open source restrictions . +* Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . +* Open source is not the same as free, dev based on HertzBeat must retain the logo, name, page footnotes, copyright, etc. **HertzBeat has been included in the [CNCF Observability And Analysis - Monitoring Landscape](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category)** @@ -155,36 +156,36 @@ In an isolated network where multiple networks are not connected, we need to dep **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** ------ +----- ## Quickly Start -Just run a single command in a Docker environment: `docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` +Just run a single command in a Docker environment: `docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` Browser access `http://localhost:1157` default account password `admin/hertzbeat` ### Landing Page -- HertzBeat's user management is unified by the configuration file `sureness.yml`, which allows users to add, delete, and modify user information, user role permissions, and so on. Default password admin/hertzbeat +* HertzBeat's user management is unified by the configuration file `sureness.yml`, which allows users to add, delete, and modify user information, user role permissions, and so on. Default password admin/hertzbeat ![hertzbeat](/img/home/0.png) ### Overview Page -- The global overview page shows the distribution of current monitoring categories, users can visualize the current monitoring types and quantities and click to jump to the corresponding monitoring types for maintenance and management. 
-- Show the status of currently registered collector clusters, including collector on-line status, monitoring tasks, startup time, IP address, name and so on. -- Show the list of recent alarm messages, alarm level distribution and alarm processing rate. +* The global overview page shows the distribution of current monitoring categories, users can visualize the current monitoring types and quantities and click to jump to the corresponding monitoring types for maintenance and management. +* Show the status of currently registered collector clusters, including collector on-line status, monitoring tasks, startup time, IP address, name and so on. +* Show the list of recent alarm messages, alarm level distribution and alarm processing rate. ![hertzbeat](/img/home/1.png) ### Monitoring Center -- The monitoring portal supports the management of monitoring of application services, database, operating system, middleware, network, customization, etc. It displays the currently added monitors in the form of a list. -- It displays the currently added monitors in the form of a list and supports adding, modifying, deleting, canceling, importing, exporting and batch management of monitors. -- Support tag grouping, query filtering, view monitoring details portal. +* The monitoring portal supports the management of monitoring of application services, database, operating system, middleware, network, customization, etc. It displays the currently added monitors in the form of a list. +* It displays the currently added monitors in the form of a list and supports adding, modifying, deleting, canceling, importing, exporting and batch management of monitors. +* Support tag grouping, query filtering, view monitoring details portal. 
Built-in support for monitoring types include: -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), +* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), @@ -193,7 +194,7 @@ Built-in support for monitoring types include: [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- 
[Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), +* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), @@ -202,13 +203,13 @@ Built-in support for monitoring types include: [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), 
[NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), +* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), 
[Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), +* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), @@ -216,8 +217,8 @@ Built-in support for monitoring types include: [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), 
[Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), +* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) +* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) @@ -225,18 +226,18 @@ Built-in support for monitoring types include: ### Add and Modify Surveillance -- You can add or modify monitoring instances of a specific monitoring type, configure the IP, port and other parameters of the monitoring on the other end, set the collection period, collection task scheduling method, support detecting availability in advance, etc. The monitoring instances on the page are defined by the corresponding monitoring templates. -- The monitoring parameters configured on the page are defined by the monitoring template of the corresponding monitoring type, and users can modify the configuration parameters on the page by modifying the monitoring template. 
-- Support associated tags to manage monitoring grouping, alarm matching, and so on. +* You can add or modify monitoring instances of a specific monitoring type, configure the IP, port and other parameters of the monitoring on the other end, set the collection period, collection task scheduling method, support detecting availability in advance, etc. The monitoring instances on the page are defined by the corresponding monitoring templates. +* The monitoring parameters configured on the page are defined by the monitoring template of the corresponding monitoring type, and users can modify the configuration parameters on the page by modifying the monitoring template. +* Support associated tags to manage monitoring grouping, alarm matching, and so on. ![hertzbeat](/img/home/10.png) ### Monitor Details -- The monitoring data detail page shows the basic parameter information of the current monitoring, and the monitoring indicator data information. -- Monitor Real-time Data Report displays the real-time values of all the currently monitored indicators in the form of a list of small cards, and users can configure alarm threshold rules based on the real-time values for reference. -- Monitor Historical Data Report displays the historical values of the currently monitored metrics in the form of trend charts, supports querying hourly, daily and monthly historical data, and supports configuring the page refresh time. -- ⚠️ Note that the monitoring history charts need to be configured with an external timing database in order to get the full functionality, timing database support: IOTDB, TDengine, InfluxDB, GreptimeDB +* The monitoring data detail page shows the basic parameter information of the current monitoring, and the monitoring indicator data information. 
+* Monitor Real-time Data Report displays the real-time values of all the currently monitored indicators in the form of a list of small cards, and users can configure alarm threshold rules based on the real-time values for reference. +* Monitor Historical Data Report displays the historical values of the currently monitored metrics in the form of trend charts, supports querying hourly, daily and monthly historical data, and supports configuring the page refresh time. +* ⚠️ Note that the monitoring history charts need to be configured with an external timing database in order to get the full functionality, timing database support: IOTDB, TDengine, InfluxDB, GreptimeDB ![hertzbeat](/img/home/3.png) @@ -244,17 +245,17 @@ Built-in support for monitoring types include: ### Alarm Center -- The management display page of triggered alarm messages enables users to visualize the current alarm situation. -- Support alarm processing, alarm marking unprocessed, alarm deletion, clearing and other batch operations. +* The management display page of triggered alarm messages enables users to visualize the current alarm situation. +* Support alarm processing, alarm marking unprocessed, alarm deletion, clearing and other batch operations. ![hertzbeat](/img/home/7.png) ### Threshold Rules -- Threshold rules can be configured for monitoring the availability status, and alerts can be issued when the value of a particular metric exceeds the expected range. -- There are three levels of alerts: notification alerts, critical alerts, and emergency alerts. -- Threshold rules support visual page configuration or expression rule configuration for more flexibility. -- It supports configuring the number of triggers, alarm levels, notification templates, associated with a specific monitor and so on. +* Threshold rules can be configured for monitoring the availability status, and alerts can be issued when the value of a particular metric exceeds the expected range. 
+* There are three levels of alerts: notification alerts, critical alerts, and emergency alerts. +* Threshold rules support visual page configuration or expression rule configuration for more flexibility. +* It supports configuring the number of triggers, alarm levels, notification templates, associated with a specific monitor and so on. ![hertzbeat](/img/home/6.png) @@ -262,8 +263,8 @@ Built-in support for monitoring types include: ### Alarm Convergence -- When the alarm is triggered by the threshold rule, it will enter into the alarm convergence, the alarm convergence will be based on the rules of the specific time period of the duplicate alarm message de-emphasis convergence, to avoid a large number of repetitive alarms lead to the receiver alarm numbness. -- Alarm convergence rules support duplicate alarm effective time period, label matching and alarm level matching filter. +* When the alarm is triggered by the threshold rule, it will enter into the alarm convergence, the alarm convergence will be based on the rules of the specific time period of the duplicate alarm message de-emphasis convergence, to avoid a large number of repetitive alarms lead to the receiver alarm numbness. +* Alarm convergence rules support duplicate alarm effective time period, label matching and alarm level matching filter. ![hertzbeat](/img/home/12.png) @@ -271,9 +272,9 @@ Built-in support for monitoring types include: ### Alarm Silence -- When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. -- This application scenario, such as users in the system maintenance, do not need to send known alarms. Users will only receive alarm messages on weekdays, and users need to avoid disturbances at night. 
-- Alarm silence rules support one-time time period or periodic time period, support label matching and alarm level matching.
+* When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages.
+* Typical application scenarios: for example, during system maintenance there is no need to send known alarms, users only want to receive alarm messages on weekdays, or users need to avoid disturbances at night.
+* Alarm silence rules support one-time time period or periodic time period, support label matching and alarm level matching.
 
 ![hertzbeat](/img/home/14.png)
 
@@ -281,11 +282,11 @@ Built-in support for monitoring types include:
 
 ### Message Notification
 
-- Message notification is a function to notify alarm messages to specified recipients through different media channels, so that the alarm messages can reach them in time.
-- It includes recipient information management and notification policy management. Recipient management maintains the information of recipients and their notification methods, while notification policy management maintains the policy rules of which recipients will be notified of the alert messages.
-- Notification methods support `Email` `Discord` `Slack` `Telegram` `Pinning` `WeChat` `Flybook` `SMS` `Webhook` and so on.
-- The notification policy supports tag matching and alert level matching, which makes it convenient to assign alerts with different tags and alert levels to different receivers and handlers.
-- Support notification templates, users can customize the content format of the templates to meet their own personalized notification display needs.
+* Message notification is a function to notify alarm messages to specified recipients through different media channels, so that the alarm messages can reach them in time.
+* It includes recipient information management and notification policy management. Recipient management maintains the information of recipients and their notification methods, while notification policy management maintains the policy rules of which recipients will be notified of the alert messages.
+* Notification methods support `Email` `Discord` `Slack` `Telegram` `DingTalk` `WeChat` `Lark (Feishu)` `SMS` `Webhook` and so on.
+* The notification policy supports tag matching and alert level matching, which makes it convenient to assign alerts with different tags and alert levels to different receivers and handlers.
+* Support notification templates, users can customize the content format of the templates to meet their own personalized notification display needs.
 
 ![hertzbeat](/img/home/16.png)
 
@@ -295,8 +296,8 @@ Built-in support for monitoring types include:
 
 ### Monitoring Templates
 
-- HertzBeat makes `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` and other protocols configurable so that you can customize the metrics you want to collect using these protocols by simply configuring the monitoring template `YML` in your browser. Would you believe that you can instantly adapt a new monitoring type such as `K8s` or `Docker` just by configuring it?
-- All our built-in monitoring types (mysql, website, jvm, k8s) are also mapped to corresponding monitoring templates, so you can add and modify monitoring templates to customize your monitoring functions.
+* HertzBeat makes `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` and other protocols configurable so that you can customize the metrics you want to collect using these protocols by simply configuring the monitoring template `YML` in your browser. Would you believe that you can instantly adapt a new monitoring type such as `K8s` or `Docker` just by configuring it?
+* All our built-in monitoring types (mysql, website, jvm, k8s) are also mapped to corresponding monitoring templates, so you can add and modify monitoring templates to customize your monitoring functions. ![hertzbeat](/img/home/9.png) @@ -304,8 +305,8 @@ Built-in support for monitoring types include: **There's so much more to discover. Have Fun!** ------ +----- -**Home: https://hertzbeat.com/** -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**Home: ** +**Github: ** +**Gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/others/contributing.md b/home/versioned_docs/version-v1.4.x/others/contributing.md index 237eb63860e..7c0c80721f3 100644 --- a/home/versioned_docs/version-v1.4.x/others/contributing.md +++ b/home/versioned_docs/version-v1.4.x/others/contributing.md @@ -134,6 +134,7 @@ Public WeChat: `tancloudtech` - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** Provide monitoring management, system management basic services. > Provides monitoring management, monitoring configuration management, system user management, etc. +> > - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. > Use common protocols to remotely collect and obtain peer-to-peer metrics data. > - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services. diff --git a/home/versioned_docs/version-v1.4.x/others/developer.md b/home/versioned_docs/version-v1.4.x/others/developer.md index e8d651af594..bdaf8172b8b 100644 --- a/home/versioned_docs/version-v1.4.x/others/developer.md +++ b/home/versioned_docs/version-v1.4.x/others/developer.md @@ -15,12 +15,12 @@ sidebar_label: Contributors 进击的阿晨
进击的阿晨

💻 🎨 🐛 铁甲小宝
铁甲小宝

🐛 💻 📖 cuipiheqiuqiu
cuipiheqiuqiu

💻 ⚠️ 🎨 - hudongdong129
hudongdong129

💻 ⚠️ 📖 + hudongdong129
hudongdong129

💻 ⚠️ 📖 zqr10159
Logic

📖 💻🎨 vinci
vinci

💻 📖 🎨 - 淞筱
淞筱

💻 📖 🎨 + 淞筱
淞筱

💻 📖 🎨 东风
东风

💻 🎨 📖 diff --git a/home/versioned_docs/version-v1.4.x/others/hertzbeat.md b/home/versioned_docs/version-v1.4.x/others/hertzbeat.md index ab3dc5bf36a..d06ffeb7fde 100644 --- a/home/versioned_docs/version-v1.4.x/others/hertzbeat.md +++ b/home/versioned_docs/version-v1.4.x/others/hertzbeat.md @@ -30,7 +30,7 @@ sidebar_label: HertzBeat 实时监控 > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ----- +---- ### 完全开源 @@ -43,7 +43,7 @@ sidebar_label: HertzBeat 实时监控 > 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 -HertzBeat 自身并没有去创造一种采集数据协议,让对端来适配它。而是充分使用了现有的生态,SNMP采集网络交换机路由器信息,JMX采集JAVA应用信息,JDBC规范采集数据集信息,SSH直连执行脚本获取回显信息,HTTP+(JsonPath | prometheus等)解析接口信息,IPMI采集服务器信息等等。 +HertzBeat 自身并没有去创造一种采集数据协议,让对端来适配它。而是充分使用了现有的生态,SNMP采集网络交换机路由器信息,JMX采集JAVA应用信息,JDBC规范采集数据集信息,SSH直连执行脚本获取回显信息,HTTP+(JsonPath | prometheus等)解析接口信息,IPMI采集服务器信息等等。 HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标信息。 ![hertzbeat](/img/blog/multi-protocol.png) @@ -85,27 +85,27 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ### 强大自定义功能 -> 由前面的**监控模版**介绍,大概清楚了 HertzBeat 拥有的强大自定义功能。 -> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 +> 由前面的**监控模版**介绍,大概清楚了 HertzBeat 拥有的强大自定义功能。 +> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 > 模版里面包含各个协议的使用,指标别名转换,指标计算,单位转换等一系列功能,帮助用户能采集到自己想要的监控指标。 ![hertzbeat](/img/docs/custom-arch.png) ### 无需 Agent -> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 agent 的安装部署调试了。 -> 每台主机得装个 agent,为了监控不同应用中间件可能还得装几个对应的 agent,量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 +> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 agent 的安装部署调试了。 +> 每台主机得装个 agent,为了监控不同应用中间件可能还得装几个对应的 agent,量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 > agent 的版本是否与主应用兼容, agent 与主应用的通讯调试, agent 的同步升级等等等等,这些全是头大的点。 -HertzBeat 的原理就是使用不同的协议去直连对端系统,采集 PULL 的形式去拉取采集数据,无需用户在对端主机上部署安装 Agent | Exporter等。 -比如监控 linux, 在 HertzBeat 端输入IP端口账户密码或密钥即可。 -比如监控 mysql, 在 HertzBeat 端输入IP端口账户密码即可。 +HertzBeat 的原理就是使用不同的协议去直连对端系统,采集 PULL 的形式去拉取采集数据,无需用户在对端主机上部署安装 Agent | Exporter等。 
+比如监控 linux, 在 HertzBeat 端输入IP端口账户密码或密钥即可。 +比如监控 mysql, 在 HertzBeat 端输入IP端口账户密码即可。 **密码等敏感信息全链路加密** ### 高性能集群 -> 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 -> 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 +> 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 +> 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 > 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 ![hertzbeat](/img/docs/cluster-arch.png) @@ -121,20 +121,20 @@ HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采 ### 易用友好 -> 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 -> 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 -> 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 -> 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 +> 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 +> 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 +> 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 +> 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 > 阈值告警通知友好,基于表达式阈值配置,多种告警通知渠道,支持告警静默,时段标签告警级别过滤等。 --- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ------ +----- ## 即刻体验一波 -Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` +Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` 浏览器访问 `http://localhost:1157` 默认账户密码 `admin/hertzbeat` ### 登陆页面 @@ -269,8 +269,8 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 **`HertzBeat`更多强大的功能欢迎使用探索。Have Fun!** ------ +----- -**官网: https://hertzbeat.com/** -**Github: https://github.com/apache/hertzbeat** -**Gitee: https://gitee.com/hertzbeat/hertzbeat** +**官网: ** +**Github: ** +**Gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/others/huaweicloud.md b/home/versioned_docs/version-v1.4.x/others/huaweicloud.md index 9f1d408fc79..bc2c4f50c96 100644 --- a/home/versioned_docs/version-v1.4.x/others/huaweicloud.md +++ b/home/versioned_docs/version-v1.4.x/others/huaweicloud.md @@ -20,4 +20,4 @@ HuaweiCloud 华为云将面向开源软件工具链与环境、开源应用构 开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx 
for HuaweiCloud”。 -感兴趣的开发者可以查看:华为云开源项目仓库 https://gitee.com/HuaweiCloudDeveloper/huaweicloud-cloud-native-plugins-kits 了解更多。 +感兴趣的开发者可以查看:华为云开源项目仓库 了解更多。 diff --git a/home/versioned_docs/version-v1.4.x/others/images-deploy.md b/home/versioned_docs/version-v1.4.x/others/images-deploy.md index ff350e763e0..3cdc25e6196 100644 --- a/home/versioned_docs/version-v1.4.x/others/images-deploy.md +++ b/home/versioned_docs/version-v1.4.x/others/images-deploy.md @@ -21,17 +21,17 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 ## 🎡 介绍 -> [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 +> [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 +> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 > 更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 > -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 +> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 > 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? > -> `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 +> `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 > 当然我们也提供了对应的 **[SAAS版本监控云](https://console.tancloud.cn)**,中小团队和个人无需再为了监控自己的网站资源,而去部署学习一套繁琐的监控系统,**[登录即可免费开始](https://console.tancloud.cn)**。 ----- +---- ![hertzbeat](/img/home/1.png) @@ -75,19 +75,20 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](../start/tdengine-init) - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](../start/iotdb-init) -4. 
配置用户配置文件(可选,自定义配置用户密码) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 +4. 配置用户配置文件(可选,自定义配置用户密码) + HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 具体参考 [配置修改账户密码](../start/account-modify) 5. 部署启动 执行位于安装目录/opt/hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat ``` - $ ./startup.sh + ./startup.sh ``` + 6. 开始探索HertzBeat - 浏览器访问 http://ip:1157/ 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 + 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 **HAVE FUN** @@ -95,11 +96,11 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 **最多的问题就是网络问题,请先提前排查** -1. **按照流程部署,访问 http://ip:1157/ 无界面** +1. **按照流程部署,访问 无界面** 请参考下面几点排查问题: > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 +> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**监控历史图表长时间都一直无数据** @@ -107,4 +108,3 @@ sidebar_label: HertzBeat 华为云镜像部署快速指引 > 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 > 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 - diff --git a/home/versioned_docs/version-v1.4.x/others/resource.md b/home/versioned_docs/version-v1.4.x/others/resource.md index 4aeae0293b6..6b52c0ee20d 100644 --- a/home/versioned_docs/version-v1.4.x/others/resource.md +++ b/home/versioned_docs/version-v1.4.x/others/resource.md @@ -15,4 +15,3 @@ Download: [PDF](http://cdn.hertzbeat.com/hertzbeat.pdf) ![logo](/img/hertzbeat-logo.svg) Download: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.jpg) - diff --git a/home/versioned_docs/version-v1.4.x/others/sponsor.md b/home/versioned_docs/version-v1.4.x/others/sponsor.md index ae338dbea3e..c741e29218a 100644 --- a/home/versioned_docs/version-v1.4.x/others/sponsor.md +++ b/home/versioned_docs/version-v1.4.x/others/sponsor.md @@ -8,6 +8,5 @@ sidebar_label: Sponsor ![wechat-alipay](/img/docs/pay.png) -Thanks [JiShi Information(build a new microwave + optical transaction network)](https://www.flarespeed.com) sponsored server node. +Thanks [JiShi Information(build a new microwave + optical transaction network)](https://www.flarespeed.com) sponsored server node. Thanks [TianShang cloud computing(new wisdom cloud)](https://www.tsyvps.com/aff/BZBEGYLX) sponsored server node. 
- diff --git a/home/versioned_docs/version-v1.4.x/start/account-modify.md b/home/versioned_docs/version-v1.4.x/start/account-modify.md index b1c618a8eda..99541502eb2 100644 --- a/home/versioned_docs/version-v1.4.x/start/account-modify.md +++ b/home/versioned_docs/version-v1.4.x/start/account-modify.md @@ -4,9 +4,9 @@ title: Modify Account Username Password sidebar_label: Update Account --- -HertzBeat default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat +HertzBeat default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat If you need add, delete or modify account or password, configure `sureness.yml`. Ignore this step without this demand. -The configuration file content refer to project repository[/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/blob/master/script/sureness.yml) +The configuration file content refer to project repository[/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/blob/master/script/sureness.yml) Modify the following **part parameters** in sureness.yml:**[Note⚠️Other default sureness configuration parameters should be retained]** ```yaml @@ -125,4 +125,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**Restart HertzBeat, access http://ip:1157/ to explore** +**Restart HertzBeat, access to explore** diff --git a/home/versioned_docs/version-v1.4.x/start/custom-config.md b/home/versioned_docs/version-v1.4.x/start/custom-config.md index 7554498bc6e..7f45b5dd27d 100644 --- a/home/versioned_docs/version-v1.4.x/start/custom-config.md +++ b/home/versioned_docs/version-v1.4.x/start/custom-config.md @@ -10,8 +10,8 @@ This describes how to configure the SMS server, the number of built-in availabil ### Configure the configuration file of HertzBeat -Modify the configuration file located at `hertzbeat/config/application.yml` -Note ⚠️The docker container method needs to mount the application.yml file to the local host +Modify the configuration file 
located at `hertzbeat/config/application.yml` +Note ⚠️The docker container method needs to mount the application.yml file to the local host The installation package can be decompressed and modified in `hertzbeat/config/application.yml` 1. Configure the SMS sending server @@ -57,4 +57,3 @@ warehouse: port: 6379 password: 123456 ``` - diff --git a/home/versioned_docs/version-v1.4.x/start/docker-deploy.md b/home/versioned_docs/version-v1.4.x/start/docker-deploy.md index 6b1cafd90d3..a1ff268fcb0 100644 --- a/home/versioned_docs/version-v1.4.x/start/docker-deploy.md +++ b/home/versioned_docs/version-v1.4.x/start/docker-deploy.md @@ -6,7 +6,7 @@ sidebar_label: Install via Docker > Recommend to use docker deploy HertzBeat -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -14,34 +14,36 @@ sidebar_label: Install via Docker $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. pull HertzBeat Docker mirror you can look up the mirror version TAG in [dockerhub mirror repository](https://hub.docker.com/r/apache/hertzbeat/tags) or in [quay.io mirror repository](https://quay.io/repository/apache/hertzbeat) ```shell - $ docker pull apache/hertzbeat - $ docker pull apache/hertzbeat-collector + docker pull apache/hertzbeat + docker pull apache/hertzbeat-collector ``` or ```shell - $ docker pull quay.io/tancloud/hertzbeat - $ docker pull quay.io/tancloud/hertzbeat-collector + docker pull quay.io/tancloud/hertzbeat + docker pull quay.io/tancloud/hertzbeat-collector ``` -3. 
Mounted HertzBeat configuration file (optional) - Download and config `application.yml` in the host directory, eg:`$(pwd)/application.yml` - Download from [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) or [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml). + +3. Mounted HertzBeat configuration file (optional) + Download and config `application.yml` in the host directory, eg:`$(pwd)/application.yml` + Download from [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) or [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml). You can modify the configuration yml file according to your needs. - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) -4. Mounted the account file(optional) - HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` - If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. 
- Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` - Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) or [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) +4. Mounted the account file(optional) + HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` + If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. + Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` + Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) or [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) For detail steps, please refer to [Configure Account Password](account-modify) 5. Start the HertzBeat Docker container @@ -57,13 +59,14 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ ``` This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. + - `docker run -d` : Run a container in the background via Docker - `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. - `-e LANG=en_US.UTF-8` : Set the system language - `-e TZ=Asia/Shanghai` : Set the system timezone - `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. 
-- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. +- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. - `--name hertzbeat` : Naming container name hertzbeat - `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** @@ -84,6 +87,7 @@ $ docker run -d \ ``` This command starts a running HertzBeat-Collector container. + - `docker run -d` : Run a container in the background via Docker - `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. @@ -101,13 +105,13 @@ This command starts a running HertzBeat-Collector container. **The most common problem is network problems, please check in advance** -1. **MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** +1. 
**MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. +> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` -2. **According to the process deploy,visit http://ip:1157/ no interface** +2. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. @@ -116,7 +120,7 @@ This command starts a running HertzBeat-Collector container. 3. **Log an error TDengine connection or insert SQL failed** -> 1:Check whether database account and password configured is correct, the database is created. +> 1:Check whether database account and password configured is correct, the database is created. > 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 4. **Historical monitoring charts have been missing data for a long time** @@ -140,4 +144,3 @@ This command starts a running HertzBeat-Collector container. 
> Is iot-db or td-engine enable set to true > Note⚠️If both hertzbeat and IotDB, TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/versioned_docs/version-v1.4.x/start/greptime-init.md b/home/versioned_docs/version-v1.4.x/start/greptime-init.md index 10dafda79b1..5102ecfe6b7 100644 --- a/home/versioned_docs/version-v1.4.x/start/greptime-init.md +++ b/home/versioned_docs/version-v1.4.x/start/greptime-init.md @@ -16,8 +16,9 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### Install GreptimeDB via Docker > Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -41,9 +42,9 @@ use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. 
Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.greptime` data source parameters, URL account and password. ```yaml @@ -65,4 +66,3 @@ warehouse: 1. Do both the time series databases Greptime, IoTDB or TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. - diff --git a/home/versioned_docs/version-v1.4.x/start/influxdb-init.md b/home/versioned_docs/version-v1.4.x/start/influxdb-init.md index 0bea6129fb6..05f6b44d876 100644 --- a/home/versioned_docs/version-v1.4.x/start/influxdb-init.md +++ b/home/versioned_docs/version-v1.4.x/start/influxdb-init.md @@ -10,7 +10,7 @@ We recommend VictoriaMetrics for long term support. TDengine is the Time Series Data Platform where developers build IoT, analytics, and cloud applications. **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** Note⚠️ Need InfluxDB 1.x Version. ### 1. Use HuaweiCloud GaussDB For Influx @@ -24,8 +24,9 @@ Note⚠️ Need InfluxDB 1.x Version. ### 2. Install TDengine via Docker > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -> 1. 
Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -41,14 +42,14 @@ Note⚠️ Need InfluxDB 1.x Version. > influxdb:1.8 > ``` > -> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. +> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. > use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.influxdb` data source parameters, URL account and password. ```yaml @@ -74,4 +75,3 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? 
> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. - diff --git a/home/versioned_docs/version-v1.4.x/start/iotdb-init.md b/home/versioned_docs/version-v1.4.x/start/iotdb-init.md index d015527ac1b..7c3f7bd4e38 100644 --- a/home/versioned_docs/version-v1.4.x/start/iotdb-init.md +++ b/home/versioned_docs/version-v1.4.x/start/iotdb-init.md @@ -9,7 +9,7 @@ We recommend VictoriaMetrics for long term support. Apache IoTDB is a software system that integrates the collection, storage, management and analysis of time series data of the Internet of Things. We use it to store and analyze the historical data of monitoring metrics collected. Support V0.13+ version and V1.0.+ version. -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** +**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** > If you already have an IoTDB environment, you can skip directly to the YML configuration step. @@ -17,7 +17,8 @@ Apache IoTDB is a software system that integrates the collection, storage, manag ### Install IoTDB via Docker > Refer to the official website [installation tutorial](https://iotdb.apache.org/UserGuide/V0.13.x/QuickStart/WayToGetIoTDB.html) -> 1. Download and install Docker environment +> +> 1. Download and install Docker environment > Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. 
> @@ -40,8 +41,8 @@ use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` + Modify `hertzbeat/config/application.yml` configuration file + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.iot-db` data source parameters, HOST account and password. ``` @@ -82,4 +83,3 @@ warehouse: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/versioned_docs/version-v1.4.x/start/mysql-change.md b/home/versioned_docs/version-v1.4.x/start/mysql-change.md index 5dc513d3650..e79b98cd264 100644 --- a/home/versioned_docs/version-v1.4.x/start/mysql-change.md +++ b/home/versioned_docs/version-v1.4.x/start/mysql-change.md @@ -10,7 +10,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data ### Install MYSQL via Docker -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -18,20 +18,21 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. 
Install MYSQl with Docker ``` - $ docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7 + docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7 ``` - `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. + `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. use ```$ docker ps``` to check if the database started successfully ### Database creation -1. Enter MYSQL or use the client to connect MYSQL service +1. Enter MYSQL or use the client to connect MYSQL service `mysql -uroot -p123456` -2. Create database named hertzbeat +2. Create database named hertzbeat `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. Check if hertzbeat database has been successfully created `show databases;` @@ -63,4 +64,4 @@ spring: url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.4.x/start/package-deploy.md b/home/versioned_docs/version-v1.4.x/start/package-deploy.md index 0111ef08114..f86a68c6362 100644 --- a/home/versioned_docs/version-v1.4.x/start/package-deploy.md +++ b/home/versioned_docs/version-v1.4.x/start/package-deploy.md @@ -6,11 +6,11 @@ sidebar_label: Install via Package > You can install and run HertzBeat on Linux Windows Mac system, and CPU supports X86/ARM64. -1. Download HertzBeat installation package +1. 
Download HertzBeat installation package Download installation package `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` corresponding to your system environment - download from [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) repository - download from [Download](https://hertzbeat.apache.org/docs/download) repository -2. Configure HertzBeat's configuration file(optional) +2. Configure HertzBeat's configuration file(optional) Unzip the installation package to the host eg: /opt/hertzbeat ``` @@ -19,27 +19,28 @@ sidebar_label: Install via Package $ unzip -o hertzbeat-xx.zip ``` - Modify the configuration file `hertzbeat/config/application.yml` params according to your needs. - - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` + Modify the configuration file `hertzbeat/config/application.yml` params according to your needs. + - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - **Highly recommended** From now on we will mainly support VictoriaMetrics as a time-series database, if you need to use the time series database VictoriaMetrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using VictoriaMetrics to store metrics data](victoria-metrics-init) - - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) + - **Recommended** If you need to use the time series database 
TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) -3. Configure the account file(optional) - HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` - If you need add, delete or modify account or password, configure `hertzbeat/config/sureness.yml`. Ignore this step without this demand. +3. Configure the account file(optional) + HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` + If you need add, delete or modify account or password, configure `hertzbeat/config/sureness.yml`. Ignore this step without this demand. For detail steps, please refer to [Configure Account Password](account-modify) -4. Start the service +4. Start the service Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. ``` - $ ./startup.sh + ./startup.sh ``` + 5. Begin to explore HertzBeat - Access http://localhost:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! 6. Deploy collector clusters (Optional) @@ -57,7 +58,8 @@ sidebar_label: Install via Package manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - Run command `$ ./bin/startup.sh ` or `bin/startup.bat` + + - Run command `$ ./bin/startup.sh` or `bin/startup.bat` - Access `http://localhost:1157` and you will see the registered new collector in dashboard **HAVE FUN** @@ -66,9 +68,9 @@ sidebar_label: Install via Package 1. 
**If using the package not contains JDK, you need to prepare the JAVA environment in advance** - Install JAVA runtime environment-refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) - requirement:JDK11 ENV - download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) + Install JAVA runtime environment-refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) + requirement:JDK11 ENV + download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully. ``` @@ -78,21 +80,21 @@ sidebar_label: Install via Package Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) ``` -2. **According to the process deploy,visit http://ip:1157/ no interface** + +2. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. +> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> 1:Check whether database account and password configured is correct, the database is created. +> 1:Check whether database account and password configured is correct, the database is created. 
> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. -4. **Monitoring historical charts with no data for a long time ** +4. **Monitoring historical charts with no data for a long time** > 1: Whether the time series database is configured or not, if it is not configured, there is no historical chart data. > 2: If you are using Tdengine, check whether the database `hertzbeat` of Tdengine is created. > 3: HertzBeat's configuration file `application.yml`, the dependent services in it, the time series, the IP account password, etc. are configured correctly. - diff --git a/home/versioned_docs/version-v1.4.x/start/postgresql-change.md b/home/versioned_docs/version-v1.4.x/start/postgresql-change.md index a6c1fdb580a..d06d040ee7e 100644 --- a/home/versioned_docs/version-v1.4.x/start/postgresql-change.md +++ b/home/versioned_docs/version-v1.4.x/start/postgresql-change.md @@ -10,7 +10,7 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition ### Install PostgreSQL via Docker -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -18,10 +18,11 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Install PostgreSQL with Docker ``` - $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 + docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` use```$ docker ps```to check if the database started successfully @@ -36,7 +37,8 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. 
In addition su - postgres psql ``` -2. Create database named hertzbeat + +2. Create database named hertzbeat `CREATE DATABASE hertzbeat;` 3. Check if hertzbeat database has been successfully created `\l` @@ -81,4 +83,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.4.x/start/quickstart.md b/home/versioned_docs/version-v1.4.x/start/quickstart.md index 909594e1275..5c3b8239a82 100644 --- a/home/versioned_docs/version-v1.4.x/start/quickstart.md +++ b/home/versioned_docs/version-v1.4.x/start/quickstart.md @@ -41,7 +41,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do 1. Download the release package `hertzbeat-xx.tar.gz` [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) 2. Configure the HertzBeat configuration yml file `hertzbeat/config/application.yml` (optional) -3. Run command `$ ./bin/startup.sh ` or `bin/startup.bat` +3. Run command `$ ./bin/startup.sh` or `bin/startup.bat` 4. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 5. 
Deploy collector clusters - Download the release package `hertzbeat-collector-xx.tar.gz` to new machine [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) @@ -58,7 +58,8 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - Run command `$ ./bin/startup.sh ` or `bin/startup.bat` + + - Run command `$ ./bin/startup.sh` or `bin/startup.bat` - Access `http://localhost:1157` and you will see the registered new collector in dashboard Detailed config refer to [Install HertzBeat via Package](https://hertzbeat.com/docs/start/package-deploy) diff --git a/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md b/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md index 43253ed946b..f3acc57b71e 100644 --- a/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md +++ b/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md @@ -12,10 +12,10 @@ This article introduces how to use the hertzbeat monitoring tool to detect the v HertzBeat is a real-time monitoring tool with powerful custom monitoring capabilities without Agent. Website monitoring, PING connectivity, port availability, database, operating system, middleware, API monitoring, threshold alarms, alarm notification (email, WeChat, Ding Ding Feishu). 
-**Official website: https://hertzbeat.com | https://tancloud.cn** +**Official website: | ** -github: https://github.com/apache/hertzbeat -gitee: https://gitee.com/hertzbeat/hertzbeat +github: +gitee: #### Install HertzBeat @@ -82,8 +82,8 @@ gitee: https://gitee.com/hertzbeat/hertzbeat For token configuration such as Dingding WeChat Feishu, please refer to the help document -https://hertzbeat.com/docs/help/alert_dingtalk -https://tancloud.cn/docs/help/alert_dingtalk + + > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured @@ -93,10 +93,10 @@ https://tancloud.cn/docs/help/alert_dingtalk ---- -#### Finish! +#### Finish The practice of monitoring SSL certificates is here. Of course, for hertzbeat, this function is just the tip of the iceberg. If you think hertzbeat is a good open source project, please give us a Gitee star on GitHub, thank you very much. Thank you for your support. Refill! -**github: https://github.com/apache/hertzbeat** +**github: ** -**gitee: https://gitee.com/hertzbeat/hertzbeat** +**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/start/tdengine-init.md b/home/versioned_docs/version-v1.4.x/start/tdengine-init.md index fc1615fa8c7..f443a72eb56 100644 --- a/home/versioned_docs/version-v1.4.x/start/tdengine-init.md +++ b/home/versioned_docs/version-v1.4.x/start/tdengine-init.md @@ -9,8 +9,8 @@ We recommend VictoriaMetrics for long term support. TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. 
-**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** Note⚠️ Need TDengine 3.x Version. > If you have TDengine environment, can directly skip to create a database instance. @@ -18,8 +18,9 @@ Note⚠️ Need TDengine 3.x Version. ### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -36,8 +37,8 @@ Note⚠️ Need TDengine 3.x Version. > tdengine/tdengine:3.0.4.0 > ``` > -> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. -> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. +> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. +> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. 
> use```$ docker ps``` to check if the database started successfully ### Create database instance @@ -45,9 +46,10 @@ Note⚠️ Need TDengine 3.x Version. 1. Enter database Docker container ``` - $ docker exec -it tdengine /bin/bash + docker exec -it tdengine /bin/bash ``` -2. Create database named hertzbeat + +2. Create database named hertzbeat After entering the container,execute `taos` command as follows: ``` @@ -64,7 +66,7 @@ Note⚠️ Need TDengine 3.x Version. taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` - The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). + The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). A data file every 10 days, memory blocks buffer is 16MB. 3. Check if hertzbeat database has been created success @@ -80,9 +82,9 @@ Note⚠️ Need TDengine 3.x Version. ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.td-engine` data source parameters, URL account and password. 
```yaml @@ -122,4 +124,3 @@ warehouse: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md b/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md index 66a91fd49af..0c4c968371a 100644 --- a/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md +++ b/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md @@ -9,7 +9,7 @@ We recommend VictoriaMetrics for long term support. VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.Recommend Version(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** +**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** > If you already have an VictoriaMetrics environment, you can skip directly to the YML configuration step. @@ -17,7 +17,8 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t ### Install VictoriaMetrics via Docker > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> 1. Download and install Docker environment +> +> 1. Download and install Docker environment > Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). 
> After the installation you can check if the Docker version normally output at the terminal. > @@ -40,8 +41,8 @@ use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` + Modify `hertzbeat/config/application.yml` configuration file + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.victoria-metrics` data source parameters, HOST account and password. ```yaml @@ -65,4 +66,3 @@ warehouse: 1. Do both the time series databases need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. - diff --git a/home/versioned_docs/version-v1.4.x/template.md b/home/versioned_docs/version-v1.4.x/template.md index 6e82517f324..92fba55542c 100644 --- a/home/versioned_docs/version-v1.4.x/template.md +++ b/home/versioned_docs/version-v1.4.x/template.md @@ -6,7 +6,7 @@ sidebar_label: Monitoring Template > Hertzbeat is an open source, real-time monitoring tool with custom-monitor and agentLess. > -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. +> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. 
> Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? Here is the architecture. diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md index 9ccb0e9454b..1030e382685 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-default.md @@ -154,4 +154,3 @@ metrics: # Hertzbeat default parsing is used here parseType: default ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md index 93a5c1e2dce..0d1a7112bb9 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md @@ -55,7 +55,7 @@ As above, usually our background API interface will design such a general return } ``` -**This time we get the metric data such as `category`, `app`, `status`, `size`, `availableSize` under the app. ** +**This time we get the metric data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** ### Add custom monitoring template `hertzbeat` @@ -194,7 +194,7 @@ metrics: ``` -**The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type. ** +**The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type.** ![](/img/docs/advanced/extend-http-example-1.png) @@ -216,10 +216,10 @@ metrics: ---- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! 
If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. -**github: https://github.com/apache/hertzbeat** +**github: <https://github.com/apache/hertzbeat>** diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md index 7881b048357..5bf51fd17a0 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-token.md @@ -10,7 +10,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz ### Request process -【**Authentication information metrics (highest priority)**】【**HTTP interface carries account password call**】->【**Response data analysis**】->【**Analysis and issuance of TOKEN-accessToken as an metric **] -> [**Assign accessToken as a variable parameter to other collection index groups**] +【**Authentication information metrics (highest priority)**】【**HTTP interface carries account password call**】->【**Response data analysis**】->【**Analysis and issuance of TOKEN-accessToken as a metric**] -> [**Assign accessToken as a variable parameter to other collection index groups**] > Here we still use the hertzbeat monitoring example of Tutorial 1! The hertzbeat background interface not only supports the basic direct account password authentication used in Tutorial 1, but also supports token authentication.
@@ -202,7 +202,7 @@ metrics: ``` -**At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows: ** +**At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows:** ```json { @@ -213,7 +213,7 @@ metrics: ![](/img/docs/advanced/extend-http-example-5.png) -** After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page. ** +**After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page.** ![](/img/docs/advanced/extend-http-example-6.png) @@ -223,7 +223,7 @@ metrics: **Add an index group definition `summary` in `app-hertzbeat_token.yml`, which is the same as `summary` in Tutorial 1, and set the collection priority to 1** -**Set the authentication method in the HTTP protocol configuration of this index group to `Bearer Token`, assign the index `token` collected by the previous index group `auth` as a parameter, and use `^o^` as the internal replacement symbol, that is ` ^o^token^o^`. as follows:** +**Set the authentication method in the HTTP protocol configuration of this index group to `Bearer Token`, assign the index `token` collected by the previous index group `auth` as a parameter, and use `^o^` as the internal replacement symbol, that is `^o^token^o^`. as follows:** ```yaml - name: summary @@ -382,10 +382,10 @@ metrics: --- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! 
If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. -**github: https://github.com/apache/hertzbeat** +**github: <https://github.com/apache/hertzbeat>** diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md index 86a49c06756..4e12fe86b57 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-jsonpath.md @@ -61,7 +61,7 @@ Multilayer format:Set key value in the array #### Example -Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. +Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. The raw data returned by the interface is as follows: ```json @@ -172,4 +172,3 @@ metrics: parseType: jsonPath parseScript: '$.number[*]' ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http.md index bab8800e7a1..acc006437d3 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http.md @@ -13,14 +13,14 @@ sidebar_label: HTTP Protocol Custom Monitoring It can be seen from the process that we define a monitoring type of HTTP protocol. We need to configure HTTP request parameters, configure which Metrics to obtain, and configure the parsing method and parsing script for response data. HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. -**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data.
For details, refer to [**System Default Parsing**](extend-http-default) +**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. For details, refer to [**System Default Parsing**](extend-http-default) **JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) ### Custom Steps **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. @@ -203,4 +203,3 @@ metrics: basicAuthPassword: ^_^password^_^ parseType: default ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md b/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md index ec42f84f642..4a92d94c74f 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-jdbc.md @@ -21,7 +21,7 @@ We can obtain the corresponding Metric data through the data fields queried by S > Query a row of data, return the column name of the result set through query and map them to the queried field. -eg: +eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book limit 1; Here the Metric field and the response data can be mapped into a row of collected data one by one. @@ -30,7 +30,7 @@ Here the Metric field and the response data can be mapped into a row of collecte > Query multiple rows of data, return the column names of the result set and map them to the queried fields. 
-eg: +eg: queried Metric fields:one two three four query SQL:select one, two, three, four from book; Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. @@ -39,9 +39,9 @@ Here the Metric field and the response data can be mapped into multiple rows of > Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. -eg: -queried fields:one two three four -query SQL:select key, value from book; +eg: +queried fields:one two three four +query SQL:select key, value from book; SQL response data: | key | value | @@ -57,7 +57,7 @@ Here by mapping the Metric field with the key of the response data, we can obta **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -241,4 +241,3 @@ metrics: sql: show global status where Variable_name like 'innodb%'; url: ^_^url^_^ ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md b/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md index 2f9ba992f63..2110e98dca8 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-jmx.md @@ -23,7 +23,7 @@ By configuring the monitoring template YML metrics `field`, `aliasFields`, `obje ![](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. 
@@ -192,4 +192,3 @@ metrics: objectName: java.lang:type=MemoryPool,name=* url: ^_^url^_^ ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md b/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md index 2047e1d1cf5..65c5fb0c69b 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-ngql.md @@ -21,6 +21,7 @@ Mapping the fields returned by NGQL queries to the metrics we need allows us to > `filterValue`: filter attribute value (optional) For example: + - online_meta_count#SHOW HOSTS META#Status#ONLINE Counts the number of rows returned by `SHOW HOSTS META` where Status equals ONLINE. - online_meta_count#SHOW HOSTS META## @@ -31,12 +32,14 @@ Counts the number of rows returned by `SHOW HOSTS META`. > Queries a single row of data by mapping the column names of the query result set to the queried fields. For example: + - Metrics fields: a, b - NGQL query: match (v:metrics) return v.metrics.a as a, v.metrics.b as b; Here, the metric fields can be mapped to the response data row by row. Notes: + - When using the `oneRow` method, if a single query statement returns multiple rows of results, only the first row of results will be mapped to the metric fields. - When the `commands` field contains two or more query statements and the returned fields of multiple query statements are the same, the fields returned by the subsequent statement will overwrite those returned by the previous statement. - It is recommended to use the limit statement to limit the number of rows returned in the result set when defining `commands`. @@ -46,11 +49,13 @@ Notes: > Queries multiple rows of data by mapping the column names of the query result set to the queried fields. For example: + - Metrics fields: a, b - NGQL query: match (v:metrics) return v.metrics.a as a, v.metrics.b as b; Here, the metric fields can be mapped to the response data row by row. 
Notes: + - When using the `multiRow` method, the `commands` field can only contain one query statement. #### **columns** @@ -58,6 +63,7 @@ Notes: > Collects a single row of metric data by mapping two columns of data (key-value), where the key matches the queried fields and the value is the value of the queried field. Notes: + - When using the `columns` method, the first two columns of the result set are mapped to collect data by default, where the first column corresponds to the metric name and the second column corresponds to the metric value. - When the `commands` field contains two or more query statements and the first column of data returned by multiple query statements is duplicated, the result of the last statement will be retained. @@ -67,7 +73,7 @@ Notes: ![HertzBeat Page](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -166,4 +172,3 @@ metrics: - match (v:tag2) return "tag2" as name ,count(v) as cnt timeout: ^_^timeout^_^ ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-point.md b/home/versioned_docs/version-v1.5.x/advanced/extend-point.md index 314e3f1affa..3f02f6040f4 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-point.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-point.md @@ -11,7 +11,7 @@ sidebar_label: Custom Monitoring **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. 
@@ -130,4 +130,3 @@ metrics: parseType: website ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md b/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md index b3bb9173c87..3dae2b8b6dd 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-snmp.md @@ -23,7 +23,7 @@ By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` p ![](/img/docs/advanced/extend-point-1.png) -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -169,4 +169,3 @@ metrics: processes: 1.3.6.1.2.1.25.1.6.0 location: 1.3.6.1.2.1.1.6.0 ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md b/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md index bf960376179..3a5486c394b 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md @@ -21,12 +21,12 @@ We can obtain the corresponding Metric data through the data fields queried by t > Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. 
-eg: -Metrics of Linux to be queried hostname-host name,uptime-start time -Host name original query command:`hostname` -Start time original query command:`uptime | awk -F "," '{print $1}'` -Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): -`hostname; uptime | awk -F "," '{print $1}'` +eg: +Metrics of Linux to be queried hostname-host name,uptime-start time +Host name original query command:`hostname` +Start time original query command:`uptime | awk -F "," '{print $1}'` +Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): +`hostname; uptime | awk -F "," '{print $1}'` The data responded by the terminal is: ``` @@ -34,8 +34,8 @@ tombook 14:00:15 up 72 days ``` -At last collected Metric data is mapped one by one as: -hostname is `tombook` +At last collected Metric data is mapped one by one as: +hostname is `tombook` uptime is `14:00:15 up 72 days` Here the Metric field and the response data can be mapped into a row of collected data one by one @@ -44,8 +44,8 @@ Here the Metric field and the response data can be mapped into a row of collecte > Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. 
-eg: -Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory +eg: +Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory Memory metrics original query command:`free -m`, Console response: ```shell @@ -55,7 +55,7 @@ Swap: 8191 33 8158 ``` In hertzbeat multiRow format parsing requires a one-to-one mapping between the column name of the response data and the indicaotr value, so the corresponding query SHELL script is: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` +`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` Console response is: ```shell @@ -69,7 +69,7 @@ Here the Metric field and the response data can be mapped into collected data on **HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** -------- +------- Configuration usages of the monitoring templates yml are detailed below. @@ -212,4 +212,3 @@ metrics: script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' parseType: multiRow ``` - diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md b/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md index f991b5702e8..c9759063fa5 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md @@ -55,7 +55,7 @@ As above, usually our background API interface will design such a general return } ``` -**This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app. 
** +**This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** ### Add Monitoring Template Yml @@ -193,7 +193,7 @@ metrics: parseScript: '$.data.apps.*' ``` -**The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type. ** +**The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type.** ![](/img/docs/advanced/extend-http-example-1.png) @@ -215,10 +215,10 @@ metrics: ---- -#### over! +#### over This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. Thanks for the old iron support. Refill! -**github: https://github.com/apache/hertzbeat** +**github: <https://github.com/apache/hertzbeat>** diff --git a/home/versioned_docs/version-v1.5.x/community/become_committer.md b/home/versioned_docs/version-v1.5.x/community/become_committer.md index 382e69fb9bf..11d0c660b13 100644 --- a/home/versioned_docs/version-v1.5.x/community/become_committer.md +++ b/home/versioned_docs/version-v1.5.x/community/become_committer.md @@ -44,7 +44,7 @@ you must commit code with your commit privilege to the codebase; it means you are committed to the HertzBeat project and are productively contributing to our community's success. -## Committer requirements: +## Committer requirements There are no strict rules for becoming a committer or PPMC member. Candidates for new committers are typically people that are active @@ -83,4 +83,3 @@ of the main website or HertzBeat's GitHub repositories.
- Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events - diff --git a/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md b/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md index cd9dff4e02a..70aa00575dd 100644 --- a/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md +++ b/home/versioned_docs/version-v1.5.x/community/become_pmc_member.md @@ -44,7 +44,7 @@ you must commit code with your commit privilege to the codebase; it means you are committed to the HertzBeat project and are productively contributing to our community's success. -## PMC member requirements: +## PMC member requirements There are no strict rules for becoming a committer or PPMC member. Candidates for new PMC member are typically people that are active @@ -83,4 +83,3 @@ of the main website or HertzBeat's GitHub repositories. - Be involved in the design road map discussions with a professional and diplomatic approach even if there is a disagreement - Promoting the project by writing articles or holding events - diff --git a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md index c86438a577e..5b87ee9bf49 100644 --- a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -70,6 +70,7 @@ limitations under the License. ```java Cache publicKeyCache; ``` + 2. Pinyin abbreviations are prohibited for variables (excluding nouns such as place names), such as chengdu. 3. It is recommended to end variable names with a `type`. For variables of type `Collection/List`, take `xxxx` (plural representing multiple elements) or end with `xxxList` (specific type). @@ -79,6 +80,7 @@ limitations under the License. 
Map idUserMap; Map userIdNameMap; ``` + 4. That can intuitively know the type and meaning of the variable through its name. Method names should start with a verb first as follows: @@ -114,6 +116,7 @@ limitations under the License. return resp; } ``` + - Positive demo: > Strings are extracted as constant references. @@ -139,6 +142,7 @@ limitations under the License. return resp; } ``` + 2. Ensure code readability and intuitiveness - The string in the `annotation` symbol doesn't need to be extracted as constant. @@ -198,6 +202,7 @@ public CurrentHashMap funName(); return; } ``` + - Positive demo: ```java @@ -221,11 +226,13 @@ public CurrentHashMap funName(); - Redundant lines Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to continuous nested `if... else..`, it should be considered to try + - `merging branches`, - `inverting branch conditions` - `extracting private methods` to reduce code line depth and improve readability like follows: + - Union or merge the logic into the next level calling - Negative demo: @@ -262,6 +269,7 @@ if(expression2) { ...... } ``` + - Reverse the condition - Negative demo: @@ -276,6 +284,7 @@ if(expression2) { } } ``` + - Positive demo: ```java @@ -289,6 +298,7 @@ if(expression2) { // ... } ``` + - Using a single variable or method to reduce the complex conditional expression - Negative demo: @@ -297,6 +307,7 @@ if(expression2) { ... } ``` + - Positive demo: ```java @@ -341,6 +352,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ```java map.computeIfAbsent(key, x -> key.toLowerCase()) ``` + - Positive demo: ```java @@ -354,6 +366,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ```java map.computeIfAbsent(key, k-> Loader.load(k)); ``` + - Positive demo: ```java @@ -383,6 +396,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. return; } ``` + - Positive demo: ```java @@ -400,6 +414,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. 
return; } ``` + - Positive demo: ```java @@ -417,6 +432,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. return; } ``` + - Positive demo: ```java @@ -436,6 +452,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. return; } ``` + - Positive demo: ```java @@ -453,6 +470,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ... } ``` + - Positive demo: ```java @@ -468,6 +486,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ```java System.out.println(JobStatus.RUNNING.toString()); ``` + - Positive demo: ```java @@ -483,6 +502,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ... } ``` + - Positive demo: ```java @@ -520,6 +540,7 @@ public void process(String input) { ```java log.info("Deploy cluster request " + deployRequest); ``` + - Positive demo ```java @@ -539,6 +560,7 @@ When printing the log content, if the actual parameters of the log placeholder a List userList = getUsersByBatch(1000); LOG.debug("All users: {}", getAllUserIds(userList)); ``` + - Positive demo: In this case, we should determine the log level in advance before making actual log calls as follows: @@ -547,7 +569,7 @@ When printing the log content, if the actual parameters of the log placeholder a // ignored declaration lines. 
List userList = getUsersByBatch(1000); if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); } ``` @@ -559,13 +581,12 @@ When printing the log content, if the actual parameters of the log placeholder a ## References -- https://site.mockito.org/ -- https://alibaba.github.io/p3c/ -- https://rules.sonarsource.com/java/ -- https://junit.org/junit5/ -- https://streampark.apache.org/ +- <https://site.mockito.org/> +- <https://alibaba.github.io/p3c/> +- <https://rules.sonarsource.com/java/> +- <https://junit.org/junit5/> +- <https://streampark.apache.org/> ``` ``` - diff --git a/home/versioned_docs/version-v1.5.x/community/contribution.md b/home/versioned_docs/version-v1.5.x/community/contribution.md index f7d932bcd2c..9a634ffffed 100644 --- a/home/versioned_docs/version-v1.5.x/community/contribution.md +++ b/home/versioned_docs/version-v1.5.x/community/contribution.md @@ -47,7 +47,7 @@ Even small corrections to typos are very welcome :) ### Getting HertzBeat up and running -> To get HertzBeat code running on your development tools, and able to debug with breakpoints. +> To get HertzBeat code running on your development tools, and able to debug with breakpoints. > This is a front-end and back-end separation project. To start the local code, the back-end manager and the front-end web-app must be started separately. #### Backend start @@ -162,6 +162,7 @@ Add WeChat account `ahertzbeat` to pull you into the WeChat group. - **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** Provide monitoring management, system management basic services. > Provides monitoring management, monitoring configuration management, system user management, etc. +> > - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. > Use common protocols to remotely collect and obtain peer-to-peer metrics data. > - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services.
diff --git a/home/versioned_docs/version-v1.5.x/community/development.md b/home/versioned_docs/version-v1.5.x/community/development.md index 549084ce516..3e17d01385b 100644 --- a/home/versioned_docs/version-v1.5.x/community/development.md +++ b/home/versioned_docs/version-v1.5.x/community/development.md @@ -6,8 +6,8 @@ sidebar_label: Development ## Getting HertzBeat code up and running -> To get HertzBeat code running on your development tools, and able to debug with breakpoints. -> This is a front-end and back-end separation project. +> To get HertzBeat code running on your development tools, and able to debug with breakpoints. +> This is a front-end and back-end separation project. > To start the local code, the back-end [manager](https://github.com/apache/hertzbeat/tree/master/manager) and the front-end [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) must be started separately. ### Backend start diff --git a/home/versioned_docs/version-v1.5.x/community/document.md b/home/versioned_docs/version-v1.5.x/community/document.md index e561b3f1b2c..b7f3af828d1 100644 --- a/home/versioned_docs/version-v1.5.x/community/document.md +++ b/home/versioned_docs/version-v1.5.x/community/document.md @@ -40,8 +40,8 @@ This website is compiled using node, using Docusaurus framework components 1. Download and install nodejs (version 18.8.0) 2. Clone the code to the local `git clone git@github.com:apache/hertzbeat.git` 3. In `home` directory run `npm install` to install the required dependent libraries. -4. In `home` directory run `npm run start`, you can visit http://localhost:3000 to view the English mode preview of the site -5. In `home` directory run `npm run start-zh-cn`, you can visit http://localhost:3000 to view the Chinese mode preview of the site +4. In `home` directory run `npm run start`, you can visit to view the English mode preview of the site +5. 
In `home` directory run `npm run start-zh-cn`, you can visit to view the Chinese mode preview of the site 6. To generate static website resource files, run `npm run build`. The static resources of the build are in the build directory. ## Directory structure @@ -93,4 +93,3 @@ css and other style files are placed in the `src/css` directory ### Page content modification > All pages doc can be directly jumped to the corresponding github resource modification page through the 'Edit this page' button at the bottom - diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-release.md b/home/versioned_docs/version-v1.5.x/community/how-to-release.md index ed4bdfb636a..71583c0d36c 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-release.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-release.md @@ -22,6 +22,7 @@ This release process is operated in the UbuntuOS(Windows,Mac), and the following ## 2. Preparing for release > First summarize the account information to better understand the operation process, will be used many times later. +> > - apache id: `muchunjin (APACHE LDAP UserName)` > - apache passphrase: `APACHE LDAP Passphrase` > - apache email: `muchunjin@apache.org` @@ -128,12 +129,12 @@ gpg: Total number processed: 1 gpg: unchanged: 1 ``` -Or enter https://keyserver.ubuntu.com/ address in the browser, enter the name of the key and click 'Search key' to search if existed. +Or enter address in the browser, enter the name of the key and click 'Search key' to search if existed. #### 2.4 Add the gpg public key to the KEYS file of the Apache SVN project repo -- Apache HertzBeat Branch Dev https://dist.apache.org/repos/dist/dev/incubator/hertzbeat -- Apache HertzBeat Branch Release https://dist.apache.org/repos/dist/release/incubator/hertzbeat +- Apache HertzBeat Branch Dev +- Apache HertzBeat Branch Release ##### 2.4.1 Add public key to KEYS in dev branch @@ -167,7 +168,7 @@ $ svn ci -m "add gpg key for muchunjin" ## 3. 
Prepare material package & release -#### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release. +#### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release ```shell git checkout master @@ -328,7 +329,7 @@ svn commit -m "release for HertzBeat 1.6.0" - Check Apache SVN Commit Results -> Visit the address https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1/ in the browser, check if existed the new material package +> Visit the address in the browser, check if existed the new material package ## 4. Enter the community voting stage @@ -336,7 +337,7 @@ svn commit -m "release for HertzBeat 1.6.0" Send a voting email in the community requires at least three `+1` and no `-1`. -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`:
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: @@ -392,7 +393,7 @@ Thanks! After 72 hours, the voting results will be counted, and the voting result email will be sent, as follows. -> `Send to`: dev@hertzbeat.apache.org
+> `Send to`:
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -421,14 +422,14 @@ Best, ChunJin Mu ``` -One item of the email content is `Vote thread`, and the link is obtained here: https://lists.apache.org/list.html?dev@hertzbeat.apache.org +One item of the email content is `Vote thread`, and the link is obtained here: #### 3.2 Send Incubator Community voting mail Send a voting email in the incubator community requires at least three `+1` and no `-1`. -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org、private@hertzbeat.apache.org
+> `Send to`:
+> `cc`:
> `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -482,7 +483,7 @@ ChunJin Mu If there is no -1 after 72 hours, reply to the email as follows -> `Send to`: general@incubator.apache.org
+> `Send to`:
> `Body`: ``` @@ -494,7 +495,7 @@ Chunjin Mu Then the voting results will be counted, and the voting result email will be sent, as follows. -> `Send to`: general@incubator.apache.org
+> `Send to`:
> `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: @@ -520,7 +521,7 @@ Best, ChunJin Mu ``` -One item of the email content is `Vote thread`, and the link is obtained here: https://lists.apache.org/list.html?general@incubator.apache.org +One item of the email content is `Vote thread`, and the link is obtained here: Wait a day to see if the tutor has any other comments, if not, send the following announcement email @@ -534,10 +535,10 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http #### 4.2 Add the new version download address to the official website -https://github.com/apache/hertzbeat/blob/master/home/docs/download.md -https://github.com/apache/hertzbeat/blob/master/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/download.md + + -Open the official website address https://hertzbeat.apache.org/docs/download/ to see if there is a new version of the download +Open the official website address to see if there is a new version of the download > It should be noted that the download link may take effect after an hour, so please pay attention to it. @@ -550,6 +551,7 @@ You can modify it on the original RC Release without creating a new Release. ::: Then enter Release Title and Describe + - Release Title: ``` @@ -569,8 +571,8 @@ The rename the release-1.6.0-rc1 branch to release-1.6.0. #### 4.5 Send new version announcement email -> `Send to`: general@incubator.apache.org
-> `cc`: dev@hertzbeat.apache.org
+> `Send to`:
+> `cc`:
> `Title`: [ANNOUNCE] Release Apache HertzBeat (incubating) 1.6.0
> `Body`: diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-verify.md b/home/versioned_docs/version-v1.5.x/community/how-to-verify.md index 38b507149b0..77e53ee444c 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-verify.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-verify.md @@ -8,7 +8,7 @@ sidebar_position: 4 For detailed check list, please refer to the official [check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) -Version content accessible in browser https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/ +Version content accessible in browser ## 1. Download the candidate version @@ -47,8 +47,8 @@ First import the publisher's public key. Import KEYS from the svn repository to #### 2.2.1 Import public key ```shell -$ curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS -$ gpg --import KEYS # Import KEYS to local +curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS +gpg --import KEYS # Import KEYS to local ``` #### 2.2.2 Trust the public key @@ -83,7 +83,7 @@ gpg> #### 2.2.3 Check the gpg signature ```shell -$ for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i; done +for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i; done ``` check result @@ -100,7 +100,7 @@ gpg: Good signature from "xxx @apache.org>" ### 2.3 Check sha512 hash ```shell -$ for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done +for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` ### 2.4 Check the binary package @@ -155,13 +155,13 @@ If you initiate a posting vote, you can refer to this response example to reply When replying to the email, you must bring the information that you have checked by yourself. Simply replying to `+1 approve` is invalid. 
-When PPMC votes in the dev@hertzbeat.apache.org hertzbeat community, Please bring the binding suffix to indicate that it has a binding vote for the vote in the hertzbeat community, and it is convenient to count the voting results. +When PPMC votes in the hertzbeat community, Please bring the binding suffix to indicate that it has a binding vote for the vote in the hertzbeat community, and it is convenient to count the voting results. -When IPMC votes in the general@incubator.apache.org incubator community. Please bring the binding suffix to indicate that the voting in the incubator community has a binding vote, which is convenient for counting the voting results. +When IPMC votes in the incubator community. Please bring the binding suffix to indicate that the voting in the incubator community has a binding vote, which is convenient for counting the voting results. :::caution -If you have already voted on dev@hertzbeat.apache.org, you can take it directly to the incubator community when you reply to the vote, such as: +If you have already voted on , you can take it directly to the incubator community when you reply to the vote, such as: ```html //Incubator community voting, only IPMC members have binding binding,PPMC needs to be aware of binding changes @@ -196,6 +196,6 @@ I checked: 5. 
``` ---- +--- This doc refer from [Apache StreamPark](https://streampark.apache.org/) diff --git a/home/versioned_docs/version-v1.5.x/community/mailing_lists.md b/home/versioned_docs/version-v1.5.x/community/mailing_lists.md index c5ab8df7604..ef1a0a20329 100644 --- a/home/versioned_docs/version-v1.5.x/community/mailing_lists.md +++ b/home/versioned_docs/version-v1.5.x/community/mailing_lists.md @@ -34,7 +34,7 @@ Before you post anything to the mailing lists, be sure that you already **subscr | List Name | Address | Subscribe | Unsubscribe | Archive | |--------------------|--------------------------|--------------------------------------------------------|------------------------------------------------------------|------------------------------------------------------------------------| -| **Developer List** | dev@hertzbeat.apache.org | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | +| **Developer List** | | [subscribe](mailto:dev-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:dev-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?dev@hertzbeat.apache.org) | ### Notification List @@ -42,16 +42,16 @@ Before you post anything to the mailing lists, be sure that you already **subscr | List Name | Address | Subscribe | Unsubscribe | Archive | |-----------------------|------------------------------------|------------------------------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------| -| **Notification List** | notifications@hertzbeat.apache.org | [subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | 
[archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | +| **Notification List** | | [subscribe](mailto:notifications-subscribe@hertzbeat.apache.org) | [unsubscribe](mailto:notifications-unsubscribe@hertzbeat.apache.org) | [archive](https://lists.apache.org/list.html?notifications@hertzbeat.apache.org) | ## Steps for Subscription Sending a subscription email is also very simple. The steps are as follows: - 1、**Subscribe**: Click the **subscribe** button in the above table, and it redirects to your mail client. The subject and content are arbitrary. - After that, you will receive a confirmation email from dev-help@hertzbeat.apache.org (if not received, please confirm whether the email is automatically classified as SPAM, promotion email, subscription email, etc.). + After that, you will receive a confirmation email from (if not received, please confirm whether the email is automatically classified as SPAM, promotion email, subscription email, etc.). - 2、**Confirm**: Reply directly to the confirmation email, or click on the link in the email to reply quickly. The subject and content are arbitrary. -- 3、**Welcome**: After completing the above steps, you will receive a welcome email with the subject WELCOME to dev@hertzbeat.apache.org, and you have successfully subscribed to the Apache HertzBeat mailing list. +- 3、**Welcome**: After completing the above steps, you will receive a welcome email with the subject WELCOME to , and you have successfully subscribed to the Apache HertzBeat mailing list. 
## Post Plain Text Mails diff --git a/home/versioned_docs/version-v1.5.x/community/new_committer_process.md b/home/versioned_docs/version-v1.5.x/community/new_committer_process.md index 0801f579000..47cf938bea9 100644 --- a/home/versioned_docs/version-v1.5.x/community/new_committer_process.md +++ b/home/versioned_docs/version-v1.5.x/community/new_committer_process.md @@ -80,7 +80,7 @@ ${Work list}[1] ``` Note that, Voting ends one week from today, i.e. -[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +[midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache Voting Guidelines](https://community.apache.org/newcommitter.html) ### Close Vote Template @@ -242,4 +242,3 @@ you can now help fix that. A PPMC member will announce your election to the dev list soon. ``` - diff --git a/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md b/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md index 414dad94a56..9397d6dc034 100644 --- a/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md +++ b/home/versioned_docs/version-v1.5.x/community/new_pmc_member_process.md @@ -78,7 +78,7 @@ ${Work list}[1] [1] https://github.com/apache/hertzbeat/commits?author=${NEW_PMC_NAME} ``` -Note that, Voting ends one week from today, i.e. [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) +Note that, Voting ends one week from today, i.e. [midnight UTC on YYYY-MM-DD](https://www.timeanddate.com/counters/customcounter.html?year=YYYY&month=MM&day=DD) [Apache Voting Guidelines](https://community.apache.org/newcommitter.html) ### Close Vote Template @@ -282,4 +282,3 @@ A PPMC member helps manage and guide the direction of the project. 
Thanks, On behalf of the Apache HertzBeat (incubating) PPMC ``` - diff --git a/home/versioned_docs/version-v1.5.x/community/submit-code.md b/home/versioned_docs/version-v1.5.x/community/submit-code.md index 15bfeba545a..4b5f5dacf42 100644 --- a/home/versioned_docs/version-v1.5.x/community/submit-code.md +++ b/home/versioned_docs/version-v1.5.x/community/submit-code.md @@ -49,12 +49,14 @@ git remote -v ```shell git fetch upstream ``` + * Synchronize remote repository code to local repository ```shell git checkout origin/dev git merge --no-ff upstream/dev ``` + * **⚠️Note that you must create a new branch to develop features `git checkout -b feature-xxx`. It is not recommended to use the master branch for direct development** * After modifying the code locally, submit it to your own repository: **Note that the submission information does not contain special characters** @@ -63,8 +65,8 @@ git remote -v git commit -m 'commit content' git push ``` + * Submit changes to the remote repository, you can see a green button "Compare & pull request" on your repository page, click it. * Select the modified local branch and the branch you want to merge with the past, you need input the message carefully, describe doc is important as code, click "Create pull request". * Then the community Committers will do CodeReview, and then he will discuss some details (design, implementation, performance, etc.) with you, afterward you can directly update the code in this branch according to the suggestions (no need to create a new PR). When this pr is approved, the commit will be merged into the master branch * Finally, congratulations, you have become an official contributor to HertzBeat ! You will be added to the contributor wall, you can contact the community to obtain a contributor certificate. 
- diff --git a/home/versioned_docs/version-v1.5.x/download.md b/home/versioned_docs/version-v1.5.x/download.md index 355fed91be7..67c05a32860 100644 --- a/home/versioned_docs/version-v1.5.x/download.md +++ b/home/versioned_docs/version-v1.5.x/download.md @@ -4,12 +4,14 @@ title: Download Apache HertzBeat (incubating) sidebar_label: Download --- -> **Here is the Apache HertzBeat (incubating) official download page.** +> **Here is the Apache HertzBeat (incubating) official download page.** > **Please choose version to download from the following tables. It is recommended use the latest.** :::tip + - Please verify the release with corresponding hashes(sha512), signatures and [project release KEYS](https://downloads.apache.org/incubator/hertzbeat/KEYS). - Refer to [How to Verify](https://www.apache.org/dyn/closer.cgi#verify) for how to check the hashes and signatures. + ::: ## The Latest Release @@ -30,6 +32,5 @@ For older releases, please check the [archive](https://archive.apache.org/dist/i > Apache HertzBeat provides a docker image for each release. You can pull the image from the [Docker Hub](https://hub.docker.com/r/apache/hertzbeat). 
-- HertzBeat https://hub.docker.com/r/apache/hertzbeat -- HertzBeat Collector https://hub.docker.com/r/apache/hertzbeat-collector - +- HertzBeat +- HertzBeat Collector diff --git a/home/versioned_docs/version-v1.5.x/help/activemq.md b/home/versioned_docs/version-v1.5.x/help/activemq.md index f24bc37fbbb..ef3cc911969 100644 --- a/home/versioned_docs/version-v1.5.x/help/activemq.md +++ b/home/versioned_docs/version-v1.5.x/help/activemq.md @@ -143,4 +143,3 @@ ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/airflow.md b/home/versioned_docs/version-v1.5.x/help/airflow.md index 52367155d89..a7f77f7f5b6 100644 --- a/home/versioned_docs/version-v1.5.x/help/airflow.md +++ b/home/versioned_docs/version-v1.5.x/help/airflow.md @@ -36,4 +36,3 @@ keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] |-------------|------|---------------| | value | 无 | Airflow版本 | | git_version | 无 | Airflow git版本 | - diff --git a/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md b/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md index b86ed662940..36e332d9b21 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md @@ -17,7 +17,7 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin 2. **【Save access_token value of the WebHook address of the robot】** -> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` +> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` > Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. 
**【Alarm notification】->【Add new recipient】 ->【Select DingDing robot notification method】->【Set DingDing robot ACCESS_TOKEN】-> 【Confirm】** diff --git a/home/versioned_docs/version-v1.5.x/help/alert_discord.md b/home/versioned_docs/version-v1.5.x/help/alert_discord.md index 7aa565c0acf..68296148f22 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_discord.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_discord.md @@ -61,8 +61,8 @@ keywords: [open source monitoring tool, open source alerter, open source Discord 1. Discord doesn't receive bot alert notifications -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured +> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured > Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_email.md b/home/versioned_docs/version-v1.5.x/help/alert_email.md index fb6dc7fa571..c507a970bae 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_email.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_email.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source alerter, open source email n ![email](/img/docs/help/alert-notice-1.png) -2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** +2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** ![email](/img/docs/help/alert-notice-2.png) ![email](/img/docs/help/alert-notice-3.png) @@ -32,7 +32,7 @@ keywords: [open source monitoring tool, open source alerter, open source email n 2. 
Cloud environment tancloud cannot receive email notification -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the mailbox is configured correctly and whether the alarm strategy association is configured. > Please check whether the warning email is blocked in the trash can of the mailbox. diff --git a/home/versioned_docs/version-v1.5.x/help/alert_feishu.md b/home/versioned_docs/version-v1.5.x/help/alert_feishu.md index 8f7e9391001..38f7c72cf03 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_feishu.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_feishu.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** @@ -28,7 +28,7 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 1. FeiShu group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_slack.md b/home/versioned_docs/version-v1.5.x/help/alert_slack.md index 5148432fe8b..26bde4ed2e5 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_slack.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_slack.md @@ -29,7 +29,7 @@ Refer to the official website document [Sending messages using Incoming Webhooks 1. Slack did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the alarm information has been triggered in the alarm center > Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_smn.md b/home/versioned_docs/version-v1.5.x/help/alert_smn.md index b4013b9f902..53774315561 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_smn.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_smn.md @@ -37,7 +37,7 @@ keywords: [ open source monitoring tool, open source alerter, open source Huawei 1. Huawei Cloud SMN did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the alarm information has been triggered in the alarm center > Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_telegram.md b/home/versioned_docs/version-v1.5.x/help/alert_telegram.md index cb60f266778..1689788f0f4 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_telegram.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_telegram.md @@ -58,8 +58,8 @@ Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token 1. Telegram did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured +> Please check whether the alarm information has been triggered in the alarm center +> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured > UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md b/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md index 6b8772388e6..c999d887982 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md @@ -58,7 +58,7 @@ This variable is mainly used for calculations involving multiple instances. For `responseTime>=400` 2. API Monitoring -> Alert when response time is greater than 3000ms `responseTime>3000` -3. Overall Monitoring -> Alert when response time for URL (instance) path 'https://baidu.com/book/3' is greater than 200ms +3. Overall Monitoring -> Alert when response time for URL (instance) path '' is greater than 200ms `equals(instance,"https://baidu.com/book/3")&&responseTime>200` 4. 
MYSQL Monitoring -> Alert when 'threads_running' metric under 'status' exceeds 7 `threads_running>7` diff --git a/home/versioned_docs/version-v1.5.x/help/alert_wework.md b/home/versioned_docs/version-v1.5.x/help/alert_wework.md index ca14d5615fa..ce344200301 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_wework.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_wework.md @@ -15,7 +15,7 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` +> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** @@ -32,7 +32,7 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 1. The enterprise wechat group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. +> Please check whether there is any triggered alarm information in the alarm center. > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/almalinux.md b/home/versioned_docs/version-v1.5.x/help/almalinux.md index 40a07028c96..695a8be57b3 100644 --- a/home/versioned_docs/version-v1.5.x/help/almalinux.md +++ b/home/versioned_docs/version-v1.5.x/help/almalinux.md @@ -107,4 +107,3 @@ Statistics for the top 10 processes using memory. 
Statistics include: process ID | command | None | Executed command | --- - diff --git a/home/versioned_docs/version-v1.5.x/help/api.md b/home/versioned_docs/version-v1.5.x/help/api.md index 98763e0eefe..7e068a390b3 100644 --- a/home/versioned_docs/version-v1.5.x/help/api.md +++ b/home/versioned_docs/version-v1.5.x/help/api.md @@ -34,4 +34,3 @@ keywords: [open source monitoring tool, monitoring http api] | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/versioned_docs/version-v1.5.x/help/centos.md b/home/versioned_docs/version-v1.5.x/help/centos.md index 60b770ebf96..858a1d2bb94 100644 --- a/home/versioned_docs/version-v1.5.x/help/centos.md +++ b/home/versioned_docs/version-v1.5.x/help/centos.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source os monitoring tool, monitori | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/versioned_docs/version-v1.5.x/help/clickhouse.md b/home/versioned_docs/version-v1.5.x/help/clickhouse.md index efd873d1f32..d9994148bcd 100644 --- a/home/versioned_docs/version-v1.5.x/help/clickhouse.md +++ b/home/versioned_docs/version-v1.5.x/help/clickhouse.md @@ -93,4 +93,3 @@ keywords: [open source monitoring system, open source database monitoring, Click | MarkCacheBytes | N/A | Size of marks cache in StorageMergeTree | | MarkCacheFiles | N/A | Number of files in marks cache for StorageMergeTree | | MaxPartCountForPartition | N/A | Maximum active data blocks in partitions | - diff --git a/home/versioned_docs/version-v1.5.x/help/debian.md b/home/versioned_docs/version-v1.5.x/help/debian.md index 14cee060aaf..47487573f36 100644 --- a/home/versioned_docs/version-v1.5.x/help/debian.md +++ b/home/versioned_docs/version-v1.5.x/help/debian.md @@ -95,4 +95,3 @@ Metric Unit: - Memory Usage Rate: % - CPU Usage Rate: % - diff --git 
a/home/versioned_docs/version-v1.5.x/help/dm.md b/home/versioned_docs/version-v1.5.x/help/dm.md index 82159bf2408..f8e031bfe20 100644 --- a/home/versioned_docs/version-v1.5.x/help/dm.md +++ b/home/versioned_docs/version-v1.5.x/help/dm.md @@ -46,4 +46,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | dm_sql_thd | None | Thread for writing dmsql dmserver | | dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | | dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | - diff --git a/home/versioned_docs/version-v1.5.x/help/dns.md b/home/versioned_docs/version-v1.5.x/help/dns.md index d8dbd8d0921..3d6a5fe4b8e 100644 --- a/home/versioned_docs/version-v1.5.x/help/dns.md +++ b/home/versioned_docs/version-v1.5.x/help/dns.md @@ -68,4 +68,3 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito | Section0 | none | Additional information for DNS queries. | > The metric set collects up to 10 records, with metric names from Section0 to Section9. - diff --git a/home/versioned_docs/version-v1.5.x/help/docker.md b/home/versioned_docs/version-v1.5.x/help/docker.md index 0e3a1f0b428..63fe3b03a19 100644 --- a/home/versioned_docs/version-v1.5.x/help/docker.md +++ b/home/versioned_docs/version-v1.5.x/help/docker.md @@ -32,7 +32,7 @@ systemctl daemon-reload systemctl restart docker ``` -**Note: Remember to open the `2375` port number in the server console. ** +**Note: Remember to open the `2375` port number in the server console.** **3. 
If the above method does not work:** @@ -99,4 +99,3 @@ firewall-cmd --reload | cpu_delta | None | The number of CPUs already used by the Docker container | | number_cpus | None | The number of CPUs that the Docker container can use | | cpu_usage | None | Docker container CPU usage | - diff --git a/home/versioned_docs/version-v1.5.x/help/doris_be.md b/home/versioned_docs/version-v1.5.x/help/doris_be.md index 8dcde7b549b..3e6fd37de03 100644 --- a/home/versioned_docs/version-v1.5.x/help/doris_be.md +++ b/home/versioned_docs/version-v1.5.x/help/doris_be.md @@ -168,4 +168,3 @@ keywords: [开源监控系统, 开源数据库监控, DORIS数据库BE监控] | 指标名称 | 指标单位 | 指标帮助描述 | |-------|------|------------------------------------------| | value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | - diff --git a/home/versioned_docs/version-v1.5.x/help/doris_fe.md b/home/versioned_docs/version-v1.5.x/help/doris_fe.md index b478b2eaadb..ecfad855ba6 100644 --- a/home/versioned_docs/version-v1.5.x/help/doris_fe.md +++ b/home/versioned_docs/version-v1.5.x/help/doris_fe.md @@ -130,4 +130,3 @@ Can observe the number of import transactions in various states to determine if | committed | None | Committed | | visible | None | Visible | | aborted | None | Aborted / Revoked | - diff --git a/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md b/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md index fd36206bc6e..332767b2a39 100644 --- a/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md +++ b/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md @@ -99,4 +99,3 @@ Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has respo | dynamic | None | Dynamic thread pool or not | | run_timeout_count | None | Number of running timeout tasks | | queue_timeout_count | None | Number of tasks waiting for timeout | - diff --git a/home/versioned_docs/version-v1.5.x/help/elasticsearch.md b/home/versioned_docs/version-v1.5.x/help/elasticsearch.md index 3ac3d62a7e1..f0c29a8fd9f 100644 --- 
a/home/versioned_docs/version-v1.5.x/help/elasticsearch.md +++ b/home/versioned_docs/version-v1.5.x/help/elasticsearch.md @@ -61,4 +61,3 @@ keywords: [ open source monitoring tool, monitoring ElasticSearch metrics ] | disk_free | GB | Disk Free | | disk_total | GB | Disk Total | | disk_used_percent | % | Disk Used Percent | - diff --git a/home/versioned_docs/version-v1.5.x/help/euleros.md b/home/versioned_docs/version-v1.5.x/help/euleros.md index 786dab30afc..5fad0c856ae 100644 --- a/home/versioned_docs/version-v1.5.x/help/euleros.md +++ b/home/versioned_docs/version-v1.5.x/help/euleros.md @@ -105,4 +105,3 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/versioned_docs/version-v1.5.x/help/flink.md b/home/versioned_docs/version-v1.5.x/help/flink.md index bd731a6dee6..9fb7c7cb9e1 100644 --- a/home/versioned_docs/version-v1.5.x/help/flink.md +++ b/home/versioned_docs/version-v1.5.x/help/flink.md @@ -33,4 +33,3 @@ keywords: [open source monitoring tool, open source flink monitoring tool] | task_total | Units | Total number of tasks. | | jobs_running | Units | Number of jobs running. | | jobs_failed | Units | Number of jobs failed. | - diff --git a/home/versioned_docs/version-v1.5.x/help/freebsd.md b/home/versioned_docs/version-v1.5.x/help/freebsd.md index 51d0ed9ab0b..d6505d83dd1 100644 --- a/home/versioned_docs/version-v1.5.x/help/freebsd.md +++ b/home/versioned_docs/version-v1.5.x/help/freebsd.md @@ -85,4 +85,3 @@ Statistics of the top 10 processes using memory. 
Statistics include: Process ID, | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/versioned_docs/version-v1.5.x/help/ftp.md b/home/versioned_docs/version-v1.5.x/help/ftp.md index 50a571eb7a7..72d55e8c1e2 100644 --- a/home/versioned_docs/version-v1.5.x/help/ftp.md +++ b/home/versioned_docs/version-v1.5.x/help/ftp.md @@ -32,4 +32,3 @@ keywords: [ open source monitoring tool, open source ftp server monitoring tool, |---------------|-------------|----------------------------------------------------------| | Is Active | none | Check if the directory exists and has access permission. | | Response Time | ms | Response Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/fullsite.md b/home/versioned_docs/version-v1.5.x/help/fullsite.md index 6145f238bdc..bad94c4b751 100644 --- a/home/versioned_docs/version-v1.5.x/help/fullsite.md +++ b/home/versioned_docs/version-v1.5.x/help/fullsite.md @@ -5,8 +5,8 @@ sidebar_label: Full site Monitor keywords: [open source monitoring tool, open source website monitoring tool, monitoring sitemap metrics] --- -> Available or not to monitor all pages of the website. -> A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. +> Available or not to monitor all pages of the website. +> A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. > Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. 
### Configuration parameter @@ -32,4 +32,3 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon | statusCode | none | Response HTTP status code for requesting the website | | responseTime | ms | Website response time | | errorMsg | none | Error message feedback after requesting the website | - diff --git a/home/versioned_docs/version-v1.5.x/help/guide.md b/home/versioned_docs/version-v1.5.x/help/guide.md index ce182746ffa..8728b7f8e37 100644 --- a/home/versioned_docs/version-v1.5.x/help/guide.md +++ b/home/versioned_docs/version-v1.5.x/help/guide.md @@ -9,7 +9,7 @@ sidebar_label: Help Center ## 🔬 Monitoring services -> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. +> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. > Planned monitoring type:application service, database, operating system, cloud native, open source middleware. ### Application service monitoring @@ -115,8 +115,8 @@ More details see 👉 [Threshold alarm](alert_threshold)
### Alarm notification -> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) -> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. +> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) +> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. > After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver.  👉 [Configure Email Notification](alert_email)
diff --git a/home/versioned_docs/version-v1.5.x/help/hadoop.md b/home/versioned_docs/version-v1.5.x/help/hadoop.md index 56f19472277..e12a44807ea 100644 --- a/home/versioned_docs/version-v1.5.x/help/hadoop.md +++ b/home/versioned_docs/version-v1.5.x/help/hadoop.md @@ -87,4 +87,3 @@ export HADOOP_OPTS= "$HADOOP_OPTS | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/hbase_master.md b/home/versioned_docs/version-v1.5.x/help/hbase_master.md index 1e3efe84af7..d30c8d4bdf7 100644 --- a/home/versioned_docs/version-v1.5.x/help/hbase_master.md +++ b/home/versioned_docs/version-v1.5.x/help/hbase_master.md @@ -57,4 +57,3 @@ Check the `hbase-site.xml` file to obtain the value of the `hbase.master.info.po | receivedBytes | MB | Cluster received data volume | | sentBytes | MB | Cluster sent data volume (MB) | | clusterRequests | none | Total number of cluster requests | - diff --git a/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md b/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md index 0a77eb5441b..a2940b9048c 100644 --- a/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md +++ b/home/versioned_docs/version-v1.5.x/help/hbase_regionserver.md @@ -91,4 +91,3 @@ Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver. | MemHeapMaxM | None | Cluster balance load times | | MemMaxM | None | RPC handle count | | GcCount | MB | Cluster data reception volume | - diff --git a/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md b/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md index 164adc7d6df..68e1c4a38de 100644 --- a/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md +++ b/home/versioned_docs/version-v1.5.x/help/hdfs_datanode.md @@ -54,4 +54,3 @@ Retrieve the HTTP monitoring port for the Apache HDFS DataNode. 
Value: `dfs.data | Metric Name | Metric Unit | Metric Description | |-------------|-------------|--------------------| | StartTime | | Startup time | - diff --git a/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md b/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md index 978daac3613..975c2e5d935 100644 --- a/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md +++ b/home/versioned_docs/version-v1.5.x/help/hdfs_namenode.md @@ -90,4 +90,3 @@ Ensure that you have obtained the JMX monitoring port for the HDFS NameNode. | ThreadsBlocked | Count | Number of threads in BLOCKED state | | ThreadsWaiting | Count | Number of threads in WAITING state | | ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | - diff --git a/home/versioned_docs/version-v1.5.x/help/hive.md b/home/versioned_docs/version-v1.5.x/help/hive.md index 806969c2e7c..1293fbd3802 100644 --- a/home/versioned_docs/version-v1.5.x/help/hive.md +++ b/home/versioned_docs/version-v1.5.x/help/hive.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | init | MB | The initial amount of memory requested for the memory pool. | | max | MB | The maximum amount of memory that can be allocated for the memory pool. | | used | MB | The amount of memory currently being used by the memory pool. 
| - diff --git a/home/versioned_docs/version-v1.5.x/help/http_sd.md b/home/versioned_docs/version-v1.5.x/help/http_sd.md index 6b8de487555..122b159f41b 100644 --- a/home/versioned_docs/version-v1.5.x/help/http_sd.md +++ b/home/versioned_docs/version-v1.5.x/help/http_sd.md @@ -49,4 +49,3 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito | Address | | | | Port | | | | Health Status | | Current health status of service | - diff --git a/home/versioned_docs/version-v1.5.x/help/huawei_switch.md b/home/versioned_docs/version-v1.5.x/help/huawei_switch.md index 902c0596965..6bc99169bce 100644 --- a/home/versioned_docs/version-v1.5.x/help/huawei_switch.md +++ b/home/versioned_docs/version-v1.5.x/help/huawei_switch.md @@ -51,4 +51,3 @@ This document only introduces the monitoring indicators queried in the monitor t | ifOutErrors | none | For packet-oriented interfaces, the number of outbound packets that could not be transmitted because of errors. For character-oriented or fixed-length interfaces, the number of outbound transmission units that could not be transmitted because of errors. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime. | | ifAdminStatus | none | The desired state of the interface. The testing(3) state indicates that no operational packets can be passed. When a managed system initializes, all interfaces start with ifAdminStatus in the down(2) state. As a result of either explicit management action or per configuration information retained by the managed system, ifAdminStatus is then changed to either the up(1) or testing(3) states (or remains in the down(2) state). | | ifOperStatus | none | The current operational state of the interface. The testing(3) state indicates that no operational packets can be passed. If ifAdminStatus is down(2) then ifOperStatus should be down(2). 
If ifAdminStatus is changed to up(1) then ifOperStatus should change to up(1) if the interface is ready to transmit and receive network traffic; it should change to dormant(5) if the interface is waiting for external actions (such as a serial line waiting for an incoming connection); it should remain in the down(2) state if and only if there is a fault that prevents it from going to the up(1) state; it should remain in the notPresent(6) state if the interface has missing (typically, hardware) components. | - diff --git a/home/versioned_docs/version-v1.5.x/help/hugegraph.md b/home/versioned_docs/version-v1.5.x/help/hugegraph.md index 66b0574aab7..90334a4bc03 100644 --- a/home/versioned_docs/version-v1.5.x/help/hugegraph.md +++ b/home/versioned_docs/version-v1.5.x/help/hugegraph.md @@ -138,4 +138,3 @@ Check the `rest-server.properties` file to obtain the value of the `restserver_p | garbage_collector_g1_old_generation_count | NONE | Indicates the number of old generation garbage collections by G1 garbage collector | | garbage_collector_g1_old_generation_time | NONE | Indicates the total time spent in old generation garbage collections by G1 garbage collector | | garbage_collector_time_unit | NONE | Indicates the unit of garbage collection time (such as milliseconds, seconds, etc.) 
| - diff --git a/home/versioned_docs/version-v1.5.x/help/influxdb.md b/home/versioned_docs/version-v1.5.x/help/influxdb.md index 92c5da380ef..31fb3efec6c 100644 --- a/home/versioned_docs/version-v1.5.x/help/influxdb.md +++ b/home/versioned_docs/version-v1.5.x/help/influxdb.md @@ -63,4 +63,3 @@ keywords: [open source monitoring system, open source database monitoring, Influ |-------------|-------------|-------------------------| | result | N/A | Result | | org | N/A | Organization identifier | - diff --git a/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md b/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md index afed14cad7a..c8b55c9ae76 100644 --- a/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md +++ b/home/versioned_docs/version-v1.5.x/help/influxdb_promql.md @@ -59,4 +59,3 @@ keywords: [ Open Source Monitoring System, InfluxDB Monitoring, InfluxDB-PromQL | instance | None | Instance to which the metric belongs | | timestamp | None | Timestamp of metric collection | | value | None | Metric value | - diff --git a/home/versioned_docs/version-v1.5.x/help/iotdb.md b/home/versioned_docs/version-v1.5.x/help/iotdb.md index bec827feb73..011b9cbec12 100644 --- a/home/versioned_docs/version-v1.5.x/help/iotdb.md +++ b/home/versioned_docs/version-v1.5.x/help/iotdb.md @@ -118,4 +118,3 @@ predefinedMetrics: |-------------|-------------|----------------------------------| | name | None | name | | connection | none | thrift current connection number | - diff --git a/home/versioned_docs/version-v1.5.x/help/issue.md b/home/versioned_docs/version-v1.5.x/help/issue.md index a48e84cfd4e..9904fab6551 100644 --- a/home/versioned_docs/version-v1.5.x/help/issue.md +++ b/home/versioned_docs/version-v1.5.x/help/issue.md @@ -17,42 +17,41 @@ sidebar_label: Common issues 3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. 
The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address +> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. +> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. +> See ### Docker Deployment common issues -1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** +1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. +> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` -2. **According to the process deploy,visit http://ip:1157/ no interface** +2. 
**According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. +> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. > two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. > >> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. +> one:Check whether database account and password configured is correct, the database is created. > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. ### Package Deployment common issues -1. **According to the process deploy,visit http://ip:1157/ no interface** +1. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. 
+> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. +> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. +> one:Check whether database account and password configured is correct, the database is created. > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. - diff --git a/home/versioned_docs/version-v1.5.x/help/jetty.md b/home/versioned_docs/version-v1.5.x/help/jetty.md index 6e069553dba..ccec65b5559 100644 --- a/home/versioned_docs/version-v1.5.x/help/jetty.md +++ b/home/versioned_docs/version-v1.5.x/help/jetty.md @@ -92,4 +92,3 @@ Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/jvm.md b/home/versioned_docs/version-v1.5.x/help/jvm.md index 3b47e0e7a8a..477d9fbece1 100644 --- a/home/versioned_docs/version-v1.5.x/help/jvm.md +++ b/home/versioned_docs/version-v1.5.x/help/jvm.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source java jvm monitoring tool, mo 1. 
Add JVM `VM options` When Start Server ⚠️ customIP -Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#remote +Refer: ```shell -Djava.rmi.server.hostname=customIP @@ -74,4 +74,3 @@ Refer: https://docs.oracle.com/javase/1.5.0/docs/guide/management/agent.html#rem | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/kafka.md b/home/versioned_docs/version-v1.5.x/help/kafka.md index f86913733b1..48d06b2037b 100644 --- a/home/versioned_docs/version-v1.5.x/help/kafka.md +++ b/home/versioned_docs/version-v1.5.x/help/kafka.md @@ -87,4 +87,3 @@ exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" | FiveMinuteRate | % | Five Minute Rate | | MeanRate | % | Mean Rate | | FifteenMinuteRate | % | Fifteen Minute Rate | - diff --git a/home/versioned_docs/version-v1.5.x/help/kafka_promql.md b/home/versioned_docs/version-v1.5.x/help/kafka_promql.md index ea358d0de8d..203ef2bef4b 100644 --- a/home/versioned_docs/version-v1.5.x/help/kafka_promql.md +++ b/home/versioned_docs/version-v1.5.x/help/kafka_promql.md @@ -64,4 +64,3 @@ keywords: [ Open Source Monitoring System, Open Source Middleware Monitoring, Ka 1. If Kafka is enabled with JMX monitoring, you can use [Kafka](kafka) Monitoring. 2. If Kafka cluster deploys kafka_exporter to expose monitoring metrics, you can refer to [Prometheus task](prometheus) to configure the Prometheus collection task to monitor kafka. 
- diff --git a/home/versioned_docs/version-v1.5.x/help/kubernetes.md b/home/versioned_docs/version-v1.5.x/help/kubernetes.md index 45adda576fc..3cb2336e768 100644 --- a/home/versioned_docs/version-v1.5.x/help/kubernetes.md +++ b/home/versioned_docs/version-v1.5.x/help/kubernetes.md @@ -13,7 +13,7 @@ If you want to monitor the information in 'Kubernetes', you need to obtain an au Refer to the steps to obtain token -#### method one: +#### method one 1. Create a service account and bind the default cluster-admin administrator cluster role @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### method two: +#### method two ```shell kubectl create serviceaccount cluster-admin @@ -93,4 +93,3 @@ kubectl create token --duration=1000h cluster-admin | cluster_ip | None | cluster ip | | selector | None | tag selector matches | | creation_time | None | Created | - diff --git a/home/versioned_docs/version-v1.5.x/help/linux.md b/home/versioned_docs/version-v1.5.x/help/linux.md index 6c22028114c..f5c77a72ca6 100644 --- a/home/versioned_docs/version-v1.5.x/help/linux.md +++ b/home/versioned_docs/version-v1.5.x/help/linux.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source linux monitoring tool, monit | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/versioned_docs/version-v1.5.x/help/mariadb.md b/home/versioned_docs/version-v1.5.x/help/mariadb.md index 374e6e6a081..8373b61cec3 100644 --- a/home/versioned_docs/version-v1.5.x/help/mariadb.md +++ b/home/versioned_docs/version-v1.5.x/help/mariadb.md @@ -51,4 +51,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | innodb_data_writes | none | innodb average number of writes from file per second | | innodb_data_read | KB | innodb average amount of data read per second | | innodb_data_written | KB | innodb average 
amount of data written per second | - diff --git a/home/versioned_docs/version-v1.5.x/help/memcached.md b/home/versioned_docs/version-v1.5.x/help/memcached.md index 920da021e6b..f3c1ddfab55 100644 --- a/home/versioned_docs/version-v1.5.x/help/memcached.md +++ b/home/versioned_docs/version-v1.5.x/help/memcached.md @@ -14,7 +14,7 @@ The default YML configuration for the memcache version is in compliance with 1.4 You need to use the stats command to view the parameters that your memcache can monitor ``` -### +### **1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. @@ -32,7 +32,7 @@ STAT version 1.4.15 ... ``` -**There is help_doc: https://www.runoob.com/memcached/memcached-stats.html** +**There is help_doc: ** ### Configuration parameter @@ -67,4 +67,3 @@ STAT version 1.4.15 | cmd_flush | | Flush command request count | | get_misses | | Get command misses | | delete_misses | | Delete command misses | - diff --git a/home/versioned_docs/version-v1.5.x/help/mongodb.md b/home/versioned_docs/version-v1.5.x/help/mongodb.md index 9c536e73d7a..52582b47097 100644 --- a/home/versioned_docs/version-v1.5.x/help/mongodb.md +++ b/home/versioned_docs/version-v1.5.x/help/mongodb.md @@ -93,4 +93,3 @@ keywords: [ open source monitoring tool, open source database monitoring tool, m | pageSize | none | Size of a memory page in bytes. | | numPages | none | Total number of memory pages. | | maxOpenFiles | none | Maximum number of open files allowed. 
| - diff --git a/home/versioned_docs/version-v1.5.x/help/mysql.md b/home/versioned_docs/version-v1.5.x/help/mysql.md index dca64b3f9f0..86922782e27 100644 --- a/home/versioned_docs/version-v1.5.x/help/mysql.md +++ b/home/versioned_docs/version-v1.5.x/help/mysql.md @@ -51,4 +51,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | innodb_data_writes | none | innodb average number of writes from file per second | | innodb_data_read | KB | innodb average amount of data read per second | | innodb_data_written | KB | innodb average amount of data written per second | - diff --git a/home/versioned_docs/version-v1.5.x/help/nacos.md b/home/versioned_docs/version-v1.5.x/help/nacos.md index 4de3661c17b..f7c8815521f 100644 --- a/home/versioned_docs/version-v1.5.x/help/nacos.md +++ b/home/versioned_docs/version-v1.5.x/help/nacos.md @@ -92,4 +92,3 @@ More information see [Nacos monitor guide](https://nacos.io/en-us/docs/monitor-g | nacos_monitor{name='configListenSize'} | none | listened configuration file count | | nacos_client_request_seconds_count | none | request count | | nacos_client_request_seconds_sum | second | request time | - diff --git a/home/versioned_docs/version-v1.5.x/help/nebulagraph.md b/home/versioned_docs/version-v1.5.x/help/nebulagraph.md index c23e39c14fe..60ac139f827 100644 --- a/home/versioned_docs/version-v1.5.x/help/nebulagraph.md +++ b/home/versioned_docs/version-v1.5.x/help/nebulagraph.md @@ -14,13 +14,13 @@ The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. ``` -### +### **1、Obtain available parameters through the stats and rocksdb stats interfaces.** 1.1、 If you only need to get nebulaGraph_stats, you need to ensure that you have access to stats, or you'll get errors. 
-The default port is 19669 and the access address is http://ip:19669/stats +The default port is 19669 and the access address is 1.2、If you need to obtain additional parameters for rocksdb stats, you need to ensure that you have access to rocksdb stats, otherwise an error will be reported. @@ -28,11 +28,11 @@ stats, otherwise an error will be reported. Once you connect to NebulaGraph for the first time, you must first register your Storage service in order to properly query your data. -**There is help_doc: https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** +**There is help_doc: ** -**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** +**** -The default port is 19779 and the access address is:http://ip:19779/rocksdb_stats +The default port is 19779 and the access address is: ### Configuration parameter @@ -53,7 +53,7 @@ The default port is 19779 and the access address is:http://ip:19779/rocksdb_stat #### Metrics Set:nebulaGraph_stats Too many indicators, related links are as follows -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** +**** | Metric name | Metric unit | Metric help description | |---------------------------------------|-------------|--------------------------------------------------------------| @@ -65,11 +65,10 @@ Too many indicators, related links are as follows #### Metrics Set:rocksdb_stats Too many indicators, related links are as follows -**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** +**** | Metric name | Metric unit | Metric help description | |----------------------------|-------------|-------------------------------------------------------------| | rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. | | rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | | ... | | ... 
| - diff --git a/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md b/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md index c39195f427e..1a4291dd5f6 100644 --- a/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md +++ b/home/versioned_docs/version-v1.5.x/help/nebulagraph_cluster.md @@ -89,4 +89,3 @@ keywords: [ Open Source Monitoring System, Open Source Database Monitoring, Open | version | None | Version | > If you need to customize monitoring templates to collect data from NebulaGraph clusters, please refer to: [NGQL Custom Monitoring](../advanced/extend-ngql.md) - diff --git a/home/versioned_docs/version-v1.5.x/help/nginx.md b/home/versioned_docs/version-v1.5.x/help/nginx.md index f630e4d4d24..a5662be985f 100644 --- a/home/versioned_docs/version-v1.5.x/help/nginx.md +++ b/home/versioned_docs/version-v1.5.x/help/nginx.md @@ -46,8 +46,8 @@ server { location /nginx-status { stub_status on; access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } ``` @@ -93,8 +93,8 @@ http { server { location /req-status { req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts } } } @@ -109,7 +109,7 @@ nginx -s reload 4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. 
-**Refer Doc: https://github.com/zls0424/ngx_req_status** +**Refer Doc: ** **⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** @@ -152,4 +152,3 @@ nginx -s reload | requests | | Total requests | | active | | Current concurrent connections | | bandwidth | kb | Current bandwidth | - diff --git a/home/versioned_docs/version-v1.5.x/help/ntp.md b/home/versioned_docs/version-v1.5.x/help/ntp.md index 666f2a6b39a..fc7f7925ca6 100644 --- a/home/versioned_docs/version-v1.5.x/help/ntp.md +++ b/home/versioned_docs/version-v1.5.x/help/ntp.md @@ -35,4 +35,3 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito | stratum | | The stratumevel of the NTP server, indicating its distance from a reference clock). | | referenceId | | An identifier that indicates the reference clock or time source used by the NTP server). | | precision | | The precision of the NTP server's clock, indicating its accuracy). | - diff --git a/home/versioned_docs/version-v1.5.x/help/openai.md b/home/versioned_docs/version-v1.5.x/help/openai.md index 7165925372f..a7a10de2b19 100644 --- a/home/versioned_docs/version-v1.5.x/help/openai.md +++ b/home/versioned_docs/version-v1.5.x/help/openai.md @@ -12,8 +12,8 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI > 1. Open Chrome browser's network request interface > `Mac: cmd + option + i` > `Windows: ctrl + shift + i` -> 2. Visit https://platform.openai.com/usage -> 3. Find the request to https://api.openai.com/dashboard/billing/usage +> 2. Visit +> 3. Find the request to > 4. Find the Authorization field in the request headers, and copy the content after `Bearer`. 
For example: `sess-123456` ### Notes @@ -81,4 +81,3 @@ keywords: [open source monitoring system, open source network monitoring, OpenAI | Tax IDs | None | Tax IDs | | Billing Address | None | Billing address | | Business Address | None | Business address | - diff --git a/home/versioned_docs/version-v1.5.x/help/opengauss.md b/home/versioned_docs/version-v1.5.x/help/opengauss.md index 28171658951..3490bb8b003 100644 --- a/home/versioned_docs/version-v1.5.x/help/opengauss.md +++ b/home/versioned_docs/version-v1.5.x/help/opengauss.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/versioned_docs/version-v1.5.x/help/opensuse.md b/home/versioned_docs/version-v1.5.x/help/opensuse.md index acaf86632d4..a4c1fc873de 100644 --- a/home/versioned_docs/version-v1.5.x/help/opensuse.md +++ b/home/versioned_docs/version-v1.5.x/help/opensuse.md @@ -105,4 +105,3 @@ Statistics for the top 10 processes using memory. 
Statistics include: Process ID | mem_usage | % | Memory usage rate | | cpu_usage | % | CPU usage rate | | command | None | Executed command | - diff --git a/home/versioned_docs/version-v1.5.x/help/oracle.md b/home/versioned_docs/version-v1.5.x/help/oracle.md index 50d2f6422bc..978e6736620 100644 --- a/home/versioned_docs/version-v1.5.x/help/oracle.md +++ b/home/versioned_docs/version-v1.5.x/help/oracle.md @@ -61,4 +61,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | qps | QPS | I/O Requests per second | | tps | TPS | User transaction per second | | mbps | MBPS | I/O Megabytes per second | - diff --git a/home/versioned_docs/version-v1.5.x/help/ping.md b/home/versioned_docs/version-v1.5.x/help/ping.md index 7c894f488ff..bed89d53dcf 100644 --- a/home/versioned_docs/version-v1.5.x/help/ping.md +++ b/home/versioned_docs/version-v1.5.x/help/ping.md @@ -31,7 +31,6 @@ keywords: [open source monitoring tool, open source network monitoring tool, mon 1. Ping connectivity monitoring exception when installing hertzbeat for package deployment. The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address - +> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. +> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. 
+> See diff --git a/home/versioned_docs/version-v1.5.x/help/plugin.md b/home/versioned_docs/version-v1.5.x/help/plugin.md index 6c0f938c7a4..11d42c65fcf 100644 --- a/home/versioned_docs/version-v1.5.x/help/plugin.md +++ b/home/versioned_docs/version-v1.5.x/help/plugin.md @@ -18,11 +18,10 @@ Currently, `HertzBeat` only set up the trigger `alert` method after alarm, if yo 1. Pull the master branch code `git clone https://github.com/apache/hertzbeat.git` and locate the `plugin` module's `Plugin` interface. ![plugin-1.png](/img/docs/help/plugin-1.png) -2. In the `org.apache.hertzbeat.plugin.impl` directory, create a new interface implementation class, such as `org.apache.hertzbeat.plugin.impl.DemoPluginImpl`, and receive the `Alert` class as a parameter, implement the `alert ` method, the logic is customized by the user, here we simply print the object. +2. In the `org.apache.hertzbeat.plugin.impl` directory, create a new interface implementation class, such as `org.apache.hertzbeat.plugin.impl.DemoPluginImpl`, and receive the `Alert` class as a parameter, implement the `alert` method, the logic is customized by the user, here we simply print the object. ![plugin-2.png](/img/docs/help/plugin-2.png) 3. Package the `hertzbeat-plugin` module. ![plugin-3.png](/img/docs/help/plugin-3.png) 4. Copy the packaged `jar` package to the `ext-lib` directory under the installation directory (for `docker` installations, mount the `ext-lib` directory first, then copy it there). ![plugin-4.png](/img/docs/help/plugin-4.png) 5. Then restart `HertzBeat` to enable the customized post-alert handling policy. 
- diff --git a/home/versioned_docs/version-v1.5.x/help/pop3.md b/home/versioned_docs/version-v1.5.x/help/pop3.md index fffff2a494f..c73884a0afe 100644 --- a/home/versioned_docs/version-v1.5.x/help/pop3.md +++ b/home/versioned_docs/version-v1.5.x/help/pop3.md @@ -45,4 +45,3 @@ If you want to monitor information in 'POP3' with this monitoring type, you just |--------------|-------------|-----------------------------------------| | email_count | | Number of emails | | mailbox_size | kb | The total size of emails in the mailbox | - diff --git a/home/versioned_docs/version-v1.5.x/help/port.md b/home/versioned_docs/version-v1.5.x/help/port.md index 7f420fd1375..6ae4a6bda2d 100644 --- a/home/versioned_docs/version-v1.5.x/help/port.md +++ b/home/versioned_docs/version-v1.5.x/help/port.md @@ -26,4 +26,3 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/versioned_docs/version-v1.5.x/help/postgresql.md b/home/versioned_docs/version-v1.5.x/help/postgresql.md index 57834a713bd..5191f7d325d 100644 --- a/home/versioned_docs/version-v1.5.x/help/postgresql.md +++ b/home/versioned_docs/version-v1.5.x/help/postgresql.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/versioned_docs/version-v1.5.x/help/process.md b/home/versioned_docs/version-v1.5.x/help/process.md index 825a20ac43b..61dacc52ba6 100644 --- a/home/versioned_docs/version-v1.5.x/help/process.md +++ b/home/versioned_docs/version-v1.5.x/help/process.md @@ -85,4 +85,3 @@ Includes metrics for: - read_bytes (Actual number of bytes read by the process from disk) 
- write_bytes (Actual number of bytes written by the process to disk) - cancelled_write_bytes (Actual number of bytes cancelled by the process while writing to disk) - diff --git a/home/versioned_docs/version-v1.5.x/help/prometheus.md b/home/versioned_docs/version-v1.5.x/help/prometheus.md index 571a2e9b51b..39af4dff7e4 100755 --- a/home/versioned_docs/version-v1.5.x/help/prometheus.md +++ b/home/versioned_docs/version-v1.5.x/help/prometheus.md @@ -39,4 +39,3 @@ You can use the following configuration: - Endpoint Path: `/actuator/prometheus` Keep the rest of the settings default. - diff --git a/home/versioned_docs/version-v1.5.x/help/rabbitmq.md b/home/versioned_docs/version-v1.5.x/help/rabbitmq.md index 917ca63c3d3..e49d572ee72 100644 --- a/home/versioned_docs/version-v1.5.x/help/rabbitmq.md +++ b/home/versioned_docs/version-v1.5.x/help/rabbitmq.md @@ -18,7 +18,7 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo rabbitmq-plugins enable rabbitmq_management ``` -2. Access http://ip:15672/ with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. +2. Access with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. 3. Just add the corresponding RabbitMQ monitoring in HertzBeat, the parameters use the IP port of Management, and the default account password. 
@@ -123,4 +123,3 @@ rabbitmq-plugins enable rabbitmq_management | message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | | message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | | message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | - diff --git a/home/versioned_docs/version-v1.5.x/help/redhat.md b/home/versioned_docs/version-v1.5.x/help/redhat.md index 2a8472e00d6..28b076f129d 100644 --- a/home/versioned_docs/version-v1.5.x/help/redhat.md +++ b/home/versioned_docs/version-v1.5.x/help/redhat.md @@ -105,4 +105,3 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/versioned_docs/version-v1.5.x/help/redis.md b/home/versioned_docs/version-v1.5.x/help/redis.md index bdb78ce3584..dca24d20781 100644 --- a/home/versioned_docs/version-v1.5.x/help/redis.md +++ b/home/versioned_docs/version-v1.5.x/help/redis.md @@ -237,4 +237,3 @@ keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] | cmdstat_lpop | 无 | lpop命令的统计信息 | | cmdstat_rpop | 无 | rpop命令的统计信息 | | cmdstat_llen | 无 | llen命令的统计信息 | - diff --git a/home/versioned_docs/version-v1.5.x/help/rocketmq.md b/home/versioned_docs/version-v1.5.x/help/rocketmq.md index f31dea47d9b..4fbe5e195a4 100644 --- a/home/versioned_docs/version-v1.5.x/help/rocketmq.md +++ b/home/versioned_docs/version-v1.5.x/help/rocketmq.md @@ -46,4 +46,3 @@ keywords: [ open source monitoring tool, monitoring Apache RocketMQ metrics ] | Consume_type | none | Consume type | | Consume_tps | none | Consume tps | | Delay | none | Delay | - diff --git a/home/versioned_docs/version-v1.5.x/help/rockylinux.md b/home/versioned_docs/version-v1.5.x/help/rockylinux.md index b1e093bc210..43a5f78d7ee 100644 --- 
a/home/versioned_docs/version-v1.5.x/help/rockylinux.md +++ b/home/versioned_docs/version-v1.5.x/help/rockylinux.md @@ -105,4 +105,3 @@ Top 10 processes consuming memory. Metrics include: Process ID, Memory usage, CP | mem_usage | % | Memory usage | | cpu_usage | % | CPU usage | | command | None | Executed command | - diff --git a/home/versioned_docs/version-v1.5.x/help/shenyu.md b/home/versioned_docs/version-v1.5.x/help/shenyu.md index c7f12bbfaf0..aa4a43a8d5c 100644 --- a/home/versioned_docs/version-v1.5.x/help/shenyu.md +++ b/home/versioned_docs/version-v1.5.x/help/shenyu.md @@ -127,4 +127,3 @@ Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond |-------------|-------------|---------------------------------------------------------| | state | none | thread state | | value | None | The number of threads corresponding to the thread state | - diff --git a/home/versioned_docs/version-v1.5.x/help/smtp.md b/home/versioned_docs/version-v1.5.x/help/smtp.md index fedb17e0040..4be044bc090 100644 --- a/home/versioned_docs/version-v1.5.x/help/smtp.md +++ b/home/versioned_docs/version-v1.5.x/help/smtp.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source SMTP monitoring tool, monit Determine whether the server is available through the hello command in SMTP ``` -> see https://datatracker.ietf.org/doc/html/rfc821#page-13 +> see **Protocol Use:SMTP** @@ -38,4 +38,3 @@ Determine whether the server is available through the hello command in SMTP | response | | Response Status. | | smtpBanner | | Banner of SMTP server. | | heloInfo | | Response information returned by helo. | - diff --git a/home/versioned_docs/version-v1.5.x/help/spark.md b/home/versioned_docs/version-v1.5.x/help/spark.md index 41865300024..8bc045fc9a1 100644 --- a/home/versioned_docs/version-v1.5.x/help/spark.md +++ b/home/versioned_docs/version-v1.5.x/help/spark.md @@ -13,7 +13,7 @@ keywords: [open source monitoring tool, open source java spark monitoring tool, 1. 
Add Spark `VM options` When Start Server ⚠️ customIP -Refer: https://spark.apache.org/docs/latest/spark-standalone.html +Refer: **监控配置spark的监控主要分为Master、Worker、driver、executor监控。Master和Worker的监控在spark集群运行时即可监控,Driver和Excutor的监控需要针对某一个app来进行监控。** **如果都要监控,需要根据以下步骤来配置** @@ -112,4 +112,3 @@ gement.jmxremote.port=8711 | DaemonThreadCount | | Daemon Thread Count | | CurrentThreadUserTime | ms | Current Thread User Time | | CurrentThreadCpuTime | ms | Current Thread Cpu Time | - diff --git a/home/versioned_docs/version-v1.5.x/help/spring_gateway.md b/home/versioned_docs/version-v1.5.x/help/spring_gateway.md index 66c5f0b4f29..7f27b7fe8ef 100644 --- a/home/versioned_docs/version-v1.5.x/help/spring_gateway.md +++ b/home/versioned_docs/version-v1.5.x/help/spring_gateway.md @@ -87,4 +87,3 @@ management: | predicate | None | This is a routing matching rule | | uri | None | This is a service resource identifier | | order | None | The priority of this route | - diff --git a/home/versioned_docs/version-v1.5.x/help/springboot2.md b/home/versioned_docs/version-v1.5.x/help/springboot2.md index 6452aff270e..08029dc23b5 100644 --- a/home/versioned_docs/version-v1.5.x/help/springboot2.md +++ b/home/versioned_docs/version-v1.5.x/help/springboot2.md @@ -93,4 +93,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ |-------------|-------------|--------------------------------------| | space | None | Memory space name | | mem_used | MB | This space occupies a memory size of | - diff --git a/home/versioned_docs/version-v1.5.x/help/springboot3.md b/home/versioned_docs/version-v1.5.x/help/springboot3.md index 47b3db10b5c..77d7032e436 100644 --- a/home/versioned_docs/version-v1.5.x/help/springboot3.md +++ b/home/versioned_docs/version-v1.5.x/help/springboot3.md @@ -89,4 +89,3 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter{ | Metric Name | Unit | Metric Description | |-------------|------|---------------------------------| | status | None | Service health 
status: UP, Down | - diff --git a/home/versioned_docs/version-v1.5.x/help/sqlserver.md b/home/versioned_docs/version-v1.5.x/help/sqlserver.md index 71bd8ebdc83..06e19252ede 100644 --- a/home/versioned_docs/version-v1.5.x/help/sqlserver.md +++ b/home/versioned_docs/version-v1.5.x/help/sqlserver.md @@ -57,20 +57,20 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo 1. SSL connection problem fixed -jdk version: jdk11 -Description of the problem: SQL Server 2019 uses the SA user connection to report an error +jdk version: jdk11 +Description of the problem: SQL Server 2019 uses the SA user connection to report an error Error message: ```text The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". ClientConnectionId:xxxxxxxxxxxxxxxxxxxx ``` -Screenshot of the problem: +Screenshot of the problem: ![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) -solution: +solution: Use advanced settings when adding `SqlServer` monitoring, customize JDBC URL, add parameter configuration after the spliced jdbc url, ```;encrypt=true;trustServerCertificate=true;```This parameter true means unconditionally trust the server returned any root certificate. 
Example: ```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` -Reference document: [microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building- failed-unable-to-find-valid-certification/ba-p/2591304) +Reference document: [microsoft pkix-path-building-failed-unable-to-find-valid-certification]( failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/versioned_docs/version-v1.5.x/help/ssl_cert.md b/home/versioned_docs/version-v1.5.x/help/ssl_cert.md index e7b60fc8a89..253485f8b1a 100644 --- a/home/versioned_docs/version-v1.5.x/help/ssl_cert.md +++ b/home/versioned_docs/version-v1.5.x/help/ssl_cert.md @@ -31,4 +31,3 @@ keywords: [open source monitoring tool, open source ssl cert monitoring tool, mo | start_timestamp | ms millisecond | Validity start timestamp | | end_time | None | Expiration time | | end_timestamp | ms milliseconds | expiration timestamp | - diff --git a/home/versioned_docs/version-v1.5.x/help/tidb.md b/home/versioned_docs/version-v1.5.x/help/tidb.md index 83128c527c8..2378b224110 100644 --- a/home/versioned_docs/version-v1.5.x/help/tidb.md +++ b/home/versioned_docs/version-v1.5.x/help/tidb.md @@ -52,4 +52,3 @@ Due to the large number of metrics that can be monitored, only the metrics queri | max_connections | none | The maximum number of concurrent connections permitted for a single TiDB instance. This variable can be used for resources control. The default value 0 means no limit. When the value of this variable is larger than 0, and the number of connections reaches the value, the TiDB server rejects new connections from clients. | | datadir | none | The location where data is stored. This location can be a local path /tmp/tidb, or point to a PD server if the data is stored on TiKV. A value in the format of ${pd-ip}:${pd-port} indicates the PD server that TiDB connects to on startup. 
| | port | none | The port that the tidb-server is listening on when speaking the MySQL protocol. | - diff --git a/home/versioned_docs/version-v1.5.x/help/time_expression.md b/home/versioned_docs/version-v1.5.x/help/time_expression.md index 482fc04cc05..2f0711c4cf9 100644 --- a/home/versioned_docs/version-v1.5.x/help/time_expression.md +++ b/home/versioned_docs/version-v1.5.x/help/time_expression.md @@ -62,4 +62,3 @@ ${FORMATTER [{ + | - } ]} - `${time+1h+15s+30s}` calculates the time one hour, 15 minutes, and 30 seconds from now and formats it as `HH:mm:ss` 2. Complex expression template (if the built-in formatter does not meet your needs, you can combine multiple expressions) - `${@year}年${@month}月${@day}日` returns the current date formatted as yyyy年MM月dd日 - diff --git a/home/versioned_docs/version-v1.5.x/help/tomcat.md b/home/versioned_docs/version-v1.5.x/help/tomcat.md index 60591f85579..9f103dfe5be 100644 --- a/home/versioned_docs/version-v1.5.x/help/tomcat.md +++ b/home/versioned_docs/version-v1.5.x/help/tomcat.md @@ -71,4 +71,3 @@ keywords: [open source monitoring tool, open source tomcat monitoring tool, moni ```aidl CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" ``` - diff --git a/home/versioned_docs/version-v1.5.x/help/ubuntu.md b/home/versioned_docs/version-v1.5.x/help/ubuntu.md index 8d3b65ce195..9de28efe095 100644 --- a/home/versioned_docs/version-v1.5.x/help/ubuntu.md +++ b/home/versioned_docs/version-v1.5.x/help/ubuntu.md @@ -79,4 +79,3 @@ keywords: [open source monitoring tool, open source linux ubuntu monitoring tool | available | Mb | Available disk size | | usage | % | usage | | mounted | none | Mount point directory | - diff --git a/home/versioned_docs/version-v1.5.x/help/udp_port.md b/home/versioned_docs/version-v1.5.x/help/udp_port.md index 
51c3098dc9a..85d4fcb4383 100644 --- a/home/versioned_docs/version-v1.5.x/help/udp_port.md +++ b/home/versioned_docs/version-v1.5.x/help/udp_port.md @@ -29,4 +29,3 @@ keywords: [open source monitoring tool, open source port monitoring tool, monito | Metric name | Metric unit | Metric help description | |---------------|-------------------|-------------------------| | Response Time | Milliseconds (ms) | Website response time | - diff --git a/home/versioned_docs/version-v1.5.x/help/website.md b/home/versioned_docs/version-v1.5.x/help/website.md index afe86397c9e..1041755f156 100644 --- a/home/versioned_docs/version-v1.5.x/help/website.md +++ b/home/versioned_docs/version-v1.5.x/help/website.md @@ -27,4 +27,3 @@ keywords: [open source monitoring tool, open source website monitoring tool, mon | Metric name | Metric unit | Metric help description | |--------------|-------------|-------------------------| | responseTime | ms | Website response time | - diff --git a/home/versioned_docs/version-v1.5.x/help/websocket.md b/home/versioned_docs/version-v1.5.x/help/websocket.md index 13d1f6eed31..1523a145bd6 100644 --- a/home/versioned_docs/version-v1.5.x/help/websocket.md +++ b/home/versioned_docs/version-v1.5.x/help/websocket.md @@ -31,4 +31,3 @@ keywords: [ open source monitoring tool, Websocket监控 ] | statusMessage | none | Status messages | | connection | none | Connect type | | upgrade | none | Upgraded protocols | - diff --git a/home/versioned_docs/version-v1.5.x/help/windows.md b/home/versioned_docs/version-v1.5.x/help/windows.md index e4be2bd6d96..99d305cbce5 100644 --- a/home/versioned_docs/version-v1.5.x/help/windows.md +++ b/home/versioned_docs/version-v1.5.x/help/windows.md @@ -8,10 +8,10 @@ keywords: [open source monitoring tool, open source windows monitoring tool, mon > Collect and monitor the general performance Metrics of Windows operating system through SNMP protocol. > Note⚠️ You need to start SNMP service for Windows server. 
-References: -[What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) +References: +[What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) +[What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) +[Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) [Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) ### Configuration parameter @@ -41,4 +41,3 @@ References: | services | number | Current number of services | | processes | number | Current number of processes | | responseTime | ms | Collection response time | - diff --git a/home/versioned_docs/version-v1.5.x/help/yarn.md b/home/versioned_docs/version-v1.5.x/help/yarn.md index d7f304ff910..cea4079abf7 100644 --- a/home/versioned_docs/version-v1.5.x/help/yarn.md +++ b/home/versioned_docs/version-v1.5.x/help/yarn.md @@ -81,4 +81,3 @@ Retrieve the HTTP monitoring port of Apache Yarn. Value: `yarn.resourcemanager.w | Metric Name | Unit | Metric Description | |-------------|------|--------------------| | StartTime | | Startup timestamp | - diff --git a/home/versioned_docs/version-v1.5.x/help/zookeeper.md b/home/versioned_docs/version-v1.5.x/help/zookeeper.md index f14b0bb8273..362edf8cff9 100644 --- a/home/versioned_docs/version-v1.5.x/help/zookeeper.md +++ b/home/versioned_docs/version-v1.5.x/help/zookeeper.md @@ -100,4 +100,3 @@ Complete! 
| zk_max_latency | ms | Max latency | | zk_ephemerals_count | number | Number of ephemeral nodes | | zk_min_latency | ms | Min latency | - diff --git a/home/versioned_docs/version-v1.5.x/introduce.md b/home/versioned_docs/version-v1.5.x/introduce.md index b1dd5bc6771..9bf3cd50930 100644 --- a/home/versioned_docs/version-v1.5.x/introduce.md +++ b/home/versioned_docs/version-v1.5.x/introduce.md @@ -35,11 +35,12 @@ slug: / > HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. --- + ### Powerful Monitoring Templates > Before we discuss the customizable monitoring capabilities of HertzBeat, which we mentioned at the beginning, let's introduce the different monitoring templates of HertzBeat. And it is because of this monitoring template design that the advanced features come later. -HertzBeat itself did not create a data collection protocol for the monitoring client to adapt to. Instead, HertzBeat makes full use of the existing ecosystem, `SNMP protocol` to collect information from network switches and routers, `JMX specification` to collect information from Java applications, `JDBC specification` to collect information from datasets, `SSH` to directly connect to scripts to get the display information, `HTTP+ (JsonPath | prometheus, etc.) ` to parse the information from API interfaces, `IPMI protocol` to collect server information, and so on. +HertzBeat itself did not create a data collection protocol for the monitoring client to adapt to. 
Instead, HertzBeat makes full use of the existing ecosystem, `SNMP protocol` to collect information from network switches and routers, `JMX specification` to collect information from Java applications, `JDBC specification` to collect information from datasets, `SSH` to directly connect to scripts to get the display information, `HTTP+ (JsonPath | prometheus, etc.)` to parse the information from API interfaces, `IPMI protocol` to collect server information, and so on. HertzBeat uses these existing standard protocols or specifications, makes them abstractly configurable, and finally makes them all available in the form of YML format monitoring templates that can be written to create templates that use these protocols to collect any desired metrics data. ![hertzbeat](/img/blog/multi-protocol.png) @@ -51,7 +52,7 @@ Do you believe that users can just write a monitoring template on the UI page, c **There are a lot of built-in monitoring templates for users to add directly on the page, one monitoring type corresponds to one YML monitoring template**. 
-- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), +* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), @@ -60,7 +61,7 @@ Do you believe that users can just write a monitoring template on the UI page, c [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- 
[Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), +* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), @@ -69,13 +70,13 @@ Do you believe that users can just write a monitoring template on the UI page, c [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), 
[NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), +* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), 
[Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), +* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), @@ -83,31 +84,31 @@ Do you believe that users can just write a monitoring template on the UI page, c [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- 
[Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), +* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) +* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) -- And More Your Custom Template. -- Notified Support `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. +* And More Your Custom Template. +* Notified Support `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. ### Powerful Customization -> From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. -> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. 
You can easily add, modify and delete indicators by modifying the monitoring template. +> From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. +> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. > The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. ![hertzbeat](/img/docs/custom-arch.png) ### No Agent Required -> For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. -> You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. +> For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. +> You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. > The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. 
For example, monitoring the `linux operating system`. -- For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. -- For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. +* For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. +* For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. **Password and other sensitive information is encrypted on all links**. @@ -115,9 +116,9 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > When the number of monitors rises exponentially, the collection performance drops or the environment is unstable and prone to single point of failure of the collectors, then our collector clusters come into play. -- HertzBeat supports the deployment of collector clusters and the horizontal expansion of multiple collector clusters to exponentially increase the number of monitorable tasks and collection performance. -- Monitoring tasks are self-scheduled in the collector cluster, single collector hangs without sensing the failure to migrate the collection tasks, and the newly added collector nodes are automatically scheduled to share the collection pressure. -- It is very easy to switch between stand-alone mode and cluster mode without additional component deployment. +* HertzBeat supports the deployment of collector clusters and the horizontal expansion of multiple collector clusters to exponentially increase the number of monitorable tasks and collection performance. +* Monitoring tasks are self-scheduled in the collector cluster, single collector hangs without sensing the failure to migrate the collection tasks, and the newly added collector nodes are automatically scheduled to share the collection pressure. 
+* It is very easy to switch between stand-alone mode and cluster mode without additional component deployment. ![hertzbeat](/img/docs/cluster-arch.png) @@ -125,25 +126,25 @@ The principle of `HertzBeat` is to use different protocols to connect directly t > Two locations, three centers, multi-cloud environments, multi-isolated networks, you may have heard of these scenarios. When there is a need for a unified monitoring system to monitor the IT resources of different isolated networks, this is where our Cloud Edge Collaboration comes in. -In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. +In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. `HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. ![hertzbeat](/img/docs/cluster-arch.png) ### Easy to Use -- Set **Monitoring+Alarm+Notification** All in one, no need to deploy multiple component services separately. -- Full UI interface operation, no matter adding new monitor, modifying monitor template, or alarm threshold notification, all can be done in WEB interface, no need to modify files or scripts or reboot. -- No Agent is needed, we only need to fill in the required IP, port, account, password and other parameters in the WEB interface. 
-- Customization friendly, only need a monitoring template YML, automatically generate monitoring management page, data chart page, threshold configuration for corresponding monitoring types. -- Threshold alarm notification friendly, based on the expression threshold configuration, a variety of alarm notification channels, support alarm silence, time label alarm level filtering and so on. +* Set **Monitoring+Alarm+Notification** All in one, no need to deploy multiple component services separately. +* Full UI interface operation, no matter adding new monitor, modifying monitor template, or alarm threshold notification, all can be done in WEB interface, no need to modify files or scripts or reboot. +* No Agent is needed, we only need to fill in the required IP, port, account, password and other parameters in the WEB interface. +* Customization friendly, only need a monitoring template YML, automatically generate monitoring management page, data chart page, threshold configuration for corresponding monitoring types. +* Threshold alarm notification friendly, based on the expression threshold configuration, a variety of alarm notification channels, support alarm silence, time label alarm level filtering and so on. ### Completely Open Source -- An open source collaboration product using the `Apache2` protocol, maintained by a free and open source community. -- No monitoring number `License`, monitoring type and other pseudo-open source restrictions . -- Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . -- Open source is not the same as free, dev based on HertzBeat must retain page footnotes, copyright, etc. +* An open source collaboration product using the `Apache2` protocol, maintained by a free and open source community. +* No monitoring number `License`, monitoring type and other pseudo-open source restrictions . 
+* Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . +* Open source is not the same as free, dev based on HertzBeat must retain page footnotes, copyright, etc. **HertzBeat has been included in the [CNCF Observability And Analysis - Monitoring Landscape](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category)** @@ -153,36 +154,36 @@ In an isolated network where multiple networks are not connected, we need to dep **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** ------ +----- ## Quickly Start -Just run a single command in a Docker environment: `docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` +Just run a single command in a Docker environment: `docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` Browser access `http://localhost:1157` default account password `admin/hertzbeat` ### Landing Page -- HertzBeat's user management is unified by the configuration file `sureness.yml`, which allows users to add, delete, and modify user information, user role permissions, and so on. Default password admin/hertzbeat +* HertzBeat's user management is unified by the configuration file `sureness.yml`, which allows users to add, delete, and modify user information, user role permissions, and so on. Default password admin/hertzbeat ![hertzbeat](/img/home/0.png) ### Overview Page -- The global overview page shows the distribution of current monitoring categories, users can visualize the current monitoring types and quantities and click to jump to the corresponding monitoring types for maintenance and management. -- Show the status of currently registered collector clusters, including collector on-line status, monitoring tasks, startup time, IP address, name and so on. 
-- Show the list of recent alarm messages, alarm level distribution and alarm processing rate. +* The global overview page shows the distribution of current monitoring categories, users can visualize the current monitoring types and quantities and click to jump to the corresponding monitoring types for maintenance and management. +* Show the status of currently registered collector clusters, including collector on-line status, monitoring tasks, startup time, IP address, name and so on. +* Show the list of recent alarm messages, alarm level distribution and alarm processing rate. ![hertzbeat](/img/home/1.png) ### Monitoring Center -- The monitoring portal supports the management of monitoring of application services, database, operating system, middleware, network, customization, etc. It displays the currently added monitors in the form of a list. -- It displays the currently added monitors in the form of a list and supports adding, modifying, deleting, canceling, importing, exporting and batch management of monitors. -- Support tag grouping, query filtering, view monitoring details portal. +* The monitoring portal supports the management of monitoring of application services, database, operating system, middleware, network, customization, etc. It displays the currently added monitors in the form of a list. +* It displays the currently added monitors in the form of a list and supports adding, modifying, deleting, canceling, importing, exporting and batch management of monitors. +* Support tag grouping, query filtering, view monitoring details portal. 
Built-in support for monitoring types include: -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), +* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), @@ -191,7 +192,7 @@ Built-in support for monitoring types include: [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- 
[Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), +* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), @@ -200,13 +201,13 @@ Built-in support for monitoring types include: [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), 
[NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), +* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), 
[Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), +* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), @@ -214,8 +215,8 @@ Built-in support for monitoring types include: [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), 
[Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), +* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) +* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) @@ -223,18 +224,18 @@ Built-in support for monitoring types include: ### Add and Modify Surveillance -- You can add or modify monitoring instances of a specific monitoring type, configure the IP, port and other parameters of the monitoring on the other end, set the collection period, collection task scheduling method, support detecting availability in advance, etc. The monitoring instances on the page are defined by the corresponding monitoring templates. -- The monitoring parameters configured on the page are defined by the monitoring template of the corresponding monitoring type, and users can modify the configuration parameters on the page by modifying the monitoring template. 
-- Support associated tags to manage monitoring grouping, alarm matching, and so on. +* You can add or modify monitoring instances of a specific monitoring type, configure the IP, port and other parameters of the monitoring on the other end, set the collection period, collection task scheduling method, support detecting availability in advance, etc. The monitoring instances on the page are defined by the corresponding monitoring templates. +* The monitoring parameters configured on the page are defined by the monitoring template of the corresponding monitoring type, and users can modify the configuration parameters on the page by modifying the monitoring template. +* Support associated tags to manage monitoring grouping, alarm matching, and so on. ![hertzbeat](/img/home/10.png) ### Monitor Details -- The monitoring data detail page shows the basic parameter information of the current monitoring, and the monitoring indicator data information. -- Monitor Real-time Data Report displays the real-time values of all the currently monitored indicators in the form of a list of small cards, and users can configure alarm threshold rules based on the real-time values for reference. -- Monitor Historical Data Report displays the historical values of the currently monitored metrics in the form of trend charts, supports querying hourly, daily and monthly historical data, and supports configuring the page refresh time. -- ⚠️ Note that the monitoring history charts need to be configured with an external timing database in order to get the full functionality, timing database support: IOTDB, TDengine, InfluxDB, GreptimeDB +* The monitoring data detail page shows the basic parameter information of the current monitoring, and the monitoring indicator data information. 
+* Monitor Real-time Data Report displays the real-time values of all the currently monitored indicators in the form of a list of small cards, and users can configure alarm threshold rules based on the real-time values for reference. +* Monitor Historical Data Report displays the historical values of the currently monitored metrics in the form of trend charts, supports querying hourly, daily and monthly historical data, and supports configuring the page refresh time. +* ⚠️ Note that the monitoring history charts need to be configured with an external timing database in order to get the full functionality, timing database support: IOTDB, TDengine, InfluxDB, GreptimeDB ![hertzbeat](/img/home/3.png) @@ -242,17 +243,17 @@ Built-in support for monitoring types include: ### Alarm Center -- The management display page of triggered alarm messages enables users to visualize the current alarm situation. -- Support alarm processing, alarm marking unprocessed, alarm deletion, clearing and other batch operations. +* The management display page of triggered alarm messages enables users to visualize the current alarm situation. +* Support alarm processing, alarm marking unprocessed, alarm deletion, clearing and other batch operations. ![hertzbeat](/img/home/7.png) ### Threshold Rules -- Threshold rules can be configured for monitoring the availability status, and alerts can be issued when the value of a particular metric exceeds the expected range. -- There are three levels of alerts: notification alerts, critical alerts, and emergency alerts. -- Threshold rules support visual page configuration or expression rule configuration for more flexibility. -- It supports configuring the number of triggers, alarm levels, notification templates, associated with a specific monitor and so on. +* Threshold rules can be configured for monitoring the availability status, and alerts can be issued when the value of a particular metric exceeds the expected range. 
+* There are three levels of alerts: notification alerts, critical alerts, and emergency alerts. +* Threshold rules support visual page configuration or expression rule configuration for more flexibility. +* It supports configuring the number of triggers, alarm levels, notification templates, associated with a specific monitor and so on. ![hertzbeat](/img/home/6.png) @@ -260,8 +261,8 @@ Built-in support for monitoring types include: ### Alarm Convergence -- When the alarm is triggered by the threshold rule, it will enter into the alarm convergence, the alarm convergence will be based on the rules of the specific time period of the duplicate alarm message de-emphasis convergence, to avoid a large number of repetitive alarms lead to the receiver alarm numbness. -- Alarm convergence rules support duplicate alarm effective time period, label matching and alarm level matching filter. +* When the alarm is triggered by the threshold rule, it will enter into the alarm convergence, the alarm convergence will be based on the rules of the specific time period of the duplicate alarm message de-emphasis convergence, to avoid a large number of repetitive alarms lead to the receiver alarm numbness. +* Alarm convergence rules support duplicate alarm effective time period, label matching and alarm level matching filter. ![hertzbeat](/img/home/12.png) @@ -269,9 +270,9 @@ Built-in support for monitoring types include: ### Alarm Silence -- When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. -- This application scenario, such as users in the system maintenance, do not need to send known alarms. Users will only receive alarm messages on weekdays, and users need to avoid disturbances at night. 
-- Alarm silence rules support one-time time period or periodic time period, support label matching and alarm level matching. +* When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. +* This application scenario, such as users in the system maintenance, do not need to send known alarms. Users will only receive alarm messages on weekdays, and users need to avoid disturbances at night. +* Alarm silence rules support one-time time period or periodic time period, support label matching and alarm level matching. ![hertzbeat](/img/home/14.png) @@ -279,11 +280,11 @@ Built-in support for monitoring types include: ### Message Notification -- Message notification is a function to notify alarm messages to specified recipients through different media channels, so that the alarm messages can reach them in time. -- It includes recipient information management and notification policy management. Recipient management maintains the information of recipients and their notification methods, while notification policy management maintains the policy rules of which recipients will be notified of the alert messages. -- Notification methods support `Email` `Discord` `Slack` `Telegram` `Pinning` `WeChat` `Flybook` `SMS` `Webhook` and so on. -- The notification policy supports tag matching and alert level matching, which makes it convenient to assign alerts with different tags and alert levels to different receivers and handlers. -- Support notification templates, users can customize the content format of the templates to meet their own personalized notification display needs. +* Message notification is a function to notify alarm messages to specified recipients through different media channels, so that the alarm messages can reach them in time. 
+* It includes recipient information management and notification policy management. Recipient management maintains the information of recipients and their notification methods, while notification policy management maintains the policy rules of which recipients will be notified of the alert messages. +* Notification methods support `Email` `Discord` `Slack` `Telegram` `Pinning` `WeChat` `Flybook` `SMS` `Webhook` and so on. +* The notification policy supports tag matching and alert level matching, which makes it convenient to assign alerts with different tags and alert levels to different receivers and handlers. +* Support notification templates, users can customize the content format of the templates to meet their own personalized notification display needs. ![hertzbeat](/img/home/16.png) @@ -293,8 +294,8 @@ Built-in support for monitoring types include: ### Monitoring Templates -- HertzBeat makes `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` and other protocols configurable so that you can customize the metrics you want to collect using these protocols by simply configuring the monitoring template `YML` in your browser. Would you believe that you can instantly adapt a new monitoring type such as `K8s` or `Docker` just by configuring it? -- All our built-in monitoring types (mysql, website, jvm, k8s) are also mapped to corresponding monitoring templates, so you can add and modify monitoring templates to customize your monitoring functions. +* HertzBeat makes `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` and other protocols configurable so that you can customize the metrics you want to collect using these protocols by simply configuring the monitoring template `YML` in your browser. Would you believe that you can instantly adapt a new monitoring type such as `K8s` or `Docker` just by configuring it? 
+* All our built-in monitoring types (mysql, website, jvm, k8s) are also mapped to corresponding monitoring templates, so you can add and modify monitoring templates to customize your monitoring functions. ![hertzbeat](/img/home/9.png) @@ -302,6 +303,6 @@ Built-in support for monitoring types include: **There's so much more to discover. Have Fun!** ------ +----- -**Github: https://github.com/apache/hertzbeat** +**Github: <https://github.com/apache/hertzbeat>** diff --git a/home/versioned_docs/version-v1.5.x/start/account-modify.md b/home/versioned_docs/version-v1.5.x/start/account-modify.md index 8dc8b03a565..06a2ee468a0 100644 --- a/home/versioned_docs/version-v1.5.x/start/account-modify.md +++ b/home/versioned_docs/version-v1.5.x/start/account-modify.md @@ -6,9 +6,9 @@ sidebar_label: Update Account Secret ## Update Account -Apache HertzBeat (incubating) default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat +Apache HertzBeat (incubating) default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat If you need add, delete or modify account or password, configure `sureness.yml`. Ignore this step without this demand.
-The configuration file content refer to project repository[/script/sureness.yml](https://github.com/hertzbeat/hertzbeat/blob/master/script/sureness.yml) +The configuration file content refer to project repository[/script/sureness.yml](https://github.com/hertzbeat/hertzbeat/blob/master/script/sureness.yml) Modify the following **part parameters** in sureness.yml:**[Note⚠️Other default sureness configuration parameters should be retained]** ```yaml @@ -127,4 +127,4 @@ sureness: dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' ``` -**Restart HertzBeat, access http://ip:1157/ to explore** +**Restart HertzBeat, access <http://ip:1157/> to explore** diff --git a/home/versioned_docs/version-v1.5.x/start/custom-config.md b/home/versioned_docs/version-v1.5.x/start/custom-config.md index 7554498bc6e..7f45b5dd27d 100644 --- a/home/versioned_docs/version-v1.5.x/start/custom-config.md +++ b/home/versioned_docs/version-v1.5.x/start/custom-config.md @@ -10,8 +10,8 @@ This describes how to configure the SMS server, the number of built-in availabil ### Configure the configuration file of HertzBeat -Modify the configuration file located at `hertzbeat/config/application.yml` -Note ⚠️The docker container method needs to mount the application.yml file to the local host +Modify the configuration file located at `hertzbeat/config/application.yml` +Note ⚠️The docker container method needs to mount the application.yml file to the local host The installation package can be decompressed and modified in `hertzbeat/config/application.yml` 1.
Configure the SMS sending server @@ -57,4 +57,3 @@ warehouse: port: 6379 password: 123456 ``` - diff --git a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md index 10ecb09b001..ce7b784bfe0 100644 --- a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md +++ b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md @@ -6,7 +6,7 @@ sidebar_label: Install via Docker > Recommend to use docker deploy Apache HertzBeat (incubating) -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -14,34 +14,36 @@ sidebar_label: Install via Docker $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. pull HertzBeat Docker mirror you can look up the mirror version TAG in [dockerhub mirror repository](https://hub.docker.com/r/apache/hertzbeat/tags) or in [quay.io mirror repository](https://quay.io/repository/apache/hertzbeat) ```shell - $ docker pull apache/hertzbeat - $ docker pull apache/hertzbeat-collector + docker pull apache/hertzbeat + docker pull apache/hertzbeat-collector ``` or ```shell - $ docker pull quay.io/tancloud/hertzbeat - $ docker pull quay.io/tancloud/hertzbeat-collector + docker pull quay.io/tancloud/hertzbeat + docker pull quay.io/tancloud/hertzbeat-collector ``` -3. Mounted HertzBeat configuration file (optional) - Download and config `application.yml` in the host directory, eg:`$(pwd)/application.yml` - Download from [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + +3. 
Mounted HertzBeat configuration file (optional) + Download and config `application.yml` in the host directory, eg:`$(pwd)/application.yml` + Download from [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) You can modify the configuration yml file according to your needs. - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) -4. Mounted the account file(optional) - HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` - If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. - Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` - Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) +4. Mounted the account file(optional) + HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` + If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. 
+ Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` + Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) For detail steps, please refer to [Configure Account Password](account-modify) 5. Start the HertzBeat Docker container @@ -57,13 +59,14 @@ $ docker run -d -p 1157:1157 -p 1158:1158 \ ``` This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. + - `docker run -d` : Run a container in the background via Docker - `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. - `-e LANG=en_US.UTF-8` : Set the system language - `-e TZ=Asia/Shanghai` : Set the system timezone - `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. -- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. +- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. 
- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. - `--name hertzbeat` : Naming container name hertzbeat - `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** @@ -84,6 +87,7 @@ $ docker run -d \ ``` This command starts a running HertzBeat-Collector container. + - `docker run -d` : Run a container in the background via Docker - `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. @@ -101,13 +105,13 @@ This command starts a running HertzBeat-Collector container. **The most common problem is network problems, please check in advance** -1. **MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** +1. **MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. +> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` -2. 
**According to the process deploy,visit http://ip:1157/ no interface** +2. **According to the process deploy,visit <http://ip:1157/> no interface** Please refer to the following points to troubleshoot issues: > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. @@ -116,7 +120,7 @@ This command starts a running HertzBeat-Collector container. 3. **Log an error TDengine connection or insert SQL failed** -> 1:Check whether database account and password configured is correct, the database is created. +> 1:Check whether database account and password configured is correct, the database is created. > 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 4. **Historical monitoring charts have been missing data for a long time** @@ -140,4 +144,3 @@ This command starts a running HertzBeat-Collector container. > Is iot-db or td-engine enable set to true > Note⚠️If both hertzbeat and IotDB, TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/versioned_docs/version-v1.5.x/start/greptime-init.md b/home/versioned_docs/version-v1.5.x/start/greptime-init.md index 84916590868..c58325ee3a7 100644 --- a/home/versioned_docs/version-v1.5.x/start/greptime-init.md +++ b/home/versioned_docs/version-v1.5.x/start/greptime-init.md @@ -17,8 +17,9 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### Install GreptimeDB via Docker > Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1.
Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -42,9 +43,9 @@ use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.greptime` data source parameters, URL account and password. ```yaml @@ -66,4 +67,3 @@ warehouse: 1. Do both the time series databases Greptime, IoTDB or TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 
- diff --git a/home/versioned_docs/version-v1.5.x/start/influxdb-init.md b/home/versioned_docs/version-v1.5.x/start/influxdb-init.md index b9eeb2fd00c..a1b68b3b077 100644 --- a/home/versioned_docs/version-v1.5.x/start/influxdb-init.md +++ b/home/versioned_docs/version-v1.5.x/start/influxdb-init.md @@ -9,7 +9,7 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie > It is recommended to use VictoriaMetrics as metrics storage. **Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** Note⚠️ Need InfluxDB 1.x Version. ### 1. Use HuaweiCloud GaussDB For Influx @@ -23,8 +23,9 @@ Note⚠️ Need InfluxDB 1.x Version. ### 2. Install TDengine via Docker > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -40,14 +41,14 @@ Note⚠️ Need InfluxDB 1.x Version. > influxdb:1.8 > ``` > -> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. +> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. 
> use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.influxdb` data source parameters, URL account and password. ```yaml @@ -73,4 +74,3 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. - diff --git a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md index 43fb3235406..a105bd6b769 100644 --- a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md +++ b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md @@ -28,6 +28,7 @@ Apache IoTDB is a software system that integrates the collection, storage, manag $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. 
Install IoTDB via Docker ```shell @@ -120,4 +121,3 @@ Configuration parameters: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/versioned_docs/version-v1.5.x/start/mysql-change.md b/home/versioned_docs/version-v1.5.x/start/mysql-change.md index e0f3721bb7f..e78d414af5a 100644 --- a/home/versioned_docs/version-v1.5.x/start/mysql-change.md +++ b/home/versioned_docs/version-v1.5.x/start/mysql-change.md @@ -10,7 +10,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data ### Install MYSQL via Docker -1. Download and install the Docker environment +1. Download and install the Docker environment For Docker installation, please refer to the [Docker official documentation](https://docs.docker.com/get-docker/). After the installation, please verify in the terminal that the Docker version can be printed normally. @@ -18,6 +18,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Install MYSQl with Docker ``` @@ -29,14 +30,14 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data mysql:5.7 ``` - `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. + `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. use ```$ docker ps``` to check if the database started successfully ### Database creation -1. Enter MYSQL or use the client to connect MYSQL service +1. Enter MYSQL or use the client to connect MYSQL service `mysql -uroot -p123456` -2. Create database named hertzbeat +2. 
Create database named hertzbeat `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` 3. Check if hertzbeat database has been successfully created `show databases;` @@ -72,6 +73,7 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data jpa: database: mysql ``` + - It is recommended to set the host field in the MySQL URL or Redis URL to the public IP address when using Hertzbeat in docker. -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit <http://ip:1157/> on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.5.x/start/package-deploy.md b/home/versioned_docs/version-v1.5.x/start/package-deploy.md index cbd42ad5bb9..ed26d0532e2 100644 --- a/home/versioned_docs/version-v1.5.x/start/package-deploy.md +++ b/home/versioned_docs/version-v1.5.x/start/package-deploy.md @@ -6,10 +6,10 @@ sidebar_label: Install via Package > You can install and run Apache HertzBeat (incubating) on Linux Windows Mac system, and CPU supports X86/ARM64. -1. Download HertzBeat installation package +1. Download HertzBeat installation package Download installation package `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` corresponding to your system environment - [Download Page](/docs/download) -2. Configure HertzBeat's configuration file(optional) +2. Configure HertzBeat's configuration file(optional) Unzip the installation package to the host eg: /opt/hertzbeat ``` @@ -18,27 +18,28 @@ sidebar_label: Install via Package $ unzip -o hertzbeat-xx.zip ``` - Modify the configuration file `hertzbeat/config/application.yml` params according to your needs.
- - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` + Modify the configuration file `hertzbeat/config/application.yml` params according to your needs. + - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - **Highly recommended** From now on we will mainly support VictoriaMetrics as a time-series database, if you need to use the time series database VictoriaMetrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using VictoriaMetrics to store metrics data](victoria-metrics-init) - - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) + - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) -3. Configure the account file(optional) - HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` - If you need add, delete or modify account or password, configure `hertzbeat/config/sureness.yml`. 
Ignore this step without this demand. +3. Configure the account file(optional) + HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` + If you need add, delete or modify account or password, configure `hertzbeat/config/sureness.yml`. Ignore this step without this demand. For detail steps, please refer to [Configure Account Password](account-modify) -4. Start the service +4. Start the service Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. ``` - $ ./startup.sh + ./startup.sh ``` + 5. Begin to explore HertzBeat - Access http://localhost:1157/ using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! + Access <http://localhost:1157/> using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! 6. Deploy collector clusters (Optional) @@ -56,7 +57,8 @@ sidebar_label: Install via Package manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - Run command `$ ./bin/startup.sh ` or `bin/startup.bat` + + - Run command `$ ./bin/startup.sh` or `bin/startup.bat` - Access `http://localhost:1157` and you will see the registered new collector in dashboard **HAVE FUN** @@ -65,9 +67,9 @@ sidebar_label: Install via Package 1. **If using the package not contains JDK, you need to prepare the JAVA environment in advance** - Install JAVA runtime environment-refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) - requirement:JDK17 ENV - download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) + Install JAVA runtime environment-refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) + requirement:JDK17 ENV + download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully.
``` @@ -77,21 +79,21 @@ sidebar_label: Install via Package Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) ``` -2. **According to the process deploy,visit http://ip:1157/ no interface** + +2. **According to the process deploy,visit <http://ip:1157/> no interface** Please refer to the following points to troubleshoot issues: > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. +> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> 1:Check whether database account and password configured is correct, the database is created. +> 1:Check whether database account and password configured is correct, the database is created. > 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. -4. **Monitoring historical charts with no data for a long time ** +4. **Monitoring historical charts with no data for a long time** > 1: Whether the time series database is configured or not, if it is not configured, there is no historical chart data. > 2: If you are using Tdengine, check whether the database `hertzbeat` of Tdengine is created. > 3: HertzBeat's configuration file `application.yml`, the dependent services in it, the time series, the IP account password, etc. are configured correctly.
- diff --git a/home/versioned_docs/version-v1.5.x/start/postgresql-change.md b/home/versioned_docs/version-v1.5.x/start/postgresql-change.md index ac63a41ebc9..26eff086cb6 100644 --- a/home/versioned_docs/version-v1.5.x/start/postgresql-change.md +++ b/home/versioned_docs/version-v1.5.x/start/postgresql-change.md @@ -10,7 +10,7 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition ### Install PostgreSQL via Docker -1. Download and install the Docker environment +1. Download and install the Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. @@ -18,10 +18,11 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition $ docker -v Docker version 20.10.12, build e91ed57 ``` + 2. Install PostgreSQL with Docker ``` - $ docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 + docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` use```$ docker ps```to check if the database started successfully @@ -36,7 +37,8 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition su - postgres psql ``` -2. Create database named hertzbeat + +2. Create database named hertzbeat `CREATE DATABASE hertzbeat;` 3. 
Check if hertzbeat database has been successfully created `\l` @@ -81,4 +83,4 @@ spring: dialect: org.hibernate.dialect.PostgreSQLDialect ``` -**Start HertzBeat visit http://ip:1157/ on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** +**Start HertzBeat visit on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat** diff --git a/home/versioned_docs/version-v1.5.x/start/quickstart.md b/home/versioned_docs/version-v1.5.x/start/quickstart.md index 2e10d8f72de..6cbaffc43a6 100644 --- a/home/versioned_docs/version-v1.5.x/start/quickstart.md +++ b/home/versioned_docs/version-v1.5.x/start/quickstart.md @@ -41,7 +41,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache 1. Download the release package `hertzbeat-xx.tar.gz` [Download](https://hertzbeat.apache.org/docs/download) 2. Configure the HertzBeat configuration yml file `hertzbeat/config/application.yml` (optional) -3. Run command `$ ./bin/startup.sh ` or `bin/startup.bat` +3. Run command `$ ./bin/startup.sh` or `bin/startup.bat` 4. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 5. 
Deploy collector clusters(Optional) - Download the release package `hertzbeat-collector-xx.tar.gz` to new machine [Download](https://hertzbeat.apache.org/docs/download) @@ -58,7 +58,8 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache manager-host: ${MANAGER_HOST:127.0.0.1} manager-port: ${MANAGER_PORT:1158} ``` - - Run command `$ ./bin/startup.sh ` or `bin/startup.bat` + + - Run command `$ ./bin/startup.sh` or `bin/startup.bat` - Access `http://localhost:1157` and you will see the registered new collector in dashboard Detailed config refer to [Install HertzBeat via Package](https://hertzbeat.apache.org/docs/start/package-deploy) diff --git a/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md b/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md index 26c7f6ecf32..919e1aa1669 100644 --- a/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md +++ b/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md @@ -12,7 +12,7 @@ This article introduces how to use the hertzbeat monitoring tool to detect the v Apache HertzBeat (incubating) is a real-time monitoring tool with powerful custom monitoring capabilities without Agent. Website monitoring, PING connectivity, port availability, database, operating system, middleware, API monitoring, threshold alarms, alarm notification (email, WeChat, Ding Ding Feishu). -github: https://github.com/apache/hertzbeat +github: #### Install HertzBeat @@ -77,7 +77,7 @@ github: https://github.com/apache/hertzbeat For token configuration such as Dingding WeChat Feishu, please refer to the help document -https://hertzbeat.apache.org/docs/help/alert_dingtalk + > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured @@ -87,8 +87,8 @@ https://hertzbeat.apache.org/docs/help/alert_dingtalk ---- -#### Finish! +#### Finish The practice of monitoring SSL certificates is here. 
Of course, for hertzbeat, this function is just the tip of the iceberg. If you think hertzbeat is a good open source project, please give us a Gitee star on GitHub, thank you very much. Thank you for your support. Refill! -**github: https://github.com/apache/hertzbeat** +**github: ** diff --git a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md index 4048520bfe2..0ec76f6c8f6 100644 --- a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md +++ b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md @@ -10,8 +10,8 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** +**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** +**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** Note⚠️ Need TDengine 3.x Version. > If you have TDengine environment, can directly skip to create a database instance. @@ -19,8 +19,9 @@ Note⚠️ Need TDengine 3.x Version. ### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +> +> 1. 
Download and install Docker environment +> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > > ``` @@ -37,8 +38,8 @@ Note⚠️ Need TDengine 3.x Version. > tdengine/tdengine:3.0.4.0 > ``` > -> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. -> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. +> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. +> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. > use```$ docker ps``` to check if the database started successfully ### Create database instance @@ -46,9 +47,10 @@ Note⚠️ Need TDengine 3.x Version. 1. Enter database Docker container ``` - $ docker exec -it tdengine /bin/bash + docker exec -it tdengine /bin/bash ``` -2. Create database named hertzbeat + +2. Create database named hertzbeat After entering the container,execute `taos` command as follows: ``` @@ -65,7 +67,7 @@ Note⚠️ Need TDengine 3.x Version. taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` - The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). + The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). A data file every 10 days, memory blocks buffer is 16MB. 3. Check if hertzbeat database has been created success @@ -81,9 +83,9 @@ Note⚠️ Need TDengine 3.x Version. ### Configure the database connection in hertzbeat `application.yml` configuration file -1. 
Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` +1. Configure HertzBeat's configuration file + Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.td-engine` data source parameters, URL account and password. ```yaml @@ -123,4 +125,3 @@ warehouse: > Is td-engine enable set to true > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed > You can check the startup logs according to the logs directory - diff --git a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md index 3d0c22901a8..b2ae6a65799 100644 --- a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md @@ -10,7 +10,7 @@ Apache HertzBeat (incubating)'s historical data storage relies on the time serie VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.Recommend Version(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** +**Note⚠️ Time series database is optional, but 
production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** **⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** > If you already have an VictoriaMetrics environment, you can skip directly to the YML configuration step. @@ -18,7 +18,8 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t ### Install VictoriaMetrics via Docker > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> 1. Download and install Docker environment +> +> 1. Download and install Docker environment > Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). > After the installation you can check if the Docker version normally output at the terminal. > @@ -41,8 +42,8 @@ use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` + Modify `hertzbeat/config/application.yml` configuration file + Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.victoria-metrics` data source parameters, HOST account and password. ```yaml @@ -66,4 +67,3 @@ warehouse: 1. Do both the time series databases need to be configured? Can they both be used? > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. 
You can also install and configure neither, which can affects the historical chart data. - diff --git a/home/versioned_docs/version-v1.5.x/template.md b/home/versioned_docs/version-v1.5.x/template.md index cee7aa05055..2359a43e51f 100644 --- a/home/versioned_docs/version-v1.5.x/template.md +++ b/home/versioned_docs/version-v1.5.x/template.md @@ -6,7 +6,7 @@ sidebar_label: Monitoring Template > Apache HertzBeat (incubating) is an open source, real-time monitoring tool with custom-monitor and agentLess. > -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. +> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. > Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? Here is the architecture. diff --git a/pom.xml b/pom.xml index c0ed26b9548..ef78581687a 100644 --- a/pom.xml +++ b/pom.xml @@ -482,39 +482,6 @@ - - com.diffplug.spotless - spotless-maven-plugin - ${spotless.version} - - false - - - home/docs/**/*.md - home/blog/**/*.md - home/i18n/**/*.md - - - - Markdown Formatter - (^-*\n$)([\s\S]*?)(-+$) - ---$2--- - - - - true - - - - - spotless-check - - check - - validate - - - From d056ce2aa69ee9af40813517c77137fc01546a03 Mon Sep 17 00:00:00 2001 From: Jast Date: Wed, 21 Aug 2024 23:19:14 +0800 Subject: [PATCH 215/257] [feature] support dead link check (#2579) Co-authored-by: tomsun28 --- .github/exclude_files.txt | 44 +++++++++++++++++++ .github/link_check.json | 20 +++++++++ .github/workflows/doc-build-test.yml | 8 ++++ home/docs/start/upgrade.md | 8 ++-- .../hertzbeat-mysql-iotdb/README.md | 2 +- .../hertzbeat-mysql-iotdb/README_CN.md | 2 +- .../hertzbeat-mysql-tdengine/README.md | 2 +- .../hertzbeat-mysql-tdengine/README_CN.md | 2 +- .../README.md | 2 +- .../README_CN.md | 2 +- 
.../README_CN.md | 2 +- 11 files changed, 83 insertions(+), 11 deletions(-) create mode 100644 .github/exclude_files.txt create mode 100644 .github/link_check.json diff --git a/.github/exclude_files.txt b/.github/exclude_files.txt new file mode 100644 index 00000000000..ce60d627e71 --- /dev/null +++ b/.github/exclude_files.txt @@ -0,0 +1,44 @@ +./home/versioned_docs/version-v1.5.x/community/how-to-release.md +./home/versioned_docs/version-v1.5.x/start/postgresql-change.md +./home/versioned_docs/version-v1.4.x/start/postgresql-change.md +./home/versioned_docs/version-v1.5.x/introduce.md +./home/versioned_docs/version-v1.4.x/introduce.md +./home/versioned_docs/version-v1.5.x/help/sqlserver.md +./home/versioned_docs/version-v1.4.x/help/sqlserver.md +./home/versioned_docs/version-v1.5.x/help/alert_threshold_expr.md +./home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md +./home/versioned_docs/version-v1.4.x/others/resource.md +./home/versioned_docs/version-v1.5.x/others/resource.md +./home/docs/community/how-to-release.md +./home/blog/2022-06-01-hertzbeat-v1.0.md +./home/blog/2023-12-11-hertzbeat-v1.4.3.md +./home/blog/2023-11-12-hertzbeat-v1.4.2.md +./home/blog/2024-01-18-hertzbeat-v1.4.4.md +./home/docs/introduce.md +./home/docs/help/sqlserver.md +./home/docs/postgresql-change.md +./home/docs/help/alert_threshold_expr.md +./home/docs/others/resource.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/sqlserver.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/current/postgresql-change.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_threshold_expr.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/current/others/resource.md +./home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md +./home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-01-hertzbeat-v1.0.md 
+./home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md +./home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/sqlserver.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_threshold_expr.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/others/resource.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md +./home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md \ No newline at end of file diff --git a/.github/link_check.json b/.github/link_check.json new file mode 100644 index 00000000000..7091f39379f --- /dev/null +++ b/.github/link_check.json @@ -0,0 +1,20 @@ +{ + "ignorePatterns": [ + { + "pattern": "^(?!http).*" + }, + { + "pattern": "!\\[.*\\]\\((?!http).*\\)" + } + ], + "timeout": "10s", + "retryOn429": true, + "retryCount": 10, + "fallbackRetryDelay": "1000s", + "aliveStatusCodes": [ + 0, + 200, + 401, + 403 + ] +} diff --git a/.github/workflows/doc-build-test.yml b/.github/workflows/doc-build-test.yml index e8b750618be..670da80af79 100644 --- 
a/.github/workflows/doc-build-test.yml +++ b/.github/workflows/doc-build-test.yml @@ -114,6 +114,14 @@ jobs: else echo "All file names are valid." fi + - name: Dead Link + run: | + sudo npm install -g markdown-link-check@3.8.7 + for file in $(find ./home -name "*.md"); do + if ! grep -Fxq "$file" .github/exclude_files.txt; then + markdown-link-check -c .github/link_check.json -q "$file" + fi + done - name: NPM INSTALL working-directory: home run: npm install diff --git a/home/docs/start/upgrade.md b/home/docs/start/upgrade.md index ebd4af61e70..63f048ca1b7 100644 --- a/home/docs/start/upgrade.md +++ b/home/docs/start/upgrade.md @@ -24,11 +24,11 @@ Apache HertzBeat (incubating)'s metadata information is stored in H2 or Mysql, P 2. If using the built-in default H2 database - Need to mount or back up `-v $(pwd)/data:/opt/hertzbeat/data` database file directory in the container `/opt/hertzbeat/data` - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - - Refer to [Docker installation of HertzBeat](docker-deploy) to create a new container using a new image. Note that the database file directory needs to be mounted `-v $(pwd)/data:/opt/hertzbeat/data` + - Refer to [Docker installation of HertzBeat](./docker-deploy) to create a new container using a new image. Note that the database file directory needs to be mounted `-v $(pwd)/data:/opt/hertzbeat/data` 3. 
If using external relational database Mysql, PostgreSQL - No need to mount the database file directory in the backup container - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - - Refer to [Docker installation HertzBeat](docker-deploy) to create a new container using the new image, and configure the database connection in `application.yml` + - Refer to [Docker installation HertzBeat](./docker-deploy) to create a new container using the new image, and configure the database connection in `application.yml` ### Upgrade For Package Deploy @@ -36,11 +36,11 @@ Apache HertzBeat (incubating)'s metadata information is stored in H2 or Mysql, P - Back up the database file directory under the installation package `/opt/hertzbeat/data` - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - - Refer to [Installation package to install HertzBeat](package-deploy) to start using the new installation package + - Refer to [Installation package to install HertzBeat](./package-deploy) to start using the new installation package 2. 
If using external relational database Mysql, PostgreSQL - No need to back up the database file directory under the installation package - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - - Refer to [Installation package to install HertzBeat](package-deploy) to start with the new installation package and configure the database connection in `application.yml` + - Refer to [Installation package to install HertzBeat](./package-deploy) to start with the new installation package and configure the database connection in `application.yml` **HAVE FUN** diff --git a/script/docker-compose/hertzbeat-mysql-iotdb/README.md b/script/docker-compose/hertzbeat-mysql-iotdb/README.md index 851016b5a3c..b5b94efcec2 100644 --- a/script/docker-compose/hertzbeat-mysql-iotdb/README.md +++ b/script/docker-compose/hertzbeat-mysql-iotdb/README.md @@ -15,7 +15,7 @@ ##### docker compose deploys hertzbeat and its dependent services 1. Download the hertzbeat-docker-compose installation deployment script file - The script file is located in `script/docker-compose/hertzbeat-mysql-iotdb` link [script/docker-compose](https://github.com/hertzbeat/hertzbeat/tree/master/script/docker-compose/ hertzbeat-mysql-iotdb) + The script file is located in `script/docker-compose/hertzbeat-mysql-iotdb` link [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/ hertzbeat-mysql-iotdb) 2. Add MYSQL jdbc driver jar diff --git a/script/docker-compose/hertzbeat-mysql-iotdb/README_CN.md b/script/docker-compose/hertzbeat-mysql-iotdb/README_CN.md index cc290c72040..9f145fe769b 100644 --- a/script/docker-compose/hertzbeat-mysql-iotdb/README_CN.md +++ b/script/docker-compose/hertzbeat-mysql-iotdb/README_CN.md @@ -17,7 +17,7 @@ ##### docker compose部署hertzbeat及其依赖服务 1. 
下载hertzbeat-docker-compose安装部署脚本文件 - 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-mysql-iotdb` 链接 [script/docker-compose](https://github.com/hertzbeat/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-iotdb) + 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-mysql-iotdb` 链接 [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-iotdb) 2. 添加 MYSQL jdbc 驱动 jar 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.0.25.jar. https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip diff --git a/script/docker-compose/hertzbeat-mysql-tdengine/README.md b/script/docker-compose/hertzbeat-mysql-tdengine/README.md index d8b165f1792..27c1febaba5 100644 --- a/script/docker-compose/hertzbeat-mysql-tdengine/README.md +++ b/script/docker-compose/hertzbeat-mysql-tdengine/README.md @@ -15,7 +15,7 @@ ##### docker compose deploys hertzbeat and its dependent services 1. Download the hertzbeat-docker-compose installation deployment script file - The script file is located in `script/docker-compose/hertzbeat-mysql-tdengine` link [script/docker-compose](https://github.com/hertzbeat/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-tdengine) + The script file is located in `script/docker-compose/hertzbeat-mysql-tdengine` link [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-tdengine) 2. Add MYSQL jdbc driver jar diff --git a/script/docker-compose/hertzbeat-mysql-tdengine/README_CN.md b/script/docker-compose/hertzbeat-mysql-tdengine/README_CN.md index 0f9f097c703..329344585d3 100644 --- a/script/docker-compose/hertzbeat-mysql-tdengine/README_CN.md +++ b/script/docker-compose/hertzbeat-mysql-tdengine/README_CN.md @@ -17,7 +17,7 @@ ##### docker compose部署hertzbeat及其依赖服务 1. 
下载hertzbeat-docker-compose安装部署脚本文件 - 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-mysql-tdengine` 链接 [script/docker-compose](https://github.com/hertzbeat/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-tdengine) + 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-mysql-tdengine` 链接 [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-tdengine) 2. 添加 MYSQL jdbc 驱动 jar 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.0.25.jar. https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip diff --git a/script/docker-compose/hertzbeat-mysql-victoria-metrics/README.md b/script/docker-compose/hertzbeat-mysql-victoria-metrics/README.md index 77227db014b..02a6829ea8f 100644 --- a/script/docker-compose/hertzbeat-mysql-victoria-metrics/README.md +++ b/script/docker-compose/hertzbeat-mysql-victoria-metrics/README.md @@ -15,7 +15,7 @@ ##### docker compose deploys hertzbeat and its dependent services 1. Download the hertzbeat-docker-compose installation deployment script file - The script file is located in `script/docker-compose/hertzbeat-mysql-victoria-metrics` link [script/docker-compose](https://github.com/hertzbeat/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-victoria-metrics) + The script file is located in `script/docker-compose/hertzbeat-mysql-victoria-metrics` link [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-victoria-metrics) 2. Add MYSQL jdbc driver jar diff --git a/script/docker-compose/hertzbeat-mysql-victoria-metrics/README_CN.md b/script/docker-compose/hertzbeat-mysql-victoria-metrics/README_CN.md index 3e290238521..6602b34a318 100644 --- a/script/docker-compose/hertzbeat-mysql-victoria-metrics/README_CN.md +++ b/script/docker-compose/hertzbeat-mysql-victoria-metrics/README_CN.md @@ -17,7 +17,7 @@ ##### docker compose部署hertzbeat及其依赖服务 1. 
下载hertzbeat-docker-compose安装部署脚本文件 - 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-mysql-victoria-metrics` 链接 [script/docker-compose](https://github.com/hertzbeat/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-mysql-victoria-metrics) + 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-mysql-victoria-metrics` 链接 [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-mysql-victoria-metrics) 2. 添加 MYSQL jdbc 驱动 jar 下载 MYSQL jdbc driver jar, 例如 mysql-connector-java-8.0.25.jar. https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip diff --git a/script/docker-compose/hertzbeat-postgresql-victoria-metrics/README_CN.md b/script/docker-compose/hertzbeat-postgresql-victoria-metrics/README_CN.md index 88d6ae2f6f6..f809587e05d 100644 --- a/script/docker-compose/hertzbeat-postgresql-victoria-metrics/README_CN.md +++ b/script/docker-compose/hertzbeat-postgresql-victoria-metrics/README_CN.md @@ -17,7 +17,7 @@ ##### docker compose部署hertzbeat及其依赖服务 1. 下载hertzbeat-docker-compose安装部署脚本文件 - 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-postgre-victoria-metrics` 链接 [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgre-victoria-metrics) + 脚本文件位于代码仓库下`script/docker-compose/hertzbeat-postgre-victoria-metrics` 链接 [script/docker-compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgresql-victoria-metrics) 2. 
进入部署脚本 docker-compose 目录, 执行 From 97aada99569864117bd4f65e90e619fa6df8251b Mon Sep 17 00:00:00 2001 From: aias00 Date: Thu, 22 Aug 2024 16:48:43 +0800 Subject: [PATCH 216/257] [feature] add vastbase help md (#2569) Signed-off-by: aias00 --- home/docs/help/vastbase.md | 56 +++++++++++++++++++ .../current/help/vastbase.md | 56 +++++++++++++++++++ home/sidebars.json | 1 + 3 files changed, 113 insertions(+) create mode 100644 home/docs/help/vastbase.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md diff --git a/home/docs/help/vastbase.md b/home/docs/help/vastbase.md new file mode 100644 index 00000000000..cd0374fc777 --- /dev/null +++ b/home/docs/help/vastbase.md @@ -0,0 +1,56 @@ +--- +id: vastbase +title: Monitoring:Vastbase database monitoring +sidebar_label: Vastbase database +keywords: [open source monitoring tool, open source database monitoring tool, monitoring vastbase database metrics] +--- + +> Collect and monitor the general performance Metrics of PostgreSQL database. Support PostgreSQL 10+. + +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored Host address. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. 
The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | + +### Collection Metric + +#### Metric set:basic + +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | + +#### Metric set:state + +| Metric name | Metric unit | Metric help description | +|----------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in 
the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the Vastbase buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | + +#### Metric set:activity + +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | connections | Number of current client connections | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md new file mode 100644 index 00000000000..f8e2d76aa55 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md @@ -0,0 +1,56 @@ +--- +id: vastbase +title: 监控:Vastbase 数据库监控 +sidebar_label: Vastbase 数据库 +keywords: [开源监控系统, 开源数据库监控, Vastbase 数据库监控] +--- + +> 对 Vastbase 数据库的通用性能指标进行采集监控。支持Vastbase 9.2.4+。 + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|--------|-------------------------------------------------| +| 监控Host | 被监控的 Host 地址。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置 SQL 查询未响应数据时的超时时间,单位 ms 毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | + +### 采集指标 + +#### 指标集合:basic + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 
数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | + +#### 指标集合:state + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|------------------------------------------------------------------------| +| name | 无 | 数据库名称,或 share-object 为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 Vastbase 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | + +#### 指标集合:activity + +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | + diff --git a/home/sidebars.json b/home/sidebars.json index 90ca715fbc8..3f78b354d29 100755 --- a/home/sidebars.json +++ b/home/sidebars.json @@ -206,6 +206,7 @@ "help/mysql", "help/mariadb", "help/postgresql", + "help/vastbase", "help/kingbase", "help/sqlserver", "help/oracle", From fff291da821231fbfea64098542e9465ed641a56 Mon Sep 17 00:00:00 2001 From: Jast Date: Thu, 22 Aug 2024 22:53:48 +0800 Subject: [PATCH 217/257] [Improve] improve markdown format for MD024 (#2580) Co-authored-by: tomsun28 --- .markdownlint-cli2.jsonc | 4 +++- home/blog/2022-12-19-new-committer.md | 4 ++-- home/docs/help/flink_on_yarn.md | 2 +- home/docs/help/iceberg.md | 1 - home/docs/help/kingbase.md | 1 - .../2022-09-04-hertzbeat-v1.1.3.md | 3 +-- .../2022-12-19-new-committer.md | 4 ++-- .../docusaurus-plugin-content-docs/current/help/iceberg.md | 1 - .../docusaurus-plugin-content-docs/current/help/kingbase.md | 1 - 9 files changed, 9 insertions(+), 12 deletions(-) diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 9dddffec75a..97a129352eb 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -21,7 +21,9 @@ "MD052": false, "MD003": false, "MD013": false, - "MD024": false, + "MD024": { + "siblings_only": true + }, 
"MD025": false, "MD029": false, "MD033": false, diff --git a/home/blog/2022-12-19-new-committer.md b/home/blog/2022-12-19-new-committer.md index 7acfd0f5aac..3a324e7817e 100644 --- a/home/blog/2022-12-19-new-committer.md +++ b/home/blog/2022-12-19-new-committer.md @@ -9,7 +9,7 @@ tags: [opensource] > 非常高兴 HertzBeat 迎来了两位新晋社区Committer, 两位都是来自互联网公司的开发工程师,让我们来了解下他们的开源经历吧! -## 第一位 花城 +# 第一位 花城 姓名:王庆华 @@ -51,7 +51,7 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) --- --- -## 第二位 星辰 +# 第二位 星辰 姓名:郑晨鑫 diff --git a/home/docs/help/flink_on_yarn.md b/home/docs/help/flink_on_yarn.md index 9340b7681aa..cda9abe8201 100644 --- a/home/docs/help/flink_on_yarn.md +++ b/home/docs/help/flink_on_yarn.md @@ -123,7 +123,7 @@ | memoryConfigurationTotalFlinkMemory | Bytes | Total Flink memory configuration | | memoryConfigurationTotalProcessMemory | Bytes | Total process memory configuration | -#### TaskManager Metrics +#### TaskManager Status Metrics | Metric Name | Metric Unit | Metric Help Description | |-----------------------------------|-------------|------------------------------------| diff --git a/home/docs/help/iceberg.md b/home/docs/help/iceberg.md index 5b63bca574d..ceba73377af 100644 --- a/home/docs/help/iceberg.md +++ b/home/docs/help/iceberg.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | init | MB | The initial amount of memory requested for the memory pool. | | max | MB | The maximum amount of memory that can be allocated for the memory pool. | | used | MB | The amount of memory currently being used by the memory pool. 
| - diff --git a/home/docs/help/kingbase.md b/home/docs/help/kingbase.md index ccdb5cc0391..7a0b69f0971 100644 --- a/home/docs/help/kingbase.md +++ b/home/docs/help/kingbase.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md index cc08092df91..1d7bf0a0042 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md @@ -70,5 +70,4 @@ Bugfix. Online . -Have Fun ---- +Have Fun \ No newline at end of file diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md index 7acfd0f5aac..3a324e7817e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md @@ -9,7 +9,7 @@ tags: [opensource] > 非常高兴 HertzBeat 迎来了两位新晋社区Committer, 两位都是来自互联网公司的开发工程师,让我们来了解下他们的开源经历吧! 
-## 第一位 花城 +# 第一位 花城 姓名:王庆华 @@ -51,7 +51,7 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) --- --- -## 第二位 星辰 +# 第二位 星辰 姓名:郑晨鑫 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md index 5ee2f434e96..8bb6a51aeab 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iceberg.md @@ -74,4 +74,3 @@ hive --service hiveserver2 & | 内存池初始内存 | MB | 内存池请求的初始内存量。 | | 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | | 内存池内存使用量 | MB | 内存池已使用内存量 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md index b09187ff456..febaa81db55 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kingbase.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, Kingbase数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - From eea68d2a27575137427ee31d2a3d71bd04ebeab8 Mon Sep 17 00:00:00 2001 From: aias00 Date: Thu, 22 Aug 2024 23:13:19 +0800 Subject: [PATCH 218/257] [feature] add greenplum help md (#2570) Signed-off-by: aias00 --- home/docs/help/greenplum.md | 56 +++++++++++++++++++ .../current/help/greenplum.md | 56 +++++++++++++++++++ home/sidebars.json | 1 + 3 files changed, 113 insertions(+) create mode 100644 home/docs/help/greenplum.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md diff --git a/home/docs/help/greenplum.md b/home/docs/help/greenplum.md new file mode 100644 index 00000000000..761e256317d --- /dev/null +++ b/home/docs/help/greenplum.md @@ -0,0 +1,56 @@ +--- +id: greenplum +title: Monitoring:GreenPlum database monitoring +sidebar_label: GreenPlum database +keywords: [open source monitoring tool, open source 
database monitoring tool, monitoring greenplum database metrics] +--- + +> Collect and monitor the general performance Metrics of GreenPlum database. Support GreenPlum 6.23.0+. + +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored Host address. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| Port | Port provided by the database. The default is 5432 | +| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | +| Database name | Database instance name, optional | +| Username | Database connection user name, optional | +| Password | Database connection password, optional | +| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | + +### Collection Metric + +#### Metric set:basic + +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|-------------------------------------------| +| server_version | none | Version number of the database server | +| port | none | Database server exposure service port | +| server_encoding | none | Character set encoding of database server | +| data_directory | none | Database storage data disk address | +| max_connections | connections | Database maximum connections | + +#### Metric set:state + +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | none | Database name, or share-object is a shared object | +| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | +| deadlocks | number | Number of deadlocks detected in the database | +| blks_read | times | The number of disk blocks read in the database | +| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the GreenPlum buffer, not in the operating system file system buffer) | +| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | +| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | +| stats_reset | none | The last time these statistics were reset | + +#### Metric set:activity + +| Metric name | Metric unit | Metric help description | +|-------------|-------------|--------------------------------------| +| running | 
connections | Number of current client connections | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md new file mode 100644 index 00000000000..2d971964799 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md @@ -0,0 +1,56 @@ +--- +id: greenplum +title: 监控:GreenPlum 数据库监控 +sidebar_label: GreenPlum 数据库 +keywords: [开源监控系统, 开源数据库监控, GreenPlum 数据库监控] +--- + +> 对 GreenPlum 数据库的通用性能指标进行采集监控。支持 GreenPlum 6.23.0+。 + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|--------|---------------------------------------------------| +| 监控Host | 被监控的 Host 地址。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为5432。 | +| 查询超时时间 | 设置 SQL 查询未响应数据时的超时时间,单位 ms 毫秒,默认3000毫秒。 | +| 数据库名称 | 数据库实例名称,可选。 | +| 用户名 | 数据库连接用户名,可选 | +| 密码 | 数据库连接密码,可选 | +| URL | 数据库连接URL,可选,若配置,则 URL 里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | + +### 采集指标 + +#### 指标集合:basic + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|---------------| +| server_version | 无 | 数据库服务器的版本号 | +| port | 无 | 数据库服务器端暴露服务端口 | +| server_encoding | 无 | 数据库服务器端的字符集编码 | +| data_directory | 无 | 数据库存储数据盘地址 | +| max_connections | 连接数 | 数据库最大连接数 | + +#### 指标集合:state + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|-------------------------------------------------------------------------| +| name | 无 | 数据库名称,或 share-object 为共享对象。 | +| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | +| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | +| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | +| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 GreenPlum 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | +| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | +| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | +| stats_reset | 无 | 这些统计信息上次被重置的时间 | + +#### 指标集合:activity + +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|---------|------|----------| +| running | 连接数 | 当前客户端连接数 | + diff --git a/home/sidebars.json b/home/sidebars.json index 3f78b354d29..3d7678008ae 100755 --- a/home/sidebars.json +++ b/home/sidebars.json @@ -206,6 +206,7 @@ "help/mysql", "help/mariadb", "help/postgresql", + "help/greenplum", "help/vastbase", "help/kingbase", "help/sqlserver", From bad28bccdb230acafdaab31ad1ac2d92dbe11c4c Mon Sep 17 00:00:00 2001 From: YuLuo Date: Thu, 22 Aug 2024 23:49:02 +0800 Subject: [PATCH 219/257] [improve] extra project configuration common filed to constants (#2540) Signed-off-by: yuluo-yx Co-authored-by: Calvin Co-authored-by: tomsun28 --- .../hertzbeat/alert/AlerterProperties.java | 26 +- .../config/AlerterAutoConfiguration.java | 6 +- .../config/CollectorAutoConfiguration.java | 6 +- .../dispatch/CollectorInfoProperties.java | 7 +- .../dispatch/DispatchProperties.java | 11 +- .../common/config/BaseKafkaProperties.java | 46 ++++ .../hertzbeat/common/config/CommonConfig.java | 6 +- .../common/config/CommonProperties.java | 258 ++---------------- .../common/constants/ConfigConstants.java | 61 +++++ .../common/constants/SignConstants.java | 2 + .../manager/config/AiProperties.java | 7 +- .../manager/config/StatusProperties.java | 25 +- .../scheduler/SchedulerProperties.java | 42 +-- .../push/config/PushAutoConfiguration.java | 6 +- .../config/WarehouseAutoConfiguration.java | 6 +- .../constants/WarehouseConstants.java | 59 ++++ .../history/greptime/GreptimeProperties.java | 10 +- .../history/influxdb/InfluxdbProperties.java | 10 +- .../store/history/iotdb/IotDbProperties.java | 12 +- .../store/history/jpa/JpaProperties.java | 10 +- .../history/tdengine/TdEngineProperties.java | 10 +- .../vm/VictoriaMetricsClusterProperties.java | 10 +- .../history/vm/VictoriaMetricsProperties.java | 12 +- .../realtime/memory/MemoryProperties.java | 10 +- .../store/realtime/redis/RedisProperties.java | 10 +- 25 files changed, 341 insertions(+), 327 deletions(-) create mode 100644 
common/src/main/java/org/apache/hertzbeat/common/config/BaseKafkaProperties.java create mode 100644 common/src/main/java/org/apache/hertzbeat/common/constants/ConfigConstants.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/constants/WarehouseConstants.java diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/AlerterProperties.java b/alerter/src/main/java/org/apache/hertzbeat/alert/AlerterProperties.java index 3e7607649d8..73cdd212e56 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/AlerterProperties.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/AlerterProperties.java @@ -19,16 +19,20 @@ import lombok.Getter; import lombok.Setter; +import org.apache.hertzbeat.common.config.BaseKafkaProperties; +import org.apache.hertzbeat.common.constants.ConfigConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.stereotype.Component; /** * alerter prop config */ -@Component -@ConfigurationProperties(prefix = "alerter") + @Getter @Setter +@Component +@ConfigurationProperties(prefix = + ConfigConstants.FunctionModuleConstants.ALERTER) public class AlerterProperties { /** @@ -92,25 +96,13 @@ public static class EntranceProperties { */ @Getter @Setter - public static class KafkaProperties { + public static class KafkaProperties extends BaseKafkaProperties { + /** * Whether the kafka data entry is started */ private boolean enabled = true; - - /** - * kafka's connection server url - */ - private String servers = "127.0.0.1:9092"; - /** - * The name of the topic that receives the data - */ - private String topic; - /** - * Consumer Group ID - */ - private String groupId; - } } + } diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/config/AlerterAutoConfiguration.java b/alerter/src/main/java/org/apache/hertzbeat/alert/config/AlerterAutoConfiguration.java index f9f78661210..f75e01aacc6 100644 --- 
a/alerter/src/main/java/org/apache/hertzbeat/alert/config/AlerterAutoConfiguration.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/config/AlerterAutoConfiguration.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.alert.config; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.springframework.boot.autoconfigure.AutoConfiguration; import org.springframework.context.annotation.ComponentScan; @@ -25,6 +27,8 @@ */ @AutoConfiguration -@ComponentScan(basePackages = "org.apache.hertzbeat.alert") +@ComponentScan(basePackages = ConfigConstants.PkgConstant.PKG + + SignConstants.DOT + + ConfigConstants.FunctionModuleConstants.ALERT) public class AlerterAutoConfiguration { } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/config/CollectorAutoConfiguration.java b/collector/src/main/java/org/apache/hertzbeat/collector/config/CollectorAutoConfiguration.java index a94d74488ba..0d5ccdaaf30 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/config/CollectorAutoConfiguration.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/config/CollectorAutoConfiguration.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.collector.config; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.springframework.boot.autoconfigure.AutoConfiguration; import org.springframework.context.annotation.ComponentScan; @@ -26,6 +28,8 @@ */ @AutoConfiguration -@ComponentScan(basePackages = "org.apache.hertzbeat.collector") +@ComponentScan(basePackages = ConfigConstants.PkgConstant.PKG + + SignConstants.DOT + + ConfigConstants.FunctionModuleConstants.COLLECTOR) public class CollectorAutoConfiguration { } diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/CollectorInfoProperties.java 
b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/CollectorInfoProperties.java index d23d3f825c7..233908afe1d 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/CollectorInfoProperties.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/CollectorInfoProperties.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.collector.dispatch; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.stereotype.Component; @@ -24,9 +26,10 @@ * Collector info configuration Properties */ @Component -@ConfigurationProperties(prefix = CollectorInfoProperties.INFO_PREFIX) +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.COLLECTOR + + SignConstants.DOT + + ConfigConstants.FunctionModuleConstants.INFO) public class CollectorInfoProperties { - protected static final String INFO_PREFIX = "collector.info"; private String version; private String ip; diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/DispatchProperties.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/DispatchProperties.java index a844f0569e8..4f22b52745e 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/DispatchProperties.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/DispatchProperties.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.collector.dispatch; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.stereotype.Component; @@ -24,14 +26,11 @@ * Schedule Distribution Task Configuration Properties */ @Component -@ConfigurationProperties(prefix = DispatchProperties.DISPATCH_PREFIX) 
+@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.COLLECTOR + + SignConstants.DOT + + ConfigConstants.FunctionModuleConstants.DISPATCH) public class DispatchProperties { - /** - * Schedule Distribution Task Configuration Properties - */ - protected static final String DISPATCH_PREFIX = "collector.dispatch"; - /** * Scheduling entry configuration properties */ diff --git a/common/src/main/java/org/apache/hertzbeat/common/config/BaseKafkaProperties.java b/common/src/main/java/org/apache/hertzbeat/common/config/BaseKafkaProperties.java new file mode 100644 index 00000000000..24d411cff26 --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/config/BaseKafkaProperties.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.common.config; + +import lombok.Getter; +import lombok.Setter; + +/** + * Kafka properties + */ + +@Getter +@Setter +public class BaseKafkaProperties { + + /** + * kafka's connection server url + */ + private String servers = "127.0.0.1:9092"; + + /** + * The name of the topic that receives the data + */ + private String topic; + + /** + * Consumer Group ID + */ + private String groupId; + +} diff --git a/common/src/main/java/org/apache/hertzbeat/common/config/CommonConfig.java b/common/src/main/java/org/apache/hertzbeat/common/config/CommonConfig.java index fa51f981e45..81b5892f6f0 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/config/CommonConfig.java +++ b/common/src/main/java/org/apache/hertzbeat/common/config/CommonConfig.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.common.config; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.apache.hertzbeat.common.util.AesUtil; import org.springframework.boot.autoconfigure.AutoConfiguration; import org.springframework.boot.context.properties.EnableConfigurationProperties; @@ -27,7 +29,9 @@ */ @AutoConfiguration -@ComponentScan(basePackages = "org.apache.hertzbeat.common") +@ComponentScan(basePackages = ConfigConstants.PkgConstant.PKG + + SignConstants.DOT + + ConfigConstants.FunctionModuleConstants.COMMON) @EnableConfigurationProperties(CommonProperties.class) public class CommonConfig { diff --git a/common/src/main/java/org/apache/hertzbeat/common/config/CommonProperties.java b/common/src/main/java/org/apache/hertzbeat/common/config/CommonProperties.java index 16eda21c73e..56cc91dccdf 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/config/CommonProperties.java +++ b/common/src/main/java/org/apache/hertzbeat/common/config/CommonProperties.java @@ -17,12 +17,19 @@ package org.apache.hertzbeat.common.config; +import lombok.Getter; +import lombok.Setter; +import 
org.apache.hertzbeat.common.constants.ConfigConstants; import org.springframework.boot.context.properties.ConfigurationProperties; /** * common module properties */ -@ConfigurationProperties(prefix = "common") + +@Getter +@Setter +@ConfigurationProperties(prefix = + ConfigConstants.FunctionModuleConstants.COMMON) public class CommonProperties { /** @@ -40,33 +47,11 @@ public class CommonProperties { */ private SmsProperties sms; - public String getSecret() { - return secret; - } - - public DataQueueProperties getQueue() { - return queue; - } - - public SmsProperties getSms() { - return sms; - } - - public void setSecret(String secret) { - this.secret = secret; - } - - public void setQueue(DataQueueProperties queue) { - this.queue = queue; - } - - public void setSms(SmsProperties sms) { - this.sms = sms; - } - /** * data queue properties */ + @Getter + @Setter public static class DataQueueProperties { private QueueType type = QueueType.Memory; @@ -75,31 +60,6 @@ public static class DataQueueProperties { private RedisProperties redis; - public QueueType getType() { - return type; - } - - public void setType(QueueType type) { - this.type = type; - } - - public KafkaProperties getKafka() { - return kafka; - } - - public void setKafka(KafkaProperties kafka) { - this.kafka = kafka; - } - - public RedisProperties getRedis() { - - return redis; - } - - public void setRedis(RedisProperties redis) { - - this.redis = redis; - } } /** @@ -121,6 +81,8 @@ public enum QueueType { /** * redis data queue properties */ + @Getter + @Setter public static class RedisProperties { /** @@ -153,76 +115,15 @@ public static class RedisProperties { */ private String alertsDataQueueName; - public int getRedisPort() { - - return redisPort; - } - - public void setRedisPort(int redisPort) { - - this.redisPort = redisPort; - } - - public String getRedisHost() { - - return redisHost; - } - - public void setRedisHost(String redisHost) { - - this.redisHost = redisHost; - } - - public String 
getMetricsDataQueueNameToAlerter() { - - return metricsDataQueueNameToAlerter; - } - - public void setMetricsDataQueueNameToAlerter(String metricsDataQueueNameToAlerter) { - - this.metricsDataQueueNameToAlerter = metricsDataQueueNameToAlerter; - } - - public String getMetricsDataQueueNameToPersistentStorage() { - - return metricsDataQueueNameToPersistentStorage; - } - - public void setMetricsDataQueueNameToPersistentStorage(String metricsDataQueueNameToPersistentStorage) { - - this.metricsDataQueueNameToPersistentStorage = metricsDataQueueNameToPersistentStorage; - } - - public String getMetricsDataQueueNameToRealTimeStorage() { - - return metricsDataQueueNameToRealTimeStorage; - } - - public void setMetricsDataQueueNameToRealTimeStorage(String metricsDataQueueNameToRealTimeStorage) { - - this.metricsDataQueueNameToRealTimeStorage = metricsDataQueueNameToRealTimeStorage; - } - - public String getAlertsDataQueueName() { - - return alertsDataQueueName; - } - - public void setAlertsDataQueueName(String alertsDataQueueName) { - - this.alertsDataQueueName = alertsDataQueueName; - } - } /** * kafka data queue properties */ - public static class KafkaProperties { - /** - * kafka's connection server url - */ - private String servers; + @Getter + @Setter + public static class KafkaProperties extends BaseKafkaProperties { + /** * metrics data topic */ @@ -231,61 +132,25 @@ public static class KafkaProperties { * alerts data topic */ private String alertsDataTopic; - - public String getServers() { - return servers; - } - - public void setServers(String servers) { - this.servers = servers; - } - - public String getMetricsDataTopic() { - return metricsDataTopic; - } - - public void setMetricsDataTopic(String metricsDataTopic) { - this.metricsDataTopic = metricsDataTopic; - } - - public String getAlertsDataTopic() { - return alertsDataTopic; - } - - public void setAlertsDataTopic(String alertsDataTopic) { - this.alertsDataTopic = alertsDataTopic; - } } /** * sms properties */ + 
@Getter + @Setter public static class SmsProperties { //Tencent cloud SMS configuration private TencentSmsProperties tencent; //Ali cloud SMS configuration private AliYunSmsProperties aliYun; - - public TencentSmsProperties getTencent() { - return tencent; - } - - public void setTencent(TencentSmsProperties tencent) { - this.tencent = tencent; - } - - public AliYunSmsProperties getAliYun() { - return aliYun; - } - - public void setAliYun(AliYunSmsProperties aliYun) { - this.aliYun = aliYun; - } } /** * tencent sms properties */ + @Getter + @Setter public static class TencentSmsProperties { /** @@ -312,51 +177,13 @@ public static class TencentSmsProperties { * SMS template ID */ private String templateId; - - public String getSecretId() { - return secretId; - } - - public void setSecretId(String secretId) { - this.secretId = secretId; - } - - public String getSecretKey() { - return secretKey; - } - - public void setSecretKey(String secretKey) { - this.secretKey = secretKey; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getSignName() { - return signName; - } - - public void setSignName(String signName) { - this.signName = signName; - } - - public String getTemplateId() { - return templateId; - } - - public void setTemplateId(String templateId) { - this.templateId = templateId; - } } /** * aliYun sms properties */ + @Getter + @Setter public static class AliYunSmsProperties { /** @@ -383,45 +210,6 @@ public static class AliYunSmsProperties { * ID of the SMS template */ private String templateId; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getSecretId() { - return secretId; - } - - public void setSecretId(String secretId) { - this.secretId = secretId; - } - - public String getSecretKey() { - return secretKey; - } - - public void setSecretKey(String secretKey) { - this.secretKey = 
secretKey; - } - - public String getSignName() { - return signName; - } - - public void setSignName(String signName) { - this.signName = signName; - } - - public String getTemplateId() { - return templateId; - } - - public void setTemplateId(String templateId) { - this.templateId = templateId; - } } + } diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/ConfigConstants.java b/common/src/main/java/org/apache/hertzbeat/common/constants/ConfigConstants.java new file mode 100644 index 00000000000..5703a75d07d --- /dev/null +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/ConfigConstants.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.common.constants; + +/** + * Extract all public strings that the project needs to configure in yml. + */ + +public interface ConfigConstants { + + /** + * Package name constant. + */ + interface PkgConstant { + String PKG = "org.apache.hertzbeat"; + } + + /** + * hertzbeat project module constant. 
+ */ + interface FunctionModuleConstants { + + String ALERT = "alert"; + + String ALERTER = "alerter"; + + String COLLECTOR = "collector"; + + String COMMON = "common"; + + String WAREHOUSE = "warehouse"; + + String AI = "ai"; + + String STATUS = "status"; + + String SCHEDULER = "scheduler"; + + String PUSH = "push"; + + String DISPATCH = "dispatch"; + + String INFO = "info"; + } + +} diff --git a/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java b/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java index 1918ab45a16..b69e1e16069 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java +++ b/common/src/main/java/org/apache/hertzbeat/common/constants/SignConstants.java @@ -37,4 +37,6 @@ public interface SignConstants { String COMMA = ","; String BLANK = " "; + + String DOT = "."; } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java b/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java index d8628493b75..8725d3520a2 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/config/AiProperties.java @@ -18,15 +18,18 @@ package org.apache.hertzbeat.manager.config; import lombok.Data; +import org.apache.hertzbeat.common.constants.ConfigConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.annotation.Configuration; /** * AiProperties */ -@Configuration -@ConfigurationProperties(prefix = "ai") + @Data +@Configuration +@ConfigurationProperties(prefix = + ConfigConstants.FunctionModuleConstants.AI) public class AiProperties { /** diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/config/StatusProperties.java b/manager/src/main/java/org/apache/hertzbeat/manager/config/StatusProperties.java index cc41b91e8b7..7a2d9daa474 100644 --- 
a/manager/src/main/java/org/apache/hertzbeat/manager/config/StatusProperties.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/config/StatusProperties.java @@ -17,14 +17,20 @@ package org.apache.hertzbeat.manager.config; +import lombok.Getter; +import lombok.Setter; +import org.apache.hertzbeat.common.constants.ConfigConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.stereotype.Component; /** * status page properties */ -@ConfigurationProperties(prefix = "status") + +@Getter +@Setter @Component +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.STATUS) public class StatusProperties { /** @@ -32,17 +38,11 @@ public class StatusProperties { */ private CalculateProperties calculate; - public CalculateProperties getCalculate() { - return calculate; - } - - public void setCalculate(CalculateProperties calculate) { - this.calculate = calculate; - } - /** * calculate component status properties */ + @Getter + @Setter public static class CalculateProperties { /** @@ -50,13 +50,6 @@ public static class CalculateProperties { */ private Integer interval = 300; - public Integer getInterval() { - return interval; - } - - public void setInterval(Integer interval) { - this.interval = interval; - } } } diff --git a/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/SchedulerProperties.java b/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/SchedulerProperties.java index 54bf19d95d0..826a8049275 100644 --- a/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/SchedulerProperties.java +++ b/manager/src/main/java/org/apache/hertzbeat/manager/scheduler/SchedulerProperties.java @@ -17,29 +17,30 @@ package org.apache.hertzbeat.manager.scheduler; +import lombok.Getter; +import lombok.Setter; +import org.apache.hertzbeat.common.constants.ConfigConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import 
org.springframework.stereotype.Component; /** * scheduler properties config */ + +@Getter +@Setter @Component -@ConfigurationProperties(prefix = "scheduler") +@ConfigurationProperties(prefix = + ConfigConstants.FunctionModuleConstants.SCHEDULER) public class SchedulerProperties { private ServerProperties server; - - public ServerProperties getServer() { - return server; - } - - public void setServer(ServerProperties server) { - this.server = server; - } /** * server properties */ + @Getter + @Setter public static class ServerProperties { private boolean enabled = true; @@ -52,30 +53,7 @@ public static class ServerProperties { * unit: s */ private int idleStateEventTriggerTime = 100; - - public boolean isEnabled() { - return enabled; - } - - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public int getPort() { - return port; - } - - public void setPort(int port) { - this.port = port; - } - - public int getIdleStateEventTriggerTime() { - return idleStateEventTriggerTime; - } - public void setIdleStateEventTriggerTime(int idleStateEventTriggerTime) { - this.idleStateEventTriggerTime = idleStateEventTriggerTime; - } } } diff --git a/push/src/main/java/org/apache/hertzbeat/push/config/PushAutoConfiguration.java b/push/src/main/java/org/apache/hertzbeat/push/config/PushAutoConfiguration.java index 372897d594e..342dfaa42c6 100644 --- a/push/src/main/java/org/apache/hertzbeat/push/config/PushAutoConfiguration.java +++ b/push/src/main/java/org/apache/hertzbeat/push/config/PushAutoConfiguration.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.push.config; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.springframework.boot.autoconfigure.AutoConfiguration; import org.springframework.context.annotation.ComponentScan; @@ -25,6 +27,8 @@ */ @AutoConfiguration -@ComponentScan(basePackages = "org.apache.hertzbeat.push") +@ComponentScan(basePackages = 
ConfigConstants.PkgConstant.PKG + + SignConstants.DOT + + ConfigConstants.FunctionModuleConstants.PUSH) public class PushAutoConfiguration { } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java index 72a8a1d515e..ff8ef5ca20d 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.warehouse.config; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; import org.springframework.boot.autoconfigure.AutoConfiguration; import org.springframework.context.annotation.ComponentScan; @@ -26,6 +28,8 @@ */ @AutoConfiguration -@ComponentScan(basePackages = "org.apache.hertzbeat.warehouse") +@ComponentScan(basePackages = ConfigConstants.PkgConstant.PKG + + SignConstants.DOT + + ConfigConstants.FunctionModuleConstants.WAREHOUSE) public class WarehouseAutoConfiguration { } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/constants/WarehouseConstants.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/constants/WarehouseConstants.java new file mode 100644 index 00000000000..aee6e4ff95e --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/constants/WarehouseConstants.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.constants; + +/** + * Warehouse configuration constants. + */ + +public interface WarehouseConstants { + + String STORE = "store"; + + String REAL_TIME = "real-time"; + + /** + * History database name. + */ + interface HistoryName { + String GREPTIME = "greptime"; + + String INFLUXDB = "influxdb"; + + String IOT_DB = "iot-db"; + + String JPA = "jpa"; + + String TD_ENGINE = "td-engine"; + + String VM = "victoria-metrics"; + + String VM_CLUSTER = "victoria-metrics.cluster"; + } + + /** + * Real-time database name. + */ + interface RealTimeName { + + String REDIS = "redis"; + + String MEMORY = "memory"; + } + +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeProperties.java index bd02be75c92..8b9bcf2619a 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/greptime/GreptimeProperties.java @@ -17,13 +17,21 @@ package org.apache.hertzbeat.warehouse.store.history.greptime; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; /** * GrepTimeDB 
configuration information */ -@ConfigurationProperties(prefix = "warehouse.store.greptime") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.STORE + + SignConstants.DOT + + WarehouseConstants.HistoryName.GREPTIME) public record GreptimeProperties(@DefaultValue("false") boolean enabled, @DefaultValue("127.0.0.1:4001") String grpcEndpoints, @DefaultValue("jdbc:mysql://127.0.0.1:4002/hertzbeat?connectionTimeZone=Asia/Shanghai&forceConnectionTimeZoneToSession=true") String url, diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbProperties.java index 0e78e356b86..de9d749b1ca 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/influxdb/InfluxdbProperties.java @@ -17,13 +17,21 @@ package org.apache.hertzbeat.warehouse.store.history.influxdb; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; /** * Influxdb configuration information */ -@ConfigurationProperties(prefix = "warehouse.store.influxdb") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.STORE + + SignConstants.DOT + + WarehouseConstants.HistoryName.INFLUXDB) public record InfluxdbProperties(@DefaultValue("false") boolean enabled, String serverUrl, String username, diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbProperties.java 
b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbProperties.java index 9d1188b690d..9e7293e39ce 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/iotdb/IotDbProperties.java @@ -19,11 +19,12 @@ import java.time.ZoneId; import java.util.List; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; - - /** * IotDB configuration information * @param enabled Whether the iotDB data store is enabled @@ -34,7 +35,12 @@ * Note: Why use String instead of Long here? Currently, IoTDB's set ttl only supports milliseconds as the unit. * Other units may be added later. In order to be compatible with the future, the String type is used. 
*/ -@ConfigurationProperties(prefix = "warehouse.store.iot-db") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.STORE + + SignConstants.DOT + + WarehouseConstants.HistoryName.IOT_DB) public record IotDbProperties(@DefaultValue("false") boolean enabled, @DefaultValue("127.0.0.1") String host, @DefaultValue("6667") Integer rpcPort, diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaProperties.java index 3b3d7a3b4eb..ab04a7ea50a 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/jpa/JpaProperties.java @@ -17,6 +17,9 @@ package org.apache.hertzbeat.warehouse.store.history.jpa; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; @@ -26,7 +29,12 @@ * @param expireTime save data expire time(ms) * @param maxHistoryRecordNum The maximum number of history records retained */ -@ConfigurationProperties(prefix = "warehouse.store.jpa") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.STORE + + SignConstants.DOT + + WarehouseConstants.HistoryName.JPA) public record JpaProperties(@DefaultValue("true") boolean enabled, @DefaultValue("1h") String expireTime, @DefaultValue("20000") Integer maxHistoryRecordNum) { diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineProperties.java 
b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineProperties.java index d364d86b4c6..631b8271a22 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/tdengine/TdEngineProperties.java @@ -17,6 +17,9 @@ package org.apache.hertzbeat.warehouse.store.history.tdengine; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; @@ -29,7 +32,12 @@ * @param password tdengine password * @param tableStrColumnDefineMaxLength auto create table's string column define max length : NCHAR(200) */ -@ConfigurationProperties(prefix = "warehouse.store.td-engine") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.STORE + + SignConstants.DOT + + WarehouseConstants.HistoryName.TD_ENGINE) public record TdEngineProperties(@DefaultValue("false") boolean enabled, @DefaultValue("jdbc:TAOS-RS://localhost:6041/demo") String url, @DefaultValue("com.taosdata.jdbc.rs.RestfulDriver") String driverClassName, diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterProperties.java index a89fead34b7..6d02492d545 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsClusterProperties.java @@ -17,12 +17,20 @@ package 
org.apache.hertzbeat.warehouse.store.history.vm; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; /** * Victoriametrics configuration information */ -@ConfigurationProperties(prefix = "warehouse.store.victoria-metrics.cluster") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.STORE + + SignConstants.DOT + + WarehouseConstants.HistoryName.VM_CLUSTER) public record VictoriaMetricsClusterProperties( VictoriaMetricsInsertProperties insert, VictoriaMetricsSelectProperties select diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsProperties.java index 563458511db..4b87b1cd6ff 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/history/vm/VictoriaMetricsProperties.java @@ -17,13 +17,21 @@ package org.apache.hertzbeat.warehouse.store.history.vm; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; /** - * Victoriametrics configuration information + * Victoria metrics configuration information. 
*/ -@ConfigurationProperties(prefix = "warehouse.store.victoria-metrics") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.STORE + + SignConstants.DOT + + WarehouseConstants.HistoryName.VM) public record VictoriaMetricsProperties(@DefaultValue("false") boolean enabled, @DefaultValue("http://localhost:8428") String url, String username, diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/memory/MemoryProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/memory/MemoryProperties.java index 3a03a5fdf9e..612255f6a11 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/memory/MemoryProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/memory/MemoryProperties.java @@ -17,6 +17,9 @@ package org.apache.hertzbeat.warehouse.store.realtime.memory; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; @@ -25,7 +28,12 @@ * @param enabled Whether memory data storage is enabled * @param initSize Memory storage map initialization size */ -@ConfigurationProperties(prefix = "warehouse.real-time.memory") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.REAL_TIME + + SignConstants.DOT + + WarehouseConstants.RealTimeName.MEMORY) public record MemoryProperties(@DefaultValue("true") boolean enabled, @DefaultValue("1024") Integer initSize) { } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisProperties.java 
b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisProperties.java index 48c27bf5cc5..3497c2ee630 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisProperties.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/realtime/redis/RedisProperties.java @@ -17,13 +17,21 @@ package org.apache.hertzbeat.warehouse.store.realtime.redis; +import org.apache.hertzbeat.common.constants.ConfigConstants; +import org.apache.hertzbeat.common.constants.SignConstants; +import org.apache.hertzbeat.warehouse.constants.WarehouseConstants; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.bind.DefaultValue; /** * Redis configuration information */ -@ConfigurationProperties(prefix = "warehouse.real-time.redis") + +@ConfigurationProperties(prefix = ConfigConstants.FunctionModuleConstants.WAREHOUSE + + SignConstants.DOT + + WarehouseConstants.REAL_TIME + + SignConstants.DOT + + WarehouseConstants.RealTimeName.REDIS) public record RedisProperties(@DefaultValue("false") boolean enabled, @DefaultValue("single") String mode, @DefaultValue("127.0.0.1:6379") String address, From b602d8ceb6a30430d7abb341ffca5e9c4e9fbd16 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Fri, 23 Aug 2024 09:26:56 +0800 Subject: [PATCH 220/257] [Improve] remove 1.4 version doc and improve markdown format for MD001 (#2585) Co-authored-by: Jast --- .markdownlint-cli2.jsonc | 6 +- home/docs/community/how-to-release.md | 10 +- home/docs/help/flink_on_yarn.md | 2 +- home/docs/help/greenplum.md | 1 - home/docs/help/kubernetes.md | 4 +- home/docs/help/vastbase.md | 1 - home/docs/start/quickstart.md | 2 +- .../version-v1.4.x.json | 90 ---- .../2024-06-11-hertzbeat-v1.6.0-update.md | 2 +- .../current/community/how-to-release.md | 16 +- .../current/help/dns.md | 2 +- .../current/help/greenplum.md | 1 - .../current/help/kubernetes.md | 4 +- .../current/help/ntp.md 
| 2 +- .../current/help/vastbase.md | 1 - .../current/start/update-1.6.0.md | 2 +- .../version-v1.4.x.json | 102 ----- .../advanced/extend-http-default.md | 159 ------- .../advanced/extend-http-example-hertzbeat.md | 217 ---------- .../advanced/extend-http-example-token.md | 389 ----------------- .../advanced/extend-http-jsonpath.md | 177 -------- .../version-v1.4.x/advanced/extend-http.md | 205 --------- .../version-v1.4.x/advanced/extend-jdbc.md | 245 ----------- .../version-v1.4.x/advanced/extend-jmx.md | 238 ----------- .../version-v1.4.x/advanced/extend-point.md | 170 -------- .../version-v1.4.x/advanced/extend-push.md | 26 -- .../version-v1.4.x/advanced/extend-snmp.md | 209 ---------- .../version-v1.4.x/advanced/extend-ssh.md | 218 ---------- .../advanced/extend-tutorial.md | 244 ----------- .../version-v1.4.x/help/activemq.md | 145 ------- .../version-v1.4.x/help/airflow.md | 38 -- .../version-v1.4.x/help/alert_console.md | 18 - .../version-v1.4.x/help/alert_dingtalk.md | 41 -- .../version-v1.4.x/help/alert_discord.md | 70 ---- .../version-v1.4.x/help/alert_email.md | 39 -- .../help/alert_enterprise_wechat_app.md | 34 -- .../version-v1.4.x/help/alert_feishu.md | 34 -- .../version-v1.4.x/help/alert_slack.md | 37 -- .../version-v1.4.x/help/alert_smn.md | 43 -- .../version-v1.4.x/help/alert_telegram.md | 67 --- .../version-v1.4.x/help/alert_threshold.md | 36 -- .../help/alert_threshold_expr.md | 54 --- .../version-v1.4.x/help/alert_webhook.md | 66 --- .../version-v1.4.x/help/alert_wework.md | 38 -- .../version-v1.4.x/help/api.md | 34 -- .../version-v1.4.x/help/centos.md | 81 ---- .../version-v1.4.x/help/dm.md | 48 --- .../version-v1.4.x/help/docker.md | 101 ----- .../version-v1.4.x/help/dynamic_tp.md | 101 ----- .../version-v1.4.x/help/fullsite.md | 34 -- .../version-v1.4.x/help/guide.md | 90 ---- .../version-v1.4.x/help/hadoop.md | 89 ---- .../version-v1.4.x/help/hive.md | 76 ---- .../version-v1.4.x/help/iotdb.md | 120 ------ .../version-v1.4.x/help/issue.md | 
69 --- .../version-v1.4.x/help/jetty.md | 94 ----- .../version-v1.4.x/help/jvm.md | 80 ---- .../version-v1.4.x/help/kafka.md | 95 ----- .../version-v1.4.x/help/kubernetes.md | 98 ----- .../version-v1.4.x/help/linux.md | 81 ---- .../version-v1.4.x/help/mariadb.md | 53 --- .../version-v1.4.x/help/memcached.md | 69 --- .../version-v1.4.x/help/mysql.md | 53 --- .../version-v1.4.x/help/nebulagraph.md | 123 ------ .../version-v1.4.x/help/nginx.md | 153 ------- .../version-v1.4.x/help/ntp.md | 41 -- .../version-v1.4.x/help/opengauss.md | 55 --- .../version-v1.4.x/help/oracle.md | 63 --- .../version-v1.4.x/help/ping.md | 36 -- .../version-v1.4.x/help/pop3.md | 46 -- .../version-v1.4.x/help/port.md | 28 -- .../version-v1.4.x/help/postgresql.md | 55 --- .../version-v1.4.x/help/rabbitmq.md | 125 ------ .../version-v1.4.x/help/redis.md | 239 ----------- .../version-v1.4.x/help/shenyu.md | 129 ------ .../version-v1.4.x/help/smtp.md | 40 -- .../version-v1.4.x/help/spring_gateway.md | 89 ---- .../version-v1.4.x/help/springboot2.md | 96 ----- .../version-v1.4.x/help/sqlserver.md | 76 ---- .../version-v1.4.x/help/ssl_cert.md | 33 -- .../version-v1.4.x/help/tomcat.md | 75 ---- .../version-v1.4.x/help/ubuntu.md | 81 ---- .../version-v1.4.x/help/website.md | 29 -- .../version-v1.4.x/help/windows.md | 43 -- .../version-v1.4.x/help/zookeeper.md | 99 ----- .../version-v1.4.x/introduce.md | 309 -------------- .../version-v1.4.x/others/contact.md | 21 - .../version-v1.4.x/others/contributing.md | 141 ------- .../version-v1.4.x/others/design.md | 13 - .../version-v1.4.x/others/developer.md | 262 ------------ .../version-v1.4.x/others/huaweicloud.md | 23 - .../version-v1.4.x/others/images-deploy.md | 110 ----- .../version-v1.4.x/others/resource.md | 23 - .../version-v1.4.x/others/sponsor.md | 12 - .../version-v1.4.x/start/account-modify.md | 128 ------ .../version-v1.4.x/start/custom-config.md | 76 ---- .../version-v1.4.x/start/docker-deploy.md | 166 -------- 
.../version-v1.4.x/start/greptime-init.md | 67 --- .../version-v1.4.x/start/influxdb-init.md | 76 ---- .../version-v1.4.x/start/iotdb-init.md | 88 ---- .../version-v1.4.x/start/mysql-change.md | 96 ----- .../version-v1.4.x/start/package-deploy.md | 99 ----- .../version-v1.4.x/start/postgresql-change.md | 89 ---- .../version-v1.4.x/start/quickstart.md | 127 ------ .../version-v1.4.x/start/rainbond-deploy.md | 32 -- .../version-v1.4.x/start/sslcert-practice.md | 102 ----- .../version-v1.4.x/start/tdengine-init.md | 138 ------ .../version-v1.4.x/start/upgrade.md | 47 --- .../start/victoria-metrics-init.md | 70 ---- .../version-v1.4.x/template.md | 95 ----- .../community/how-to-release.md | 34 +- .../version-v1.5.x/help/dns.md | 2 +- .../version-v1.5.x/help/kubernetes.md | 4 +- .../version-v1.5.x/help/ntp.md | 2 +- .../version-v1.5.x/start/quickstart.md | 2 +- .../advanced/extend-http-default.md | 156 ------- .../advanced/extend-http-example-hertzbeat.md | 227 ---------- .../advanced/extend-http-example-token.md | 393 ------------------ .../advanced/extend-http-jsonpath.md | 174 -------- .../version-v1.4.x/advanced/extend-http.md | 204 --------- .../version-v1.4.x/advanced/extend-jdbc.md | 243 ----------- .../version-v1.4.x/advanced/extend-jmx.md | 194 --------- .../version-v1.4.x/advanced/extend-point.md | 132 ------ .../version-v1.4.x/advanced/extend-push.md | 26 -- .../version-v1.4.x/advanced/extend-snmp.md | 171 -------- .../version-v1.4.x/advanced/extend-ssh.md | 214 ---------- .../advanced/extend-tutorial.md | 226 ---------- .../version-v1.4.x/help/activemq.md | 145 ------- .../version-v1.4.x/help/airflow.md | 38 -- .../version-v1.4.x/help/alert_console.md | 18 - .../version-v1.4.x/help/alert_dingtalk.md | 41 -- .../version-v1.4.x/help/alert_discord.md | 68 --- .../version-v1.4.x/help/alert_email.md | 39 -- .../help/alert_enterprise_wechat_app.md | 34 -- .../version-v1.4.x/help/alert_feishu.md | 34 -- .../version-v1.4.x/help/alert_slack.md | 35 -- 
.../version-v1.4.x/help/alert_smn.md | 43 -- .../version-v1.4.x/help/alert_telegram.md | 65 --- .../version-v1.4.x/help/alert_threshold.md | 36 -- .../help/alert_threshold_expr.md | 50 --- .../version-v1.4.x/help/alert_webhook.md | 66 --- .../version-v1.4.x/help/alert_wework.md | 38 -- .../versioned_docs/version-v1.4.x/help/api.md | 34 -- .../version-v1.4.x/help/centos.md | 81 ---- home/versioned_docs/version-v1.4.x/help/dm.md | 48 --- .../version-v1.4.x/help/docker.md | 101 ----- .../version-v1.4.x/help/doris_be.md | 170 -------- .../version-v1.4.x/help/doris_fe.md | 126 ------ .../version-v1.4.x/help/dynamic_tp.md | 101 ----- .../version-v1.4.x/help/fullsite.md | 34 -- .../version-v1.4.x/help/guide.md | 91 ---- .../version-v1.4.x/help/hadoop.md | 89 ---- .../version-v1.4.x/help/hive.md | 76 ---- .../version-v1.4.x/help/iotdb.md | 120 ------ .../version-v1.4.x/help/issue.md | 62 --- .../version-v1.4.x/help/jetty.md | 94 ----- .../versioned_docs/version-v1.4.x/help/jvm.md | 76 ---- .../version-v1.4.x/help/kafka.md | 89 ---- .../version-v1.4.x/help/kubernetes.md | 95 ----- .../version-v1.4.x/help/linux.md | 81 ---- .../version-v1.4.x/help/mariadb.md | 53 --- .../version-v1.4.x/help/memcached.md | 69 --- .../version-v1.4.x/help/mysql.md | 53 --- .../version-v1.4.x/help/nebulagraph.md | 74 ---- .../version-v1.4.x/help/nginx.md | 154 ------- .../versioned_docs/version-v1.4.x/help/ntp.md | 37 -- .../version-v1.4.x/help/opengauss.md | 55 --- .../version-v1.4.x/help/oracle.md | 63 --- .../version-v1.4.x/help/ping.md | 36 -- .../version-v1.4.x/help/pop3.md | 47 --- .../version-v1.4.x/help/port.md | 28 -- .../version-v1.4.x/help/postgresql.md | 55 --- .../version-v1.4.x/help/rabbitmq.md | 125 ------ .../version-v1.4.x/help/redis.md | 239 ----------- .../version-v1.4.x/help/shenyu.md | 129 ------ .../version-v1.4.x/help/smtp.md | 40 -- .../version-v1.4.x/help/spark.md | 114 ----- .../version-v1.4.x/help/spring_gateway.md | 89 ---- .../version-v1.4.x/help/springboot2.md | 95 
----- .../version-v1.4.x/help/sqlserver.md | 76 ---- .../version-v1.4.x/help/ssl_cert.md | 33 -- .../version-v1.4.x/help/tomcat.md | 73 ---- .../version-v1.4.x/help/ubuntu.md | 81 ---- .../version-v1.4.x/help/website.md | 29 -- .../version-v1.4.x/help/windows.md | 43 -- .../version-v1.4.x/help/zookeeper.md | 102 ----- .../version-v1.4.x/introduce.md | 312 -------------- .../version-v1.4.x/others/contact.md | 17 - .../version-v1.4.x/others/contributing.md | 147 ------- .../version-v1.4.x/others/design.md | 13 - .../version-v1.4.x/others/developer.md | 262 ------------ .../version-v1.4.x/others/hertzbeat.md | 276 ------------ .../version-v1.4.x/others/huaweicloud.md | 23 - .../version-v1.4.x/others/images-deploy.md | 110 ----- .../version-v1.4.x/others/resource.md | 17 - .../version-v1.4.x/others/sponsor.md | 12 - .../version-v1.4.x/start/account-modify.md | 128 ------ .../version-v1.4.x/start/custom-config.md | 59 --- .../version-v1.4.x/start/docker-deploy.md | 146 ------- .../version-v1.4.x/start/greptime-init.md | 68 --- .../version-v1.4.x/start/influxdb-init.md | 77 ---- .../version-v1.4.x/start/iotdb-init.md | 85 ---- .../version-v1.4.x/start/mysql-change.md | 67 --- .../version-v1.4.x/start/package-deploy.md | 100 ----- .../version-v1.4.x/start/postgresql-change.md | 86 ---- .../version-v1.4.x/start/quickstart.md | 88 ---- .../version-v1.4.x/start/rainbond-deploy.md | 32 -- .../version-v1.4.x/start/sslcert-practice.md | 102 ----- .../version-v1.4.x/start/tdengine-init.md | 126 ------ .../version-v1.4.x/start/upgrade.md | 47 --- .../start/victoria-metrics-init.md | 68 --- .../versioned_docs/version-v1.4.x/template.md | 95 ----- .../community/how-to-release.md | 29 +- .../versioned_docs/version-v1.5.x/help/dns.md | 6 +- .../version-v1.5.x/help/kubernetes.md | 4 +- .../versioned_docs/version-v1.5.x/help/ntp.md | 2 +- .../version-v1.5.x/start/quickstart.md | 2 +- .../version-v1.4.x-sidebars.json | 240 ----------- home/versions.json | 3 +- 
.../monitor-data-chart.component.ts | 2 +- .../monitor-data-table.component.less | 3 + .../monitor-data-table.component.ts | 2 +- .../monitor-detail.component.less | 3 - yarn.lock | 4 - 224 files changed, 90 insertions(+), 18622 deletions(-) delete mode 100644 home/i18n/en/docusaurus-plugin-content-docs/version-v1.4.x.json delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x.json delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-push.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md delete mode 100644 
home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md delete mode 100644 
home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md delete 
mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md delete mode 100644 
home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md delete mode 100644 
home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-http.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-point.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-push.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md delete mode 100644 home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/activemq.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/airflow.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_console.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_discord.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_email.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_feishu.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_slack.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_smn.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_telegram.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_threshold.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md delete mode 100644 
home/versioned_docs/version-v1.4.x/help/alert_webhook.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/alert_wework.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/api.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/centos.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/dm.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/docker.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/doris_be.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/doris_fe.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/dynamic_tp.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/fullsite.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/guide.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/hadoop.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/hive.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/iotdb.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/issue.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/jetty.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/jvm.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/kafka.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/kubernetes.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/linux.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/mariadb.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/memcached.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/mysql.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/nebulagraph.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/nginx.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/ntp.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/opengauss.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/oracle.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/ping.md 
delete mode 100644 home/versioned_docs/version-v1.4.x/help/pop3.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/port.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/postgresql.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/rabbitmq.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/redis.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/shenyu.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/smtp.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/spark.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/spring_gateway.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/springboot2.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/sqlserver.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/ssl_cert.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/tomcat.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/ubuntu.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/website.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/windows.md delete mode 100644 home/versioned_docs/version-v1.4.x/help/zookeeper.md delete mode 100644 home/versioned_docs/version-v1.4.x/introduce.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/contact.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/contributing.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/design.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/developer.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/hertzbeat.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/huaweicloud.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/images-deploy.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/resource.md delete mode 100644 home/versioned_docs/version-v1.4.x/others/sponsor.md delete mode 100644 
home/versioned_docs/version-v1.4.x/start/account-modify.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/custom-config.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/docker-deploy.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/greptime-init.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/influxdb-init.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/iotdb-init.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/mysql-change.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/package-deploy.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/postgresql-change.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/quickstart.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/sslcert-practice.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/tdengine-init.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/upgrade.md delete mode 100644 home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md delete mode 100644 home/versioned_docs/version-v1.4.x/template.md delete mode 100644 home/versioned_sidebars/version-v1.4.x-sidebars.json delete mode 100644 yarn.lock diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 97a129352eb..e2f39cfecaf 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -17,7 +17,7 @@ { "config": { "default": true, - "MD001": false, + "MD001": true, "MD052": false, "MD003": false, "MD013": false, @@ -36,6 +36,8 @@ "MD047": false }, "ignore": [ - "node_modules/" + "node_modules/", + "target/", + "dist/" ] } diff --git a/home/docs/community/how-to-release.md b/home/docs/community/how-to-release.md index 3e87aa1b4b5..94c919d30f0 100644 --- a/home/docs/community/how-to-release.md +++ b/home/docs/community/how-to-release.md @@ -168,6 +168,8 @@ $ svn ci -m "add gpg key for muchunjin" ## 
3. Prepare material package & release +### Build Package + #### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release ```shell @@ -228,6 +230,8 @@ release-1.6.0-rc1 The archive package is here `dist/apache-hertzbeat-1.6.0-incubating-src.tar.gz` +### Sign package + #### 3.5 Sign binary and source packages > The `gpg -u 33545C76` `33545C76` is your gpg secret ID, see from `gpg --keyid-format SHORT --list-keys` @@ -333,7 +337,7 @@ svn commit -m "release for HertzBeat 1.6.0" ## 4. Enter the community voting stage -#### 4.1 Send a Community Vote Email +### 4.1 Send a Community Vote Email Send a voting email in the community requires at least three `+1` and no `-1`. @@ -525,9 +529,9 @@ One item of the email content is `Vote thread`, and the link is obtained here: < Wait a day to see if the tutor has any other comments, if not, send the following announcement email -## 4. Complete the final publishing steps +## 5. Complete the final publishing steps -#### 4.1 Migrating source and binary packages +### 5.1 Migrating source and binary packages ```shell svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 https://dist.apache.org/repos/dist/release/incubator/hertzbeat/1.6.0 -m "transfer packages for 1.6.0-RC1" diff --git a/home/docs/help/flink_on_yarn.md b/home/docs/help/flink_on_yarn.md index cda9abe8201..bc6bd97058c 100644 --- a/home/docs/help/flink_on_yarn.md +++ b/home/docs/help/flink_on_yarn.md @@ -2,7 +2,7 @@ > Measurement and monitoring of general metrics for Flink stream engine in Yarn running mode. 
-### Configuration Parameters +## Configuration Parameters | Parameter Name | Parameter Help Description | |---------------------|---------------------------------------------------------------------------------------------------------------------| diff --git a/home/docs/help/greenplum.md b/home/docs/help/greenplum.md index 761e256317d..e05539ed8c1 100644 --- a/home/docs/help/greenplum.md +++ b/home/docs/help/greenplum.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/docs/help/kubernetes.md b/home/docs/help/kubernetes.md index 3cb2336e768..d7e6b657ea6 100644 --- a/home/docs/help/kubernetes.md +++ b/home/docs/help/kubernetes.md @@ -13,7 +13,7 @@ If you want to monitor the information in 'Kubernetes', you need to obtain an au Refer to the steps to obtain token -#### method one +### method one 1. 
Create a service account and bind the default cluster-admin administrator cluster role @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### method two +### method two ```shell kubectl create serviceaccount cluster-admin diff --git a/home/docs/help/vastbase.md b/home/docs/help/vastbase.md index cd0374fc777..1d595b85201 100644 --- a/home/docs/help/vastbase.md +++ b/home/docs/help/vastbase.md @@ -53,4 +53,3 @@ keywords: [open source monitoring tool, open source database monitoring tool, mo | Metric name | Metric unit | Metric help description | |-------------|-------------|--------------------------------------| | running | connections | Number of current client connections | - diff --git a/home/docs/start/quickstart.md b/home/docs/start/quickstart.md index 5c0a9868252..0911ef7a32c 100644 --- a/home/docs/start/quickstart.md +++ b/home/docs/start/quickstart.md @@ -8,7 +8,7 @@ sidebar_label: Quick Start - If you wish to deploy Apache HertzBeat (incubating) locally, please refer to the following Deployment Documentation for instructions. -### 🍞 Install HertzBeat +#### 🍞 Install HertzBeat > Apache HertzBeat (incubating) supports installation through source code, docker or package, cpu support X86/ARM64. 
diff --git a/home/i18n/en/docusaurus-plugin-content-docs/version-v1.4.x.json b/home/i18n/en/docusaurus-plugin-content-docs/version-v1.4.x.json deleted file mode 100644 index 288ef4a4d32..00000000000 --- a/home/i18n/en/docusaurus-plugin-content-docs/version-v1.4.x.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "version.label": { - "message": "v1.4.x", - "description": "The label for version v1.4.x" - }, - "sidebar.docs.category.quickstart": { - "message": "quickstart", - "description": "The label for category quickstart in sidebar docs" - }, - "sidebar.docs.category.custom": { - "message": "custom", - "description": "The label for category custom in sidebar docs" - }, - "sidebar.docs.category.http": { - "message": "http", - "description": "The label for category http in sidebar docs" - }, - "sidebar.docs.category.jdbc": { - "message": "jdbc", - "description": "The label for category jdbc in sidebar docs" - }, - "sidebar.docs.category.ssh": { - "message": "ssh", - "description": "The label for category ssh in sidebar docs" - }, - "sidebar.docs.category.jmx": { - "message": "jmx", - "description": "The label for category jmx in sidebar docs" - }, - "sidebar.docs.category.snmp": { - "message": "snmp", - "description": "The label for category snmp in sidebar docs" - }, - "sidebar.docs.category.push": { - "message": "push", - "description": "The label for category push in sidebar docs" - }, - "sidebar.docs.category.help": { - "message": "help", - "description": "The label for category help in sidebar docs" - }, - "sidebar.docs.category.service": { - "message": "service", - "description": "The label for category service in sidebar docs" - }, - "sidebar.docs.category.program": { - "message": "program", - "description": "The label for category program in sidebar docs" - }, - "sidebar.docs.category.database": { - "message": "database", - "description": "The label for category database in sidebar docs" - }, - "sidebar.docs.category.cache": { - "message": "cache", - "description": 
"The label for category cache in sidebar docs" - }, - "sidebar.docs.category.os": { - "message": "os", - "description": "The label for category os in sidebar docs" - }, - "sidebar.docs.category.mid": { - "message": "mid", - "description": "The label for category mid in sidebar docs" - }, - "sidebar.docs.category.bigdata": { - "message": "bigdata", - "description": "The label for category bigdata in sidebar docs" - }, - "sidebar.docs.category.webserver": { - "message": "webserver", - "description": "The label for category webserver in sidebar docs" - }, - "sidebar.docs.category.cloud-native": { - "message": "cloud-native", - "description": "The label for category cloud-native in sidebar docs" - }, - "sidebar.docs.category.threshold": { - "message": "threshold", - "description": "The label for category threshold in sidebar docs" - }, - "sidebar.docs.category.notice": { - "message": "notice", - "description": "The label for category notice in sidebar docs" - }, - "sidebar.docs.category.Others": { - "message": "Others", - "description": "The label for category Others in sidebar docs" - } -} diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md index 2f612c42c28..1334c16c891 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md @@ -1,4 +1,4 @@ -# HertzBeat 1.6.0 升级指南 +## HertzBeat 1.6.0 升级指南 ### 注意:该指南适用于1.5.0向1.6.0版本升级 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md index 29b1dac509b..5402bc696aa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md 
@@ -168,6 +168,8 @@ $ svn ci -m "add gpg key for muchunjin" ## 3. 准备物料 & 发布 +### 准备发布物料 + #### 3.1 基于 master 分支,创建一个名为 release-${release_version}-rcx 的分支,例如 release-1.6.0-rc1。并基于 release-1.6.0-rc1 分支创建一个名为 v1.6.0-rc1 的标签,并将此标签设置为预发布 ```shell @@ -228,6 +230,8 @@ release-1.6.0-rc1 生成的代码归档文件在 `dist/apache-hertzbeat-1.6.0-incubating-src.tar.gz` +### 签名发布物料 + #### 3.5 对二进制和源码包进行签名 将上步骤生成的三个文件包放到`dist`目录下(若无则新建目录),然后对文件包进行签名和SHA512校验和生成。 @@ -335,7 +339,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" ## 4. 进入社区投票阶段 -#### 4.1 发送社区投票邮件 +### 4.1 发送社区投票邮件 发送社区投票邮件需要至少三个`+1`,且无`-1`。 @@ -426,7 +430,7 @@ ChunJin Mu 邮件内容中的一项是`Vote thread`,在 查看获取 -#### 3.2 发送孵化社区投票邮件 +### 3.2 发送孵化社区投票邮件 发送孵化社区投票邮件需要至少三个`+1`,且无`-1`。 @@ -529,13 +533,13 @@ ChunJin Mu ## 4. 完成最终发布步骤 -#### 4.1 迁移源代码和二进制包 +### 4.1 迁移源代码和二进制包 ```shell svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 https://dist.apache.org/repos/dist/release/incubator/hertzbeat/1.6.0 -m "transfer packages for 1.6.0-RC1" ``` -#### 4.2 添加新版本下载地址到官网 +### 4.2 添加新版本下载地址到官网 @@ -544,7 +548,7 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http > 需要注意的是,下载链接可能需要一个小时后才会生效,请注意。 -#### 4.3 Github 生成 Release +### 4.3 Github 生成 Release 基于 release-1.6.0-rc1 分支修改创建一个名为 v1.6.0 的标签,并将此标签设置为 latest release。 @@ -571,7 +575,7 @@ release note: xxx 然后将 release-1.6.0-rc1 分支重命名 为 release-1.6.0。 -#### 4.4 发送新版本公告邮件 +### 4.4 发送新版本公告邮件 > `Send to`:
> `cc`:
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md index 386ec0e91e1..e012e347c20 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dns.md @@ -5,7 +5,7 @@ sidebar_label: DNS服务器 keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] --- -# 监控:DNS服务器 +## 监控:DNS服务器 > 收集和监控DNS的常规性能指标。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md index 2d971964799..e1094830096 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/greenplum.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, GreenPlum 数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md index 4f0363f621d..cc4c7254afe 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md @@ -13,7 +13,7 @@ keywords: [开源监控系统, 开源Kubernetes监控] 参考获取token步骤 -#### 方式一 +### 方式一 1. 
创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### 方式二 +### 方式二 ``` kubectl create serviceaccount cluster-admin diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md index a160f2501e4..0806232cee5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/ntp.md @@ -7,7 +7,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito NTP监控的中文文档如下: -# NTP监控 +## NTP监控 > 收集和监控NTP的常规性能指标。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md index f8e2d76aa55..1a5b020ff8d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/vastbase.md @@ -53,4 +53,3 @@ keywords: [开源监控系统, 开源数据库监控, Vastbase 数据库监控] | 指标名称 | 指标单位 | 指标帮助描述 | |---------|------|----------| | running | 连接数 | 当前客户端连接数 | - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md index 8c8becc369d..7b1a30c7f06 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md @@ -4,7 +4,7 @@ title: 如何升级到1.6.0 sidebar_label: 1.6.0升级指南 --- -# HertzBeat 1.6.0 升级指南 +## HertzBeat 1.6.0 升级指南 ### 注意:该指南适用于1.5.0向1.6.0版本升级 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x.json b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x.json deleted file mode 100644 index 53a82b3fbdb..00000000000 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "version.label": { - "message": "v1.4.x", - "description": "The label for version v1.4.x" - }, - "sidebar.docs.category.quickstart": { - "message": "quickstart", - "description": "The label for category quickstart in sidebar docs" - }, - "sidebar.docs.category.custom": { - "message": "custom", - "description": "The label for category custom in sidebar docs" - }, - "sidebar.docs.category.http": { - "message": "http", - "description": "The label for category http in sidebar docs" - }, - "sidebar.docs.category.jdbc": { - "message": "jdbc", - "description": "The label for category jdbc in sidebar docs" - }, - "sidebar.docs.category.ssh": { - "message": "ssh", - "description": "The label for category ssh in sidebar docs" - }, - "sidebar.docs.category.jmx": { - "message": "jmx", - "description": "The label for category jmx in sidebar docs" - }, - "sidebar.docs.category.snmp": { - "message": "snmp", - "description": "The label for category snmp in sidebar docs" - }, - "sidebar.docs.category.push": { - "message": "push", - "description": "The label for category push in sidebar docs" - }, - "sidebar.docs.category.help": { - "message": "help", - "description": "The label for category help in sidebar docs" - }, - "sidebar.docs.category.service": { - "message": "service", - "description": "The label for category service in sidebar docs" - }, - "sidebar.docs.category.program": { - "message": "program", - "description": "The label for category program in sidebar docs" - }, - "sidebar.docs.category.database": { - "message": "database", - "description": "The label for category database in sidebar docs" - }, - "sidebar.docs.category.cache": { - "message": "cache", - "description": "The label for category cache in sidebar docs" - }, - "sidebar.docs.category.os": { - "message": "os", - "description": "The label for category os in sidebar docs" - }, - "sidebar.docs.category.mid": 
{ - "message": "mid", - "description": "The label for category mid in sidebar docs" - }, - "sidebar.docs.category.bigdata": { - "message": "bigdata", - "description": "The label for category bigdata in sidebar docs" - }, - "sidebar.docs.category.webserver": { - "message": "webserver", - "description": "The label for category webserver in sidebar docs" - }, - "sidebar.docs.category.cloud-native": { - "message": "cloud-native", - "description": "The label for category cloud-native in sidebar docs" - }, - "sidebar.docs.category.threshold": { - "message": "threshold", - "description": "The label for category threshold in sidebar docs" - }, - "sidebar.docs.category.notice": { - "message": "notice", - "description": "The label for category notice in sidebar docs" - }, - "sidebar.docs.category.Others": { - "message": "Others", - "description": "The label for category Others in sidebar docs" - }, - "sidebar.docs.link.Cloud Service": { - "message": "Cloud Service", - "description": "The label for link Cloud Service in sidebar docs, linking to https://console.tancloud.cn" - }, - "sidebar.docs.link.Install via HuaweiCloud": { - "message": "Install via HuaweiCloud", - "description": "The label for link Install via HuaweiCloud in sidebar docs, linking to https://marketplace.huaweicloud.com/contents/0477015c-ad63-4522-a308-816861769f0a#productid=OFFI863735781612109824" - }, - "sidebar.docs.link.Install via Helm": { - "message": "Install via Helm", - "description": "The label for link Install via Helm in sidebar docs, linking to https://artifacthub.io/packages/helm/hertzbeat/hertzbeat" - } -} diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md deleted file mode 100644 index c0acbf1ea87..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-default.md +++ /dev/null @@ -1,159 +0,0 @@ 
---- -id: extend-http-default -title: HTTP协议系统默认解析方式 -sidebar_label: 系统默认解析方式 ---- - -> HTTP接口调用获取响应数据后,用HertzBeat默认的解析方式去解析响应数据。 - -**此需接口响应数据结构符合HertzBeat指定的数据结构规则** - -### HertzBeat数据格式规范 - -注意⚠️ 响应数据为JSON - -单层格式:key-value - -```json -{ - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" -} -``` - -多层格式:数组里面套key-value - -```json -[ - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - }, - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - } -] -``` - -样例: -查询自定义系统的CPU信息,其暴露接口为 `/metrics/cpu`,我们需要其中的`hostname,core,useage`指标 -若只有一台虚拟机,其单层格式为: - -```json -{ - "hostname": "linux-1", - "core": 1, - "usage": 78.0, - "allTime": 200, - "runningTime": 100 -} -``` - -若有多台虚拟机,其多层格式为: - -```json -[ - { - "hostname": "linux-1", - "core": 1, - "usage": 78.0, - "allTime": 200, - "runningTime": 100 - }, - { - "hostname": "linux-2", - "core": 3, - "usage": 78.0, - "allTime": 566, - "runningTime": 34 - }, - { - "hostname": "linux-3", - "core": 4, - "usage": 38.0, - "allTime": 500, - "runningTime": 20 - } -] -``` - -**对应的监控模版YML可以配置为如下** - -```yaml -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... -app: example -name: - zh-CN: 模拟应用类型 - en-US: EXAMPLE APP -# 监控参数定义. 
field 这些为输入参数变量,即可以用^_^host^_^的形式写到后面的配置中,系统自动变量值替换 -# 强制固定必须参数 - host -params: - # field-字段名称标识符 - - field: host - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # 是否是必输项 true-必填 false-可选 - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # 当type为number时,用range表示范围 - range: '[0,65535]' - required: true - # 端口默认值 - defaultValue: 80 - # 参数输入框提示信息 - placeholder: '请输入端口' -# collect metrics config list -# 采集指标配置列表 -metrics: - # First monitoring metric group cpu - # Note: The built-in monitoring metrics include (responseTime - response time) - - name: cpu - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: hostname - type: 1 - label: true - - field: usage - type: 0 - unit: '%' - - field: core - type: 0 -# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http -# 当protocol为http协议时具体的采集配置 - http: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - # url请求接口路径 - url: /metrics/cpu - # 请求方式 GET POST PUT DELETE PATCH - method: GET - # 是否启用ssl/tls,即是http还是https,默认false - ssl: false - # 响应数据解析方式: default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控 - # 这里使用HertzBeat默认解析 - parseType: default -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md deleted file mode 100644 index 9317fdbfc21..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -id: extend-http-example-hertzbeat -title: 教程一:适配一款基于HTTP协议的监控类型 -sidebar_label: 教程一:适配一款HTTP协议监控 ---- - -通过此教程我们一步一步描述如何在hertzbeat监控系统下新增适配一款基于http协议的监控类型。 - 
-阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 - -### HTTP协议解析通用响应结构体,获取指标数据 - -> 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - -``` -{ - "code": 200, - "msg": "success", - "data": {} -} - -``` - -如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API -`http://localhost:1157/api/summary`, 其响应数据为: - -``` -{ - "msg": null, - "code": 0, - "data": { - "apps": [ - { - "category": "service", - "app": "jvm", - "status": 0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - }, - { - "category": "service", - "app": "website", - "status": 0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - } - ] - } -} -``` - -**我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - -### 新增自定义监控模版YML - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** - -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 -> 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 - -样例:自定义一个名称为`hertzbeat`的自定义监控类型,其使用HTTP协议采集指标数据。 - -```yaml -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# 监控应用类型名称(与文件名保持一致) eg: linux windows tomcat mysql aws... 
-app: hertzbeat -name: - zh-CN: HertzBeat监控系统 - en-US: HertzBeat Monitor -params: - - field: host - name: - zh-CN: 主机Host - en-US: Host - type: host - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - range: '[0,65535]' - required: true - defaultValue: 1157 - - field: ssl - name: - zh-CN: 启用HTTPS - en-US: HTTPS - type: boolean - required: true - - field: timeout - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - type: number - required: false - hide: true - - field: authType - name: - zh-CN: 认证方式 - en-US: Auth Type - type: radio - required: false - hide: true - options: - - label: Basic Auth - value: Basic Auth - - label: Digest Auth - value: Digest Auth - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: false - hide: true - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - hide: true -# collect metrics config list -# 采集指标配置列表 -metrics: - # metrics - summary - # 监控指标 - summary - - name: summary - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: responseTime - type: 0 - unit: ms - - field: app - type: 1 - label: true - - field: category - type: 1 - - field: status - type: 0 - - field: size - type: 0 - - field: availableSize - type: 0 -# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk, 我们这里使用HTTP协议 - protocol: http -# 当protocol为http协议时具体的采集配置 - http: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - # url请求接口路径,我们这里不需要输入传参,写死为 /api/summary - url: /api/summary - timeout: ^_^timeout^_^ - # 请求方式 GET POST PUT DELETE PATCH,写死为 - method: GET - # 是否启用ssl/tls,即是http还是https,默认false - ssl: ^_^ssl^_^ - # 认证 - authorization: - # 认证方式: Basic Auth, Digest Auth, Bearer Token - type: ^_^authType^_^ - basicAuthUsername: ^_^username^_^ - 
basicAuthPassword: ^_^password^_^ - digestAuthUsername: ^_^username^_^ - digestAuthPassword: ^_^password^_^ - # 响应数据解析方式: default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控,我们这里使用jsonpath来解析响应数据 - parseType: jsonPath - parseScript: '$.data.apps.*' - -``` - -**新增完毕,现在我们重启hertzbeat系统。我们可以看到系统页面已经多了一个`hertzbeat`监控类型了。** - -![](/img/docs/advanced/extend-http-example-1.png) - -### 系统页面添加对`hertzbeat`监控类型的监控 - -> 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - -![](/img/docs/advanced/extend-http-example-2.png) - -![](/img/docs/advanced/extend-http-example-3.png) - -> 过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - -![](/img/docs/advanced/extend-http-example-4.png) - -### 设置阈值告警通知 - -> 接下来我们就可以正常的设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ----- - -#### 完 - -HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! - -如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! - -**github: ** - -**gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md deleted file mode 100644 index bbcaa5299d0..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-example-token.md +++ /dev/null @@ -1,389 +0,0 @@ ---- -id: extend-http-example-token -title: 教程二:基于HTTP协议获取TOKEN指标值,给后续采集认证使用 -sidebar_label: 教程二:获取TOKEN后续认证使用 ---- - -通过此教程我们一步一步描述如何在教程一的基础上改造,新增一个监控指标,先调用认证接口获取TOKEN后,使用TOKEN作为参数供后面的监控指标采集认证使用。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 - -### 请求流程 - -【**认证信息监控指标(优先级最高)**】【**HTTP接口携带账户密码调用**】->【**响应数据解析**】->【**解析签发TOKEN-accessToken作为指标**】->【**将accessToken作为变量参数赋值给其他采集监控指标**】 - -> 这里我们依然用教程一的hertzbeat监控举例!hertzbeat后台接口不仅仅支持教程一使用的basic直接账户密码认证,也支持token认证。 - -**我们需要`POST`调用登录接口`/api/account/auth/form`获取`accessToken`,请求body(json格式)如下**: - -```json -{ - "credential": "hertzbeat", - 
"identifier": "admin" -} -``` - -**响应结构数据如下**: - -```json -{ - "data": { - "token": "xxxx", - "refreshToken": "xxxx" - }, - "msg": null, - "code": 0 -} -``` - -### 新增自定义监控类型`hertzbeat_token` - -1. 自定义监控类型需新增配置监控模版YML,我们直接复用教程一的 `hertzbeat` 监控类型,在其基础上修改 - -监控配置定义文件是用来定义采集类型是啥,需要用哪种协议采集方式,采集的指标是啥,协议的配置参数等。 -我们直接复用 app-hertzbeat.yml 里面的定义内容,修改为我们当前的监控类型`hertzbeat_auth`配置参数, 比如 `app, category等`。 - -```yaml -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... -app: hertzbeat_token -name: - zh-CN: HertzBeat监控(Token) - en-US: HertzBeat Monitor(Token) -params: - # field-字段名称标识符 - - field: host - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # 是否是必输项 true-必填 false-可选 - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # 当type为number时,用range表示范围 - range: '[0,65535]' - required: true - # 端口默认值 - defaultValue: 1157 - # 参数输入框提示信息 - placeholder: '请输入端口' - - field: ssl - name: - zh-CN: 启动SSL - en-US: SSL - # 当type为boolean时,前端用switch展示开关 - type: boolean - required: false - - field: contentType - name: - zh-CN: Content-Type - en-US: Content-Type - type: text - placeholder: 'Request Body Type' - required: false - - field: payload - name: - zh-CN: 请求BODY - en-US: BODY - type: textarea - placeholder: 'Available When POST PUT' - required: false -# 采集指标配置列表 todo 下方配置 -metrics: ...... - -``` - -### 定义监控指标`auth`登录请求获取`token` - -1. 在`app-hertzbeat_token.yml`新增一个监控指标定义 `auth`, 设置采集优先级为最高0,采集指标 `token`. - -```yaml - -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... 
-app: hertzbeat_token -name: - zh-CN: HertzBeat监控(Token) - en-US: HertzBeat Monitor(Token) -params: - # field-字段名称标识符 - - field: host - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # 是否是必输项 true-必填 false-可选 - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # 当type为number时,用range表示范围 - range: '[0,65535]' - required: true - # 端口默认值 - defaultValue: 1157 - # 参数输入框提示信息 - placeholder: '请输入端口' - - field: ssl - name: - zh-CN: 启动SSL - en-US: SSL - # 当type为boolean时,前端用switch展示开关 - type: boolean - required: false - - field: contentType - name: - zh-CN: Content-Type - en-US: Content-Type - type: text - placeholder: 'Request Body Type' - required: false - - field: payload - name: - zh-CN: 请求BODY - en-US: BODY - type: textarea - placeholder: 'Available When POST PUT' - required: false -# 采集指标配置列表 -metrics: - # 第一个监控指标 auth - # 注意:内置监控指标有 (responseTime - 响应时间) - - name: auth - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: token - type: 1 - - field: refreshToken - type: 1 - # 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http - # 当protocol为http协议时具体的采集配置 - http: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - # url请求接口路径 - url: /api/account/auth/form - # 请求方式 GET POST PUT DELETE PATCH - method: POST - # 是否启用ssl/tls,即是http还是https,默认false - ssl: ^_^ssl^_^ - payload: ^_^payload^_^ - # 请求头内容 - headers: - content-type: ^_^contentType^_^ - # 响应数据解析方式: default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控 - parseType: jsonPath - parseScript: '$.data' - -``` - -**此时,重启hertzbeat系统,在系统页面上添加 `hertzbeat_token` 类型监控,配置输入参数,`content-type`填`application/json` , `请求Body`填账户密码json如下:** - -```json -{ - "credential": "hertzbeat", - "identifier": "admin" -} -``` - 
-![](/img/docs/advanced/extend-http-example-5.png) - -**新增成功后我们就可以在详情页面看到我们采集的 `token`, `refreshToken`指标数据。** - -![](/img/docs/advanced/extend-http-example-6.png) - -![](/img/docs/advanced/extend-http-example-7.png) - -### 将`token`作为变量参数给后面的监控指标采集使用 - -**在`app-hertzbeat_token.yml`新增一个监控指标定义 `summary` 同教程一中的`summary`相同, 设置采集优先级为1** -**设置此监控指标的HTTP协议配置中认证方式为 `Bearer Token` 将上一个监控指标`auth`采集的指标`token`作为参数给其赋值,使用`^o^`作为内部替换符标识,即`^o^token^o^`。如下:** - -```yaml - - name: summary -# 当protocol为http协议时具体的采集配置 - http: - # 认证 - authorization: - # 认证方式: Basic Auth, Digest Auth, Bearer Token - type: Bearer Token - bearerTokenToken: ^o^token^o^ -``` - -**最终`app-hertzbeat_token.yml`定义如下:** - -```yaml - -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... -app: hertzbeat_token -name: - zh-CN: HertzBeat监控(Token) - en-US: HertzBeat Monitor(Token) -params: - # field-字段名称标识符 - - field: host - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # 是否是必输项 true-必填 false-可选 - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # 当type为number时,用range表示范围 - range: '[0,65535]' - required: true - # 端口默认值 - defaultValue: 1157 - # 参数输入框提示信息 - placeholder: '请输入端口' - - field: ssl - name: - zh-CN: 启动SSL - en-US: SSL - # 当type为boolean时,前端用switch展示开关 - type: boolean - required: false - - field: contentType - name: - zh-CN: Content-Type - en-US: Content-Type - type: text - placeholder: 'Request Body Type' - required: false - - field: payload - name: - zh-CN: 请求BODY - en-US: BODY - type: textarea - placeholder: 'Available When POST PUT' - required: false -# 采集指标配置列表 -metrics: -# 第一个监控指标 cpu -# 注意:内置监控指标有 (responseTime - 响应时间) - - name: auth - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - 
priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: token - type: 1 - - field: refreshToken - type: 1 - # 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http - # 当protocol为http协议时具体的采集配置 - http: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - # url请求接口路径 - url: /api/account/auth/form - # 请求方式 GET POST PUT DELETE PATCH - method: POST - # 是否启用ssl/tls,即是http还是https,默认false - ssl: ^_^ssl^_^ - payload: ^_^payload^_^ - # 请求头内容 - headers: - content-type: ^_^contentType^_^ - ^_^headers^_^: ^_^headers^_^ - # 请求参数内容 - params: - ^_^params^_^: ^_^params^_^ - # 响应数据解析方式: default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控 - parseType: jsonPath - parseScript: '$.data' - - - - name: summary - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 1 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: category - type: 1 - - field: app - type: 1 - - field: size - type: 0 - - field: status - type: 0 -# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http -# 当protocol为http协议时具体的采集配置 - http: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - # url请求接口路径 - url: /api/summary - # 请求方式 GET POST PUT DELETE PATCH - method: GET - # 是否启用ssl/tls,即是http还是https,默认false - ssl: ^_^ssl^_^ - # 认证 - authorization: - # 认证方式: Basic Auth, Digest Auth, Bearer Token - type: Bearer Token - bearerTokenToken: ^o^token^o^ - # 响应数据解析方式: default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控 - parseType: jsonPath - parseScript: '$.data.apps.*' - -``` - -**配置完成后,再次重启 `hertzbeat` 系统,查看监控详情页面** - -![](/img/docs/advanced/extend-http-example-8.png) - -![](/img/docs/advanced/extend-http-example-9.png) - -### 设置阈值告警通知 - -> 接下来我们就可以正常设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! 
- ----- - -#### 完 - -HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数headers,params等,我们可以像用postman一样去定义它,可玩性也非常高! - -如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! - -**github: ** - -**gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md deleted file mode 100644 index 5ce2aad2738..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http-jsonpath.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -id: extend-http-jsonpath -title: HTTP协议JsonPath解析方式 -sidebar_label: JsonPath解析方式 ---- - -> HTTP接口调用获取响应数据后,用JsonPath脚本解析的解析方式去解析响应数据。 - -注意⚠️ 响应数据为JSON格式 - -**使用JsonPath脚本将响应数据解析成符合HertzBeat指定的数据结构规则的数据** - -#### JsonPath操作符 - -[JSONPath在线验证](https://www.jsonpath.cn) - -| JSONPATH | 帮助描述 | -|------------------|-----------------------------------| -| $ | 根对象或元素 | -| @ | 当前对象或元素 | -| . or [] | 子元素操作符 | -| .. | 递归匹配所有子元素 | -| * | 通配符. 匹配所有对象或元素. | -| [] | 下标运算符,JsonPath索引从0开始 | -| [,] | 连接运算符,将多个结果拼成数组返回,JSONPath允许使用别名. | -| [start:end:step] | 数组切片运算符 | -| ?() | 过滤器(脚本)表达式. | -| () | 脚本表达式. 
| - -#### HertzBeat数据格式规范 - -单层格式:key-value - -```json -{ - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" -} -``` - -多层格式:数组里面套key-value - -```json -[ - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - }, - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - } -] -``` - -#### 样例 - -查询自定义系统的数值信息,其暴露接口为 `/metrics/person`,我们需要其中的`type,num`指标 -接口返回的原始数据如下: - -```json -{ - "firstName": "John", - "lastName" : "doe", - "age" : 26, - "address" : { - "streetAddress": "naist street", - "city" : "Nara", - "postalCode" : "630-0192" - }, - "number": [ - { - "type": "core", - "num": 3343 - }, - { - "type": "home", - "num": 4543 - } - ] -} -``` - -我们使用JsonPath脚本解析,对应的脚本为: `$.number[*]` ,解析后的数据结构如下: - -```json -[ - { - "type": "core", - "num": 3343 - }, - { - "type": "home", - "num": 4543 - } -] -``` - -此数据结构符合HertzBeat的数据格式规范,成功提取指标`type,num`值。 - -**对应的监控模版YML可以配置为如下** - -```yaml -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... -app: example -name: - zh-CN: 模拟应用类型 - en-US: EXAMPLE APP -# 监控参数定义. 
field 这些为输入参数变量,即可以用^_^host^_^的形式写到后面的配置中,系统自动变量值替换 -# 强制固定必须参数 - host -params: - # field-字段名称标识符 - - field: host - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # 是否是必输项 true-必填 false-可选 - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # 当type为number时,用range表示范围 - range: '[0,65535]' - required: true - # 端口默认值 - defaultValue: 80 - # 参数输入框提示信息 - placeholder: '请输入端口' -# collect metrics config list -# 采集指标配置列表 -metrics: - # metrics - cpu - # 监控指标 - cpu - - name: cpu - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: type - type: 1 - label: true - - field: num - type: 0 -# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http -# 当protocol为http协议时具体的采集配置 - http: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - # url请求接口路径 - url: /metrics/person - # 请求方式 GET POST PUT DELETE PATCH - method: GET - # 是否启用ssl/tls,即是http还是https,默认false - ssl: false - # 响应数据解析方式: default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控 - # 这里使用jsonPath解析 - parseType: jsonPath - parseScript: '$.number[*]' -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md deleted file mode 100644 index 467921638da..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-http.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -id: extend-http -title: HTTP协议自定义监控 -sidebar_label: HTTP协议自定义监控 ---- - -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用HTTP协议自定义指标监控。 - -### HTTP协议采集流程 - -【**HTTP接口调用**】->【**响应校验**】->【**响应数据解析**】->【**默认方式解析|JsonPath脚本解析 | XmlPath解析(todo) | Prometheus解析**】->【**指标数据提取**】 - 
-由流程可见,我们自定义一个HTTP协议的监控类型,需要配置HTTP请求参数,配置获取哪些指标,对响应数据配置解析方式和解析脚本。 -HTTP协议支持我们自定义HTTP请求路径,请求header,请求参数,请求方式,请求体等。 - -**系统默认解析方式**:http接口返回hertzbeat规定的json数据结构,即可用默认解析方式解析数据提取对应的指标数据,详细介绍见 [**系统默认解析**](extend-http-default) -**JsonPath脚本解析方式**:用JsonPath脚本对响应的json数据进行解析,返回系统指定的数据结构,然后提供对应的指标数据,详细介绍见 [**JsonPath脚本解析**](extend-http-jsonpath) - -### 自定义步骤 - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -下面详细介绍下监控模版YML的配置用法,请注意看使用注释。 - -### 监控模版YML - -> 监控模版YML用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 -> 即我们通过自定义这个监控模版,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 - -样例:自定义一个名称为example_http的自定义监控类型,其使用HTTP协议采集指标数据。 - -```yaml -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... -app: example_http -name: - zh-CN: 模拟应用类型 - en-US: EXAMPLE APP -# 监控参数定义. 
field 这些为输入参数变量,即可以用^_^host^_^的形式写到后面的配置中,系统自动变量值替换 -# 强制固定必须参数 - host -params: - # field-字段名称标识符 - - field: host - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # 是否是必输项 true-必填 false-可选 - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # 当type为number时,用range表示范围 - range: '[0,65535]' - required: true - # 端口默认值 - defaultValue: 80 - # 参数输入框提示信息 - placeholder: '请输入端口' - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - # 当type为text时,用limit表示字符串限制大小 - limit: 20 - required: false - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - - field: ssl - name: - zh-CN: 启动SSL - en-US: Enable SSL - # 当type为boolean时,前端用switch展示开关 - type: boolean - required: false - - field: method - name: - zh-CN: 请求方式 - en-US: Method - type: radio - required: true - # 当type为radio单选框,checkbox复选框时,option表示可选项值列表 {name1:value1,name2:value2} - options: - - label: GET请求 - value: GET - - label: POST请求 - value: POST - - label: PUT请求 - value: PUT - - label: DELETE请求 - value: DELETE -# 采集指标配置列表 -metrics: -# 第一个监控指标 cpu -# 注意:内置监控指标有 (responseTime - 响应时间) - - name: cpu - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: hostname - type: 1 - label: true - - field: usage - type: 0 - unit: '%' - - field: cores - type: 0 - - field: waitTime - type: 0 - unit: s -# (非必须)监控指标别名,与上面的指标名映射。用于采集接口数据字段不直接是最终指标名称,需要此别名做映射转换 - aliasFields: - - hostname - - core1 - - core2 - - usage - - allTime - - runningTime -# (非必须)指标计算表达式,与上面的别名一起作用,计算出最终需要的指标值 -# eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - hostname=hostname - - cores=core1+core2 - - usage=usage - - waitTime=allTime-runningTime -# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - 
protocol: http -# 当protocol为http协议时具体的采集配置 - http: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - # url请求接口路径 - url: /metrics/cpu - # 请求方式 GET POST PUT DELETE PATCH - method: GET - # 是否启用ssl/tls,即是http还是https,默认false - ssl: false - # 请求头内容 - headers: - apiVersion: v1 - # 请求参数内容 - params: - param1: param1 - param2: param2 - # 认证 - authorization: - # 认证方式: Basic Auth, Digest Auth, Bearer Token - type: Basic Auth - basicAuthUsername: ^_^username^_^ - basicAuthPassword: ^_^password^_^ - # 响应数据解析方式: default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控 - # todo xmlPath-xmlPath脚本,prometheus-Prometheus数据规则 - parseType: jsonPath - parseScript: '$' - - - name: memory - priority: 1 - fields: - - field: hostname - type: 1 - label: true - - field: total - type: 0 - unit: kb - - field: usage - type: 0 - unit: '%' - - field: speed - type: 0 - protocol: http - http: - host: ^_^host^_^ - port: ^_^port^_^ - url: /metrics/memory - method: GET - headers: - apiVersion: v1 - params: - param1: param1 - param2: param2 - authorization: - type: Basic Auth - basicAuthUsername: ^_^username^_^ - basicAuthPassword: ^_^password^_^ - parseType: default -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md deleted file mode 100644 index 9dd3e547b5b..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jdbc.md +++ /dev/null @@ -1,245 +0,0 @@ ---- -id: extend-jdbc -title: JDBC协议自定义监控 -sidebar_label: JDBC协议自定义监控 ---- - -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JDBC(目前支持mysql,mariadb,postgresql,sqlserver)自定义指标监控。 -> JDBC协议自定义监控可以让我们很方便的通过写SQL查询语句就能监控到我们想监控的指标 - -### JDBC协议采集流程 - -【**系统直连MYSQL**】->【**运行SQL查询语句**】->【**响应数据解析:oneRow, multiRow, columns**】->【**指标数据提取**】 - -由流程可见,我们自定义一个JDBC协议的监控类型,需要配置JDBC请求参数,配置获取哪些指标,配置查询SQL语句。 - -### 数据解析方式 - 
-SQL查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有三种:oneRow, multiRow, columns - -#### **oneRow** - -> 查询一行数据, 通过查询返回结果集的列名称,和查询的字段映射 - -例如: -查询的指标字段为:one tow three four -查询SQL:select one, tow, three, four from book limit 1; -这里指标字段就能和响应数据一一映射为一行采集数据。 - -#### **multiRow** - -> 查询多行数据, 通过查询返回结果集的列名称,和查询的字段映射 - -例如: -查询的指标字段为:one tow three four -查询SQL:select one, tow, three, four from book; -这里指标字段就能和响应数据一一映射为多行采集数据。 - -#### **columns** - -> 采集一行指标数据, 通过查询的两列数据(key-value),key和查询的字段匹配,value为查询字段的值 - -例如: -查询字段:one tow three four -查询SQL:select key, value from book; -SQL响应数据: - -| key | value | -|-------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | - -这里指标字段就能和响应数据的key映射,获取对应的value为其采集监控数据。 - -### 自定义步骤 - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -下面详细介绍下文件的配置用法,请注意看使用注释。 - -### 监控模版YML - -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 -> 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 - -样例:自定义一个名称为example_sql的自定义监控类型,其使用JDBC协议采集指标数据。 - -```yaml -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: db -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... -app: example_sql -name: - zh-CN: 模拟MYSQL应用类型 - en-US: MYSQL EXAMPLE APP -# 监控参数定义. 
field 这些为输入参数变量,即可以用^_^host^_^的形式写到后面的配置中,系统自动变量值替换 -# 强制固定必须参数 - host -params: - - field: host - name: - zh-CN: 主机Host - en-US: Host - type: host - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - range: '[0,65535]' - required: true - defaultValue: 80 - placeholder: '请输入端口' - - field: database - name: - zh-CN: 数据库名称 - en-US: Database - type: text - required: false - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: false - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - - field: url - name: - zh-CN: Url - en-US: Url - type: text - required: false -# 采集指标配置列表 -metrics: - - name: basic - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: version - type: 1 - label: true - - field: port - type: 1 - - field: datadir - type: 1 - - field: max_connections - type: 0 - # (非必须)监控指标别名,与上面的指标名映射。用于采集接口数据字段不直接是最终指标名称,需要此别名做映射转换 - aliasFields: - - version - - version_compile_os - - version_compile_machine - - port - - datadir - - max_connections - # (非必须)指标计算表达式,与上面的别名一起作用,计算出最终需要的指标值 - # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - port=port - - datadir=datadir - - max_connections=max_connections - - version=version+"_"+version_compile_os+"_"+version_compile_machine - protocol: jdbc - jdbc: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - platform: mysql - username: ^_^username^_^ - password: ^_^password^_^ - database: ^_^database^_^ - # SQL查询方式: oneRow, multiRow, columns - queryType: columns - # sql - sql: show global variables where Variable_name like 'version%' or Variable_name = 'max_connections' or Variable_name = 'datadir' or Variable_name = 'port'; - url: ^_^url^_^ - - - name: status - priority: 1 - 
fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: threads_created - type: 0 - - field: threads_connected - type: 0 - - field: threads_cached - type: 0 - - field: threads_running - type: 0 - # (非必须)监控指标别名,与上面的指标名映射。用于采集接口数据字段不直接是最终指标名称,需要此别名做映射转换 - aliasFields: - - threads_created - - threads_connected - - threads_cached - - threads_running - # (非必须)指标计算表达式,与上面的别名一起作用,计算出最终需要的指标值 - # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - threads_created=threads_created - - threads_connected=threads_connected - - threads_cached=threads_cached - - threads_running=threads_running - protocol: jdbc - jdbc: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - platform: mysql - username: ^_^username^_^ - password: ^_^password^_^ - database: ^_^database^_^ - # SQL查询方式: oneRow, multiRow, columns - queryType: columns - # sql - sql: show global status where Variable_name like 'thread%' or Variable_name = 'com_commit' or Variable_name = 'com_rollback' or Variable_name = 'questions' or Variable_name = 'uptime'; - url: ^_^url^_^ - - - name: innodb - priority: 2 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: innodb_data_reads - type: 0 - unit: 次数 - - field: innodb_data_writes - type: 0 - unit: 次数 - - field: innodb_data_read - type: 0 - unit: kb - - field: innodb_data_written - type: 0 - unit: kb - protocol: jdbc - jdbc: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - platform: mysql - username: ^_^username^_^ - password: ^_^password^_^ - database: ^_^database^_^ - # SQL查询方式: oneRow, multiRow, columns - queryType: columns - # sql - sql: show global status where Variable_name like 'innodb%'; - url: ^_^url^_^ -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md deleted file mode 100644 index 
0e7694f76ce..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-jmx.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -id: extend-jmx -title: JMX协议自定义监控 -sidebar_label: JMX协议自定义监控 ---- - -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用JMX协议自定义指标监控。 -> JMX协议自定义监控可以让我们很方便的通过配置 JMX Mbean Object 就能监控采集到我们想监控的 Mbean 指标 - -### JMX协议采集流程 - -【**对端JAVA应用暴露JMX服务**】->【**HertzBeat直连对端JMX服务**】->【**获取配置的 Mbean Object 数据**】->【**指标数据提取**】 - -由流程可见,我们自定义一个JMX协议的监控类型,需要配置JMX请求参数,配置获取哪些指标,配置查询Object信息。 - -### 数据解析方式 - -通过配置监控模版YML的指标`field`, `aliasFields`, `jmx` 协议的 `objectName` 来和对端系统暴露的 `Mbean`对象信息映射解析。 - -### 自定义步骤 - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -下面详细介绍下监控模版的配置用法,请注意看使用注释。 - -### 监控模版YML - -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 -> 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 - -样例:自定义一个名称为 `example_jvm` 的自定义监控类型,其使用JMX协议采集指标数据。 - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: service -# The monitoring type eg: linux windows tomcat mysql aws... -# 监控类型 eg: linux windows tomcat mysql aws... 
-app: example_jvm -# The monitoring i18n name -# 监控类型国际化名称 -name: - zh-CN: 自定义JVM虚拟机 - en-US: CUSTOM JVM -# Input params define for monitoring(render web ui by the definition) -# 监控所需输入参数定义(根据定义渲染页面UI) -params: - # field-param field key - # field-字段名称标识符 - - field: host - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # required-true or false - # 是否是必输项 true-必填 false-可选 - required: true - # field-param field key - # field-变量字段标识符 - - field: port - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: number - # when type is number, range is required - # 当type为number时,用range表示范围 - range: '[0,65535]' - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # default value - # 端口默认值 - defaultValue: 9999 - # field-param field key - # field-变量字段标识符 - - field: url - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: JMX URL - en-US: JMX URL - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: text - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: false - # hide param-true or false - # 是否隐藏字段 true or false - hide: true - # param field input placeholder - # 参数输入框提示信息 - placeholder: 'service:jmx:rmi:///jndi/rmi://host:port/jmxrmi' - # field-param field key - # field-变量字段标识符 - - field: username - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 用户名 - en-US: Username - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: text - # when type is text, use limit to limit string length - # 当type为text时,用limit表示字符串限制大小 - limit: 20 - # required-true or false - # required-是否是必输项 true-必填 false-可选 - 
required: false - # hide param-true or false - # 是否隐藏字段 true or false - hide: true - # field-param field key - # field-变量字段标识符 - - field: password - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 密码 - en-US: Password - # type-param field type(most mapping the html input tag) - # type-字段类型,样式(大部分映射input标签type属性) - type: password - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: false - # hide param-true or false - # 是否隐藏字段 true or false - hide: true -# collect metrics config list -# 采集指标配置列表 -metrics: - # metrics - basic - # 监控指标 - basic - - name: basic - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue - # 指标采集调度优先级(0->127)->(优先级高->低) 优先级低的指标会等优先级高的指标采集完成后才会被调度, 相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # collect metrics content - # 具体监控指标列表 - fields: - # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), instance-是否是指标集合唯一标识符字段 - - field: VmName - type: 1 - - field: VmVendor - type: 1 - - field: VmVersion - type: 1 - - field: Uptime - type: 0 - unit: ms - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk - # 用于监控的协议,例: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: jmx - # the config content when protocol is jmx - jmx: - # host: ipv4 ipv6 domain - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # port - # 端口 - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - # jmx mbean object name - # jmx mbean 对象名称 - objectName: java.lang:type=Runtime - url: ^_^url^_^ - - - name: memory_pool - priority: 1 - fields: - - field: name - type: 1 - label: true - - field: committed - type: 0 - 
unit: MB - - field: init - type: 0 - unit: MB - - field: max - type: 0 - unit: MB - - field: used - type: 0 - unit: MB - units: - - committed=B->MB - - init=B->MB - - max=B->MB - - used=B->MB - # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field - # (可选)监控指标别名, 做为中间字段与采集数据字段和指标字段映射转换 - aliasFields: - - Name - - Usage->committed - - Usage->init - - Usage->max - - Usage->used - # mapping and conversion expressions, use these and aliasField above to calculate metrics value - # (可选)指标映射转换计算表达式,与上面的别名一起作用,计算出最终需要的指标值 - # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - name=Name - - committed=Usage->committed - - init=Usage->init - - max=Usage->max - - used=Usage->used - protocol: jmx - jmx: - # host: ipv4 ipv6 domain - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # port - # 端口 - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - objectName: java.lang:type=MemoryPool,name=* - url: ^_^url^_^ -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md deleted file mode 100644 index 9f7ae8ee1b1..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-point.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -id: extend-point -title: 自定义监控 -sidebar_label: 自定义监控 ---- - -> HertzBeat拥有自定义监控能力,您只需配置监控模版YML就能适配一款自定义的监控类型。 -> 目前自定义监控支持[HTTP协议](extend-http),[JDBC协议](extend-jdbc),[SSH协议](extend-ssh),[JMX协议](extend-jmx),[SNMP协议](extend-snmp),后续会支持更多通用协议。 - -### 自定义流程 - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -### 监控模版YML - -**HertzBeat的设计是一个监控模版对应一个监控类型,所有监控类型都是由监控模版来定义的**。 - -> 监控模版YML定义了 *监控类型的名称(国际化), 配置参数映射, 采集指标信息, 采集协议配置* 等。 - -下面使用样例详细介绍下这监控模版YML的配置用法。 - -样例:自定义一个 `app` 名称为 
`example2` 的自定义监控类型,其使用HTTP协议采集指标数据。 - -[监控模版] -> [新增监控类型] -> [右边配置如下监控模版YML] -> [保存并应用] - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# The monitoring type eg: linux windows tomcat mysql aws... -# 监控类型 eg: linux windows tomcat mysql aws... -app: example2 -# The monitoring i18n name -# 监控类型国际化名称 -name: - zh-CN: 模拟网站监测 - en-US: EXAMPLE WEBSITE -# 监控所需输入参数定义(根据定义渲染页面UI) -# Input params define for monitoring(render web ui by the definition) -params: - # field-param field key - # field-变量字段标识符 - - field: host - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # field-param field key - # field-变量字段标识符 - - field: port - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: number - # when type is number, range is required - # 当type为number时,用range表示范围 - range: '[0,65535]' - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # default value - # 默认值 - defaultValue: 80 - # field-param field key - # field-变量字段标识符 - - field: uri - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 相对路径 - en-US: URI - # type-param field type(most mapping the html input tag) - # type-字段类型,样式(大部分映射input标签type属性) - type: text - # when type is text, use limit to limit string length - # 当type为text时,用limit表示字符串限制大小 - limit: 200 - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: false - # 
参数输入框提示信息 - # param field input placeholder - placeholder: 'Website uri path(no ip port) EG:/console' - # field-param field key - # field-变量字段标识符 - - field: ssl - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 启用HTTPS - en-US: HTTPS - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: boolean - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # field-param field key - # field-变量字段标识符 - - field: timeout - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - # type-param field type(most mapping the html input tag) - # type-字段类型,样式(大部分映射input标签type属性) - type: number - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: false - # hide param-true or false - # 是否隐藏字段 true or false - hide: true - -metrics: - # metrics - summary, inner monitoring metrics (responseTime - response time, keyword - number of keywords) - # 监控指标 - summary, 内置监控指标有 (responseTime - 响应时间, keyword - 关键字数量) - - name: summary - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue - # 指标采集调度优先级(0->127)->(优先级高->低) 优先级低的指标会等优先级高的指标采集完成后才会被调度, 相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # collect metrics content - # 具体监控指标列表 - fields: - # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), instance-是否是指标集合唯一标识符字段 - - field: responseTime - type: 0 - unit: ms - - field: keyword - type: 0 - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: 
http - # the config content when protocol is http - http: - # http host: ipv4 ipv6 domain - host: ^_^host^_^ - # http port - port: ^_^port^_^ - # http url - url: ^_^uri^_^ - timeout: ^_^timeout^_^ - # http method: GET POST PUT DELETE PATCH - method: GET - # if enabled https - ssl: ^_^ssl^_^ - # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-for website monitoring, prometheus-prometheus exporter rule - # http 响应数据解析方式: default-系统规则, jsonPath-jsonPath脚本, website-网站可用性指标监控, prometheus-Prometheus数据规则 - parseType: website - -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-push.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-push.md deleted file mode 100644 index 7245afe2a12..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-push.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: extend-push -title: Push Style Custom Monitoring -sidebar_label: Push Style Custom Monitoring ---- - -> 推送方式监控是一种特殊的监控,允许用户配置数据格式并编写代码将指标推送到 Hertzbeat。 -> 下面我们将介绍如何使用这一功能。 - -### 推送方式监控的采集流程 - -【用户开始推送数据】->【HertzBeat推送模块暂存数据】->【HertzBeat采集模块定期采集数据】 - -### 数据解析方式 - -HertzBeat会使用用户添加新监控时配置的格式来解析数据。 - -### 创建监控步骤 - -HertzBeat页面 -> 应用服务监控 -> 推送方式监控 -> 新建推送方式监视器 -> 设置推送模块主机(Hertzbeat服务器ip,通常为127.0.0.1或localhost) -> 设置推送模块端口(hertzbeat服务器端口,通常为1157) -> 配置数据字段(单位:字符串表示,类型:0表示数字/1表示字符串)-> 结束 - ---- - -### 监控配置示例 - -![](/img/docs/advanced/extend-push-example-1.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md deleted file mode 100644 index 1172b263c2e..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-snmp.md +++ /dev/null @@ -1,209 +0,0 @@ ---- -id: extend-snmp -title: SNMP协议自定义监控 -sidebar_label: SNMP协议自定义监控 ---- - -> 
从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用 SNMP 协议自定义指标监控。 -> SNMP 协议自定义监控可以让我们很方便的通过配置 Mib OID信息 就能监控采集到我们想监控的OID指标 - -### SNMP协议采集流程 - -【**对端开启SNMP服务**】->【**HertzBeat直连对端SNMP服务**】->【**根据配置抓取对端OID指标信息**】->【**指标数据提取**】 - -由流程可见,我们自定义一个SNMP协议的监控类型,需要配置SNMP请求参数,配置获取哪些指标,配置查询OID信息。 - -### 数据解析方式 - -通过配置监控模版YML的指标`field`, `aliasFields`, `snmp` 协议下的 `oids`来抓取对端指定的数据并解析映射。 - -### 自定义步骤 - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -下面详细介绍下文件的配置用法,请注意看使用注释。 - -### 监控模版YML - -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 -> 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 - -样例:自定义一个名称为 example_windows 的自定义监控类型,其使用 SNMP 协议采集指标数据。 - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: os -# The monitoring type eg: linux windows tomcat mysql aws... -# 监控类型 eg: linux windows tomcat mysql aws... 
-app: windows -# The monitoring i18n name -# 监控类型国际化名称 -name: - zh-CN: Windows操作系统 - en-US: OS Windows -# 监控所需输入参数定义(根据定义渲染页面UI) -# Input params define for monitoring(render web ui by the definition) -params: - # field-param field key - # field-变量字段标识符 - - field: host - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # field-param field key - # field-变量字段标识符 - - field: port - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: number - # when type is number, range is required - # 当type为number时,用range表示范围 - range: '[0,65535]' - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # default value - # 默认值 - defaultValue: 161 - # field-param field key - # field-变量字段标识符 - - field: version - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: SNMP 版本 - en-US: SNMP Version - # type-param field type(radio mapping the html radio tag) - # type-当type为radio时,前端用radio展示开关 - type: radio - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # when type is radio checkbox, use option to show optional values {name1:value1,name2:value2} - # 当type为radio单选框, checkbox复选框时, option表示可选项值列表 {name1:value1,name2:value2} - options: - - label: SNMPv1 - value: 0 - - label: SNMPv2c - value: 1 - # field-param field key - # field-变量字段标识符 - - field: community - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: SNMP 团体字 - en-US: SNMP Community - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: text - # when type is text, use limit to limit string length - # 
当type为text时,用limit表示字符串限制大小 - limit: 100 - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # 参数输入框提示信息 - # param field input placeholder - placeholder: 'Snmp community for v1 v2c' - # field-param field key - # field-变量字段标识符 - - field: timeout - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: number - # when type is number, range is required - # 当type为number时,用range表示范围 - range: '[0,100000]' - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: false - # hide-is hide this field and put it in advanced layout - # hide-是否隐藏此参数将其放入高级设置中 - hide: true - # default value - # 默认值 - defaultValue: 6000 -# collect metrics config list -# 采集指标配置列表 -metrics: - # metrics - system - # 监控指标 - system - - name: system - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue - # 指标采集调度优先级(0->127)->(优先级高->低) 优先级低的指标会等优先级高的指标采集完成后才会被调度, 相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # collect metrics content - # 具体监控指标列表 - fields: - # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - # field-指标名称, type-指标类型(0-number数字,1-string字符串), unit-指标单位('%','ms','MB'), instance-是否是指标集合唯一标识符字段 - - field: name - type: 1 - - field: descr - type: 1 - - field: uptime - type: 1 - - field: numUsers - type: 0 - - field: services - type: 0 - - field: processes - type: 0 - - field: responseTime - type: 0 - unit: ms - - field: location - type: 1 - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: snmp - # the config content when protocol is snmp - snmp: - 
# server host: ipv4 ipv6 domain - host: ^_^host^_^ - # server port - port: ^_^port^_^ - # snmp connect timeout - timeout: ^_^timeout^_^ - # snmp community - # snmp 团体字 - community: ^_^community^_^ - # snmp version - version: ^_^version^_^ - # snmp operation: get, walk - operation: get - # metrics oids: metric_name - oid_value - oids: - name: 1.3.6.1.2.1.1.5.0 - descr: 1.3.6.1.2.1.1.1.0 - uptime: 1.3.6.1.2.1.25.1.1.0 - numUsers: 1.3.6.1.2.1.25.1.5.0 - services: 1.3.6.1.2.1.1.7.0 - processes: 1.3.6.1.2.1.25.1.6.0 - location: 1.3.6.1.2.1.1.6.0 -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md deleted file mode 100644 index 0300c14b31d..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-ssh.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -id: extend-ssh -title: SSH协议自定义监控 -sidebar_label: SSH协议自定义监控 ---- - -> 从[自定义监控](extend-point)了解熟悉了怎么自定义类型,指标,协议等,这里我们来详细介绍下用SSH协议自定义指标监控。 -> SSH协议自定义监控可以让我们很方便的通过写sh命令脚本就能监控采集到我们想监控的Linux指标 - -### SSH协议采集流程 - -【**系统直连Linux**】->【**运行SHELL命令脚本语句**】->【**响应数据解析:oneRow, multiRow**】->【**指标数据提取**】 - -由流程可见,我们自定义一个SSH协议的监控类型,需要配置SSH请求参数,配置获取哪些指标,配置查询脚本语句。 - -### 数据解析方式 - -SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获取对应的指标数据,目前映射解析方式有两种:oneRow, multiRow,能满足绝大部分指标需求。 - -#### **oneRow** - -> 查询出一列数据, 通过查询返回结果集的字段值(一行一个值)与字段映射 - -例如: -需要查询Linux的指标 hostname-主机名称,uptime-启动时间 -主机名称原始查询命令:`hostname` -启动时间原始查询命令:`uptime | awk -F "," '{print $1}'` -则在hertzbeat对应的这两个指标的查询脚本为(用`;`将其连接到一起): -`hostname; uptime | awk -F "," '{print $1}'` -终端响应的数据为: - -``` -tombook -14:00:15 up 72 days -``` - -则最后采集到的指标数据一一映射为: -hostname值为 `tombook` -uptime值为 `14:00:15 up 72 days` - -这里指标字段就能和响应数据一一映射为一行采集数据。 - -#### **multiRow** - -> 查询多行数据, 通过查询返回结果集的列名称,和查询的指标字段映射 - -例如: -查询的Linux内存相关指标字段:total-内存总量 used-已使用内存 free-空闲内存 buff-cache-缓存大小 available-可用内存 -内存指标原始查询命令为:`free -m`, 控制台响应: - -```shell - total used free 
shared buff/cache available -Mem: 7962 4065 333 1 3562 3593 -Swap: 8191 33 8158 -``` - -在hertzbeat中multiRow格式解析需要响应数据列名称和指标值一一映射,则对应的查询SHELL脚本为: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` -控制台响应为: - -```shell -total used free buff_cache available -7962 4066 331 3564 3592 -``` - -这里指标字段就能和响应数据一一映射为采集数据。 - -### 自定义步骤 - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** -> **点击保存应用** -> **使用新监控类型添加监控** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -下面详细介绍下文件的配置用法,请注意看使用注释。 - -### 监控模版YML - -> 监控配置定义文件用于定义 *监控类型的名称(国际化), 请求参数结构定义(前端页面根据配置自动渲染UI), 采集指标信息, 采集协议配置* 等。 -> 即我们通过自定义这个YML,配置定义什么监控类型,前端页面需要输入什么参数,采集哪些性能指标,通过什么协议去采集。 - -样例:自定义一个名称为example_linux的自定义监控类型,其使用SSH协议采集指标数据。 - -```yaml -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: os -# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws... -app: example_linux -name: - zh-CN: 模拟LINUX应用类型 - en-US: LINUX EXAMPLE APP -# 监控参数定义. 
field 这些为输入参数变量,即可以用^_^host^_^的形式写到后面的配置中,系统自动变量值替换 -# 强制固定必须参数 - host -params: - - field: host - name: - zh-CN: 主机Host - en-US: Host - type: host - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - range: '[0,65535]' - required: true - defaultValue: 22 - placeholder: '请输入端口' - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: true - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: true -# collect metrics config list -# 采集指标配置列表 -metrics: - # 第一个监控指标 basic - # 注意:内置监控指标有 (responseTime - 响应时间) - - name: basic - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # 具体监控指标列表 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: hostname - type: 1 - label: true - - field: version - type: 1 - - field: uptime - type: 1 - # 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: ssh - # 当protocol为http协议时具体的采集配置 - ssh: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - script: (uname -r ; hostname ; uptime | awk -F "," '{print $1}' | sed "s/ //g") | sed ":a;N;s/\n/^/g;ta" | awk -F '^' 'BEGIN{print "version hostname uptime"} {print $1, $2, $3}' - # 响应数据解析方式:oneRow, multiRow - parseType: multiRow - - - name: cpu - priority: 1 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: info - type: 1 - - field: cores - type: 0 - unit: 核数 - - field: interrupt - type: 0 - unit: 个数 - - field: load - type: 1 - - field: context_switch - type: 0 - unit: 个数 - # 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: ssh - # 当protocol为http协议时具体的采集配置 - ssh: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - script: "LANG=C lscpu | awk -F: '/Model name/ 
{print $2}';awk '/processor/{core++} END{print core}' /proc/cpuinfo;uptime | sed 's/,/ /g' | awk '{for(i=NF-2;i<=NF;i++)print $i }' | xargs;vmstat 1 1 | awk 'NR==3{print $11}';vmstat 1 1 | awk 'NR==3{print $12}'" - parseType: oneRow - - - name: memory - priority: 2 - fields: - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: total - type: 0 - unit: Mb - - field: used - type: 0 - unit: Mb - - field: free - type: 0 - unit: Mb - - field: buff_cache - type: 0 - unit: Mb - - field: available - type: 0 - unit: Mb - # 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: ssh - # 当protocol为http协议时具体的采集配置 - ssh: - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # 端口 - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' - parseType: multiRow -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md deleted file mode 100644 index 273fb4b4406..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/advanced/extend-tutorial.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -id: extend-tutorial -title: 自定义适配一款基于HTTP协议的新监控类型 -sidebar_label: 教程案例 ---- - -通过此教程我们一步一步描述如何在hertzbeat监控系统下自定义新增适配一款基于http协议的监控类型。 - -阅读此教程前我们希望您已经从[自定义监控](extend-point)和[http协议自定义](extend-http)了解熟悉了怎么自定义类型,指标,协议等。 - -### HTTP协议解析通用响应结构体,获取指标数据 - -> 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 - -``` -{ - "code": 200, - "msg": "success", - "data": {} -} - -``` - -如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API -`http://localhost:1157/api/summary`, 其响应数据为: - -``` -{ - "msg": null, - "code": 0, - "data": { - "apps": [ - { - "category": "service", - "app": "jvm", - "status": 
0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - }, - { - "category": "service", - "app": "website", - "status": 0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - } - ] - } -} -``` - -**我们这次获取其app下的 `category`,`app`,`status`,`size`,`availableSize`等指标数据。** - -### 新增配置监控模版YML - -**HertzBeat页面** -> **监控模版菜单** -> **新增监控类型** -> **配置自定义监控模版YML** - -定义我们在页面上需要输入哪些参数,一般的HTTP协议参数主要有ip, port, headers, params, uri, 账户密码等,我们直接复用 `api`监控模版 里面的参数定义内容,删除其中的我们不需要输入的uri参数和keyword关键字等参数即可。 - -定义采集类型是啥,需要用哪种协议采集方式,采集的指标是啥,协议的配置参数等。我们直接复用 `api`监控模版 里面的定义内容,修改为我们当前的监控类型`hertzbeat`配置参数即可,如下:注意⚠️我们这次获取接口响应数据中的`category`,`app`,`status`,`size`,`availableSize`等字段作为指标数据。 - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 -category: custom -# The monitoring type eg: linux windows tomcat mysql aws... -# 监控类型 eg: linux windows tomcat mysql aws... 
-app: hertzbeat -# The monitoring i18n name -# 监控类型国际化名称 -name: - zh-CN: HertzBeat监控系统 - en-US: HertzBeat Monitor -# Input params define for monitoring(render web ui by the definition) -# 监控所需输入参数定义(根据定义渲染页面UI) -params: - # field-param field key - # field-字段名称标识符 - - field: host - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: host - # required-true or false - # 是否是必输项 true-必填 false-可选 - required: true - # field-param field key - # field-变量字段标识符 - - field: port - # name-param field display i18n name - # name-参数字段显示名称 - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - # type-字段类型,样式(大部分映射input标签type属性) - type: number - # when type is number, range is required - # 当type为number时,用range表示范围 - range: '[0,65535]' - # required-true or false - # required-是否是必输项 true-必填 false-可选 - required: true - # default value - # 端口默认值 - defaultValue: 1157 - - field: ssl - name: - zh-CN: 启用HTTPS - en-US: HTTPS - type: boolean - required: true - - field: timeout - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - type: number - required: false - hide: true - - field: authType - name: - zh-CN: 认证方式 - en-US: Auth Type - type: radio - required: false - hide: true - options: - - label: Basic Auth - value: Basic Auth - - label: Digest Auth - value: Digest Auth - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: false - hide: true - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - hide: true -metrics: - # the first metrics summary - # attention: Built-in monitoring metrics contains (responseTime - Response time) - - name: summary - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled 
first, only availability metrics collect success will the scheduling continue - # 指标调度优先级(0-127)越小优先级越高,优先级低的指标会等优先级高的指标采集完成后才会被调度,相同优先级的指标会并行调度采集 - # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 - priority: 0 - # collect metrics content - # 具体监控指标列表 - fields: - # metrics content contains field-metric name, type-metric type:0-number,1-string, label-if is metrics label, unit-metric unit('%','ms','MB') - # 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 label是否为标签 unit:指标单位 - - field: app - type: 1 - label: true - - field: category - type: 1 - - field: status - type: 0 - - field: size - type: 0 - - field: availableSize - type: 0 - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk, we use HTTP protocol here - # 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk, 我们这里使用HTTP协议 - protocol: http - # the config content when protocol is http - # 当protocol为http协议时具体的采集配置 - http: - # http host: ipv4 ipv6 domain - # 主机host: ipv4 ipv6 域名 - host: ^_^host^_^ - # http port - # 端口 - port: ^_^port^_^ - # http url, we don't need to enter a parameter here, just set the fixed value to /api/summary - # url请求接口路径,我们这里不需要输入传参,写死为 /api/summary - url: /api/summary - timeout: ^_^timeout^_^ - # http method: GET POST PUT DELETE PATCH, default fixed value is GET - # 请求方式 GET POST PUT DELETE PATCH,写死为 GET - method: GET - # if enabled https, default value is false - # 是否启用ssl/tls,即是http还是https,默认false - ssl: ^_^ssl^_^ - # http auth - # 认证 - authorization: - # http auth type: Basic Auth, Digest Auth, Bearer Token - # 认证方式: Basic Auth, Digest Auth, Bearer Token - type: ^_^authType^_^ - basicAuthUsername: ^_^username^_^ - basicAuthPassword: ^_^password^_^ - digestAuthUsername: ^_^username^_^ - digestAuthPassword: ^_^password^_^ - # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-for website monitoring, we use jsonpath to parse response data here - # 响应数据解析方式: 
default-系统规则,jsonPath-jsonPath脚本,website-网站可用性指标监控,我们这里使用jsonpath来解析响应数据 - parseType: jsonPath - parseScript: '$.data.apps.*' -``` - -**点击保存并应用。我们可以看到系统页面的自定义监控菜单已经多了一个`hertzbeat`监控类型了。** - -![](/img/docs/advanced/extend-http-example-1.png) - -### 页面添加对`hertzbeat`监控类型的监控 - -> 我们点击新增 `HertzBeat监控系统`,配置监控IP,端口,采集周期,高级设置里的账户密码等, 点击确定添加监控。 - -![](/img/docs/advanced/extend-http-example-2.png) - -![](/img/docs/advanced/extend-http-example-3.png) - -> 过一定时间(取决于采集周期)我们就可以在监控详情看到具体的指标数据和历史图表啦! - -![](/img/docs/advanced/extend-http-example-4.png) - -### 设置阈值告警通知 - -> 接下来我们就可以正常设置阈值,告警触发后可以在告警中心查看,也可以新增接收人,设置告警通知等,Have Fun!!! - ----- - -#### 完 - -HTTP协议的自定义监控的实践就到这里,HTTP协议还带其他参数 `headers,params` 等,我们可以像用postman一样去定义它,可玩性也非常高! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md deleted file mode 100644 index 94e2ad54899..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/activemq.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -id: activemq -title: 监控 Apache ActiveMQ 消息中间件 -sidebar_label: ActiveMQ消息中间件 -keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监控] ---- - -> 对 Apache ActiveMQ 消息中间件的运行状态,节点,Topic等相关指标进行监测。 - -**使用协议:JMX** - -### 监控前操作 - -> 您需要在 ActiveMQ 开启 `JMX` 服务,HertzBeat 使用 JMX 协议对 ActiveMQ 进行指标采集。 - -1. 修改安装目录下的 `conf/activemq.xml` 文件,开启JMX - -> 在 `broker` 标签中添加 `userJmx="true"` 属性 - -```xml - - - -``` - -2. 
修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 - -将如下原配置信息 - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -更新为如下配置,⚠️注意修改`本机对外IP` - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -3. 
重启 ACTIVEMQ 服务,在 HertzBeat 添加对应 ActiveMQ 监控即可,参数使用 JMX 配置的 IP 端口。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|---------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| JMX端口 | JMX 对外提供的HTTP端口,默认为 11099。 | -| JMX URL | 可选,自定义 JMX URL 连接 | -| 用户名 | 认证时使用的用户名 | -| 密码 | 认证时使用的密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:broker - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-----------------------------------------------------------------------| -| BrokerName | 无 | The name of the broker. | -| BrokerVersion | 无 | The version of the broker. | -| Uptime | 无 | Uptime of the broker. | -| UptimeMillis | ms | Uptime of the broker in milliseconds. | -| Persistent | 无 | Messages are synchronized to disk. | -| MemoryPercentUsage | % | Percent of memory limit used. | -| StorePercentUsage | % | Percent of store limit used. | -| TempPercentUsage | % | Percent of temp limit used. | -| CurrentConnectionsCount | 无 | Attribute exposed for management | -| TotalConnectionsCount | 无 | Attribute exposed for management | -| TotalEnqueueCount | 无 | Number of messages that have been sent to the broker. | -| TotalDequeueCount | 无 | Number of messages that have been acknowledged on the broker. | -| TotalConsumerCount | 无 | Number of message consumers subscribed to destinations on the broker. | -| TotalProducerCount | 无 | Number of message producers active on destinations on the broker. | -| TotalMessageCount | 无 | Number of unacknowledged messages on the broker. 
| -| AverageMessageSize | 无 | Average message size on this broker | -| MaxMessageSize | 无 | Max message size on this broker | -| MinMessageSize | 无 | Min message size on this broker | - -#### 指标集合:topic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|-------------------------------------------------------------------------------------------| -| Name | 无 | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | 无 | The percentage of the memory limit used | -| ProducerCount | 无 | Number of producers attached to this destination | -| ConsumerCount | 无 | Number of consumers subscribed to this destination. | -| EnqueueCount | 无 | Number of messages that have been sent to the destination. | -| DequeueCount | 无 | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | 无 | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | 无 | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | 无 | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | 无 | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - -#### 指标集合:memory_pool - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:class_loading - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------|------|----------| -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - -#### 指标集合:thread - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-----------| -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md deleted file mode 100644 index a7f77f7f5b6..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/airflow.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: airflow -title: 监控:Apache Airflow监控 -sidebar_label: Apache Airflow -keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ---- - -> 对Apache Airflow通用性能指标进行采集监控。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|-----------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 
监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:airflow_health - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|------------------| -| metadatabase | 无 | metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | - -#### 指标集合:airflow_version - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------|------|---------------| -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md deleted file mode 100644 index 5198b961b66..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_console.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: alert_console -title: 告警模板中自定义的控制台地址 -sidebar_label: 告警模板登录台地址 ---- - -> 阈值触发后发送告警信息,通过钉钉/企业微信/飞书机器人通知或者使用邮箱通知的时候,告警内容中有登录控制台的详情链接 - -### 自定义设置 - -在我们的启动配置文件application.yml中,找到下面的配置 - -```yml -alerter: - console-url: #这里就是我们的自定义控制台地址 -``` - -默认值是赫兹跳动的官方控制台地址 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md deleted file mode 100644 index ba6b49bc58a..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_dingtalk.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: alert_dingtalk -title: 告警钉钉机器人通知 -sidebar_label: 告警钉钉机器人通知 -keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过钉钉机器人通知到接收人。 - -### 操作步骤 - -1. **【钉钉桌面客户端】-> 【群设置】-> 【智能群助手】-> 【添加新建机器人-选自定义】-> 【设置机器人名称头像】-> 【注意⚠️设置自定义关键字: HertzBeat】 ->【添加成功后复制其WebHook地址】** - -> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 - -![email](/img/docs/help/alert-notice-8.png) - -2. 
**【保存机器人的WebHook地址access_token值】** - -> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` -> 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` - -3. **【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】** - -![email](/img/docs/help/alert-notice-9.png) - -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-4.png) - -### 钉钉机器人通知常见问题 - -1. 钉钉群未收到机器人告警通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查钉钉机器人是否配置了安全自定义关键字:HertzBeat -> 请排查是否配置正确机器人ACCESS_TOKEN,是否已配置告警策略关联 - -其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md deleted file mode 100644 index bb3c6287cd4..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_discord.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: alert_discord -title: 告警 Discord 机器人通知 -sidebar_label: 告警 Discord 机器人通知 -keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过 Discord 机器人通知到接收人。 - -## 操作步骤 - -> 部署网络本身需支持科学上网,不支持设置代理 - -### 在 Discord 创建应用, 应用下创建机器人, 获取机器人 Token - -1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 - -![bot](/img/docs/help/discord-bot-1.png) - -2. 在应用下创建机器人,获取机器人 Token - -![bot](/img/docs/help/discord-bot-2.png) - -![bot](/img/docs/help/discord-bot-3.png) - -3. 授权机器人到聊天服务器 - -> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` - -![bot](/img/docs/help/discord-bot-4.png) - -> 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 - -4. 查看您的聊天服务器是否已经加入机器人成员 - -![bot](/img/docs/help/discord-bot-5.png) - -### 开启开发者模式,获取频道 Channel ID - -1. 个人设置 -> 高级设置 -> 开启开发者模式 - -![bot](/img/docs/help/discord-bot-6.png) - -2. 
获取频道 Channel ID - -> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID - -![bot](/img/docs/help/discord-bot-7.png) - -### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot - -1. **【告警通知】->【新增接收人】 ->【选择 Discord 机器人通知方式】->【设置机器人Token和ChannelId】-> 【确定】** - -![email](/img/docs/help/discord-bot-8.png) - -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-policy.png) - -### Discord 机器人通知常见问题 - -1. Discord 未收到机器人告警通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 -> 请排查机器人是否被 Discord聊天服务器正确赋权 - -其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md deleted file mode 100644 index 0f53b58e71d..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_email.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: alert_email -title: 告警邮件通知 -sidebar_label: 告警邮件通知 -keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过邮件通知到接收人。 - -### 操作步骤 - -1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** - -![email](/img/docs/help/alert-notice-1.png) - -2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** - ![email](/img/docs/help/alert-notice-2.png) - -![email](/img/docs/help/alert-notice-3.png) - -3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-4.png) - -### 邮件通知常见问题 - -1. 自己内网部署的HertzBeat无法接收到邮件通知 - -> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 - -2. 云环境TanCloud无法接收到邮件通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确邮箱,是否已配置告警策略关联 -> 请查询邮箱的垃圾箱里是否把告警邮件拦截 - -其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md deleted file mode 100644 index b70c8b10c40..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_enterprise_wechat_app.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: alert_enterprise_wechat_app -title: 企业微信应用告警通知 -sidebar_label: 企业微信应用告警通知 -keywords: [开源告警系统, 开源监控告警系统, 企业微信应用告警通知] ---- - -> 阈值触发后发送告警信息,通过企业微信应用通知到接收人. - -### Operation steps - -1. **【企业微信后台管理】-> 【App管理】-> 【创建一个新的应用】-> 【设置应用信息】->【添加成功后复制应用的AgentId和Secret】** - -![email](/img/docs/help/alert-wechat-1.jpg) - -2. **【告警通知】->【新增接收人】 ->【选择企业微信应用通知方式】->【设置企业ID,企业应用id和应用的secret 】-> 【确定】** - -![email](/img/docs/help/alert-wechat-2.jpg) - -3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** - -![email](/img/docs/help/alert-wechat-3.jpg) - -### 企业微信应用通知常见问题 - -1. 企业微信应用未收到告警通知. - -> 请检查用户是否具有应用程序权限. -> 请检查企业应用程序回调地址设置是否正常. -> 请检查服务器IP是否在企业应用程序白名单上. - -其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md deleted file mode 100644 index 5a6e95d7067..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_feishu.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: alert_feishu -title: 告警飞书机器人通知 -sidebar_label: 告警飞书机器人通知 -keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过飞书机器人通知到接收人。 - -### 操作步骤 - -1. **【飞书客户端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** - -2. **【保存机器人的WebHook地址的KEY值】** - -> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` - -3. 
**【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** - -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-4.png) - -### 飞书机器人通知常见问题 - -1. 飞书群未收到机器人告警通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 - -其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md deleted file mode 100644 index 5c5c38c56be..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_slack.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: alert_slack -title: 告警 Slack Webhook 通知 -sidebar_label: 告警 Slack Webhook 通知 -keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过 Slack Webhook 通知到接收人。 - -## 操作步骤 - -> 部署网络本身需支持科学上网,不支持设置代理 - -### 在 Slack 开启 Webhook, 获取 Webhook URL - -参考官网文档 [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) - -### 在 HertzBeat 新增告警通知人,通知方式为 Slack Webhook - -1. **【告警通知】->【新增接收人】 ->【选择 Slack Webhook 通知方式】->【设置 Webhook URL】-> 【确定】** - -![email](/img/docs/help/slack-bot-1.png) - -2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-policy.png) - -### Slack 机器人通知常见问题 - -1. Slack 未收到机器人告警通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 - -其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md deleted file mode 100644 index d6bca9843a4..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_smn.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: alert_smn -title: 华为云SMN通知 -sidebar_label: 告警华为云SMN通知 -keywords: [ 告警华为云SMN通知, 开源告警系统, 开源监控告警系统 ] ---- - -> 阈值触发后发送告警信息,通过华为云SMN通知到接收人。 - -### 操作步骤 - -1. **按照[华为云SMN官方文档](https://support.huaweicloud.com/qs-smn/smn_json.html)开通SMN服务并配置SMN** - -![alert-notice-10](/img/docs/help/alert-notice-10.png) - -2. **保存SMN的主题URN** - -![alert-notice-11](/img/docs/help/alert-notice-11.png) - -3. **按照[华为云签名文档](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html)获取AK、SK和项目ID** - -![alert-notice-12](/img/docs/help/alert-notice-12.png) - -![alert-notice-13](/img/docs/help/alert-notice-13.png) - -4. **【告警通知】->【新增接收人】 ->【选择华为云SMN通知方式】->【设置华为云SMN AK、SK等配置】-> 【确定】** - -![alert-notice-14](/img/docs/help/alert-notice-14.png) - -5. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-4.png) - -### 华为云SMN通知常见问题 - -1. 华为云SMN群未收到告警通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否正确配置华为云SMN AK、SK等配置,是否已配置告警策略关联 - -其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md deleted file mode 100644 index dfb1aa48d8a..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_telegram.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: alert_telegram -title: 告警 Telegram 机器人通知 -sidebar_label: 告警 Telegram 机器人通知 -keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过 Telegram 机器人通知到接收人。 - -## 操作步骤 - -> 部署网络本身需支持科学上网,不支持设置代理 - -### 在 Telegram 创建机器人, 获取 Bot Token 和 UserId - -1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` - -![telegram-bot](/img/docs/help/telegram-bot-1.png) - -2. 获取接收人的 `User ID` - -**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, -访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` - -```json -{ - "ok":true, - "result":[ - { - "update_id":632299191, - "message":{ - "from":{ - "id": "User ID" - }, - "chat":{ - }, - "date":1673858065, - "text":"111" - } - } - ] -} -``` - -3. 记录保存我们获得的 `Token` 和 `User Id` - -### 在 HertzBeat 新增告警通知人,通知方式为 Telegram Bot - -1. **【告警通知】->【新增接收人】 ->【选择 Telegram 机器人通知方式】->【设置机器人Token和UserId】-> 【确定】** - -![email](/img/docs/help/telegram-bot-2.png) - -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-policy.png) - -### Telegram 机器人通知常见问题 - -1. Telegram 未收到机器人告警通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 -> UserId 应为消息接收对象的UserId - -其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md deleted file mode 100644 index 9a68175fad7..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: alert_threshold -title: 阈值告警配置 -sidebar_label: 阈值告警配置 ---- - -> 对监控指标配置告警阈值(警告告警,严重告警,紧急告警),系统根据阈值配置和采集指标数据计算触发告警。 - -### 操作步骤 - -1. **【告警配置】->【新增阈值】-> 【配置后确定】** - -![threshold](/img/docs/help/alert-threshold-1.png) - -如上图: - -**指标对象**:选择我们需要配置阈值的监控指标对象 例如:网站监控类型下的 -> summary指标集合下的 -> responseTime响应时间指标 -**阈值触发表达式**:根据此表达式来计算判断是否触发阈值,表达式环境变量和操作符见页面提示,例如:设置响应时间大于50触发告警,表达式为 `responseTime > 50`。阈值表达式详细帮助见 [阈值表达式帮助](alert_threshold_expr) -**告警级别**:触发阈值的告警级别,从低到高依次为:警告-warning,严重-critical,紧急-emergency -**触发次数**:设置触发阈值多少次之后才会真正的触发告警 -**通知模版**:告警触发后发送的通知信息模版,模版环境变量见页面提示,例如:`${app}.${metrics}.${metric}指标的值为${responseTime},大于50触发告警` -**全局默认**: 设置此阈值是否对全局的此类指标都应用有效,默认否。新增阈值后还需将阈值与监控对象关联,这样阈值才会对此监控生效。 -**启用告警**:此告警阈值配置开启生效或关闭 - -2. **阈值关联监控⚠️ 【告警配置】-> 【将刚设置的阈值】-> 【配置关联监控】-> 【配置后确定】** - -> **注意⚠️ 新增阈值后还需将阈值与监控对象关联(即设置此阈值对哪些监控有效),这样阈值才会对此监控生效**。 - -![threshold](/img/docs/help/alert-threshold-2.png) - -![threshold](/img/docs/help/alert-threshold-3.png) - -**阈值告警配置完毕,已经被成功触发的告警信息可以在【告警中心】看到。** -**若需要将告警信息邮件,微信,钉钉飞书通知给相关人员,可以在【告警通知】配置。** - -其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md deleted file mode 100644 index 6d15d14ea6e..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_threshold_expr.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: alert_threshold_expr -title: 阈值触发表达式 -sidebar_label: 阈值触发表达式 ---- - -> 在我们配置阈值告警时,需要配置阈值触发表达式,系统根据表达式和监控指标值计算触发是否告警,这里详细介绍下表达式使用。 - -#### 表达式支持的操作符 - -``` -equals(str1,str2) -== -< -<= -> ->= -!= -( ) -+ -- -&& -|| -``` - -丰富的操作符让我们可以很自由的定义表达式。 -注意⚠️ 字符串的相等请用 `equals(str1,str2)` 数字类型的相等判断请用== 或 != - -#### 表达式函数库列表 - -参考: - -#### 支持的环境变量 - -> 环境变量即指标值等支持的变量,用于在表达式中,阈值计算判断时会将变量替换成实际值进行计算 - -非固定环境变量:这些变量会根据我们选择的监控指标对象而动态变化,例如我们选择了**网站监控的响应时间指标**,则环境变量就有 `responseTime - 此为响应时间变量` -如果我们想设置**网站监控的响应时间大于400时**触发告警,则表达式为 `responseTime>400` - -固定环境变量(不常用):`instance : 所属行实例值` -此变量主要用于计算多实例时,比如采集到c盘d盘的`usage`(`usage为非固定环境变量`),我们只想设置**c盘的usage大于80**时告警,则表达式为 `equals(instance,"c")&&usage>80` - -#### 表达式设置案例 - -1. 网站监控->响应时间大于等于400ms时触发告警 - `responseTime>=400` -2. API监控->响应时间大于3000ms时触发告警 - `responseTime>3000` -3. 全站监控->URL(instance)路径为 `https://baidu.com/book/3` 的响应时间大于200ms时触发告警 - `equals(instance,"https://baidu.com/book/3")&&responseTime>200` -4. MYSQL监控->status指标->threads_running(运行线程数)指标大于7时触发告警 - `threads_running>7` - -若遇到问题可以通过交流群ISSUE交流反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md deleted file mode 100644 index 272c59cfd4c..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_webhook.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -id: alert_webhook -title: 告警 Webhook 回调通知 -sidebar_label: 告警 Webhook 回调通知 -keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过post请求方式调用WebHook接口通知到接收人。 - -## 操作步骤 - -1. 
**【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** - -![email](/img/docs/help/alert-notice-5.png) - -2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-4.png) - -### WebHook回调POST请求体BODY内容 - -内容格式:JSON - -```json -{ - "alarmId": 76456, - "target": "${target}", - "thresholdId": 33455, - "priority": 0, - "content": "udp_port monitoring availability alert, code is FAIL", - "status": 0, - "times": 1, - "triggerTime": "2022-02-25T13:32:13", - "tags": { - "app": "windows", - "monitorId": "180427708350720", - "metrics": "availability", - "code": "UN_CONNECTABLE", - "thresholdId": "112", - "monitorName": "WINDOWS_192.168.124.12" - } -} -``` - -| | | -|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm 
trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | -| tags | example: {key1:value1} | - -### webhook通知常见问题 - -1. WebHook回调未生效 - -> 请查看告警中心是否已经产生此条告警信息 -> 请排查配置的WebHook回调地址是否正确 - -其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md deleted file mode 100644 index 5c73ffee2a6..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/alert_wework.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: alert_wework -title: 告警企业微信通知 -sidebar_label: 告警企业微信通知 -keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系统] ---- - -> 阈值触发后发送告警信息,通过企业微信机器人通知到接收人。 - -### 操作步骤 - -1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** - -![email](/img/docs/help/alert-notice-6.jpg) - -2. **【保存机器人的WebHook地址的KEY值】** - -> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` - -3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** - -![email](/img/docs/help/alert-notice-7.png) - -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** - -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 - -![email](/img/docs/help/alert-notice-4.png) - -### 企业微信机器人通知常见问题 - -1. 企业微信群未收到机器人告警通知 - -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 - -其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md deleted file mode 100644 index 0390259fc70..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/api.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: api -title: 监控:HTTP API -sidebar_label: HTTP API -keywords: [开源监控系统, 开源网站监控, HTTP API监控] ---- - -> 调用HTTP API接口,查看接口是否可用,对其响应时间等指标进行监测 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------------|-------------------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 请求方式 | 设置接口调用的请求方式:GET,POST,PUT,DELETE。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 用户名 | 接口Basic认证或Digest认证时使用的用户名 | -| 密码 | 接口Basic认证或Digest认证时使用的密码 | -| Content-Type | 设置携带BODY请求体数据请求时的资源类型 | -| 请求BODY | 设置携带BODY请求体数据,PUT POST请求方式时有效 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:summary - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|--------| -| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md deleted file mode 100644 index 02a93f751c5..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/centos.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: centos -title: 监控:CentOS操作系统监控 -sidebar_label: CentOS操作系统 -keywords: [开源监控系统, 开源操作系统监控, CentOS操作系统监控] ---- - -> 对CentOS操作系统的通用性能指标进行采集监控。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 
标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------|------|--------| -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | - -#### 指标集合:cpu - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------|------|--------------------| -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - -#### 指标集合:memory - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|----------| -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | - -#### 指标集合:disk - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|-----------| -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | - -#### 指标集合:interface - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------|------|---------------| -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | - -#### 指标集合:disk_free - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|---------| -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md deleted file mode 100644 index 12cb13b8422..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dm.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: dm -title: 监控:达梦数据库监控 -sidebar_label: 达梦数据库 -keywords: [开源监控系统, 开源数据库监控, 达梦数据库监控] ---- - -> 
对DM达梦数据库的通用性能指标进行采集监控。支持DM8+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5236。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|-----------| -| PORT_NUM | 无 | 数据库暴露服务端口 | -| CTL_PATH | 无 | 控制文件路径 | -| MAX_SESSIONS | 无 | 数据库最大连接数 | - -#### 指标集合:status - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------| -| status$ | 无 | DM数据库的开闭状态 | - -#### 指标集合:thread - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------|------|---------------------------------| -| dm_sql_thd | 无 | 用于编写 dmsql dmserver 的线程 | -| dm_io_thd | 无 | IO线程,由IO_THR_GROUPS参数控制,默认为2个线程 | -| dm_quit_thd | 无 | 用于执行正常关闭数据库的线程 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md deleted file mode 100644 index 0b81365780b..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/docker.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: docker -title: 监控:Docker 监控 -sidebar_label: Docker 容器监控 -keywords: [开源监控系统, 开源容器监控, Docker容器监控] ---- - -> 对Docker容器的通用性能指标进行采集监控。 - -## 监控前操作 - -如果想要监控 `Docker` 中的容器信息,则需要按照一下步骤打开端口,让采集请求获取到对应的信息。 - -**1、编辑docker.server文件:** - -```shell -vi /usr/lib/systemd/system/docker.service -``` - -找到 **[Service]** 节点,修改 ExecStart 属性,增加 `-H tcp://0.0.0.0:2375` - -```shell -ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock -H tcp://0.0.0.0:2375 -``` - -这样相当于对外开放的是 **2375** 端口,当然也可以根据自己情况修改成其他的。 - -**2、重新加载Docker配置生效:** - -```shell -systemctl daemon-reload 
-systemctl restart docker -``` - -**注意:记得在服务器中打开 `2375` 端口号。** - -**3、如果上述方法不行则:** - -在服务器内部打开 `2375` 端口号。 - -```shell -firewall-cmd --zone=public --add-port=2375/tcp --permanent -firewall-cmd --reload -``` - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为2375。 | -| 查询超时时间 | 设置获取Docker服务器API接口时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 器名称 | 一般是监控所有运行中的容器信息。 | -| 用户名 | 连接用户名,可选 | -| 密码 | 连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:system - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|--------------------------------| -| Name | 无 | 服务器名称 | -| version | 无 | docker本版号 | -| os | 无 | 服务器版本 例如:linux x86_64 | -| root_dir | 无 | docker文件夹目录 例如:/var/lib/docker | -| containers | 无 | 容器总数(在运行+未运行) | -| containers_running | 无 | 运行中的容器数目 | -| containers_paused | 无 | 暂停中的容器数目 | -| images | 无 | 容器景象的总数目。 | -| ncpu | 无 | NCPU | -| mem_total | MB | 占用的内存总大小 | -| system_time | 无 | 系统时间 | - -#### 指标集合:containers - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|----------------| -| id | 无 | Docker中容器的ID | -| name | 无 | Docker容器中的容器名称 | -| image | 无 | Docker容器使用的镜像 | -| command | 无 | Docker中的默认启动命令 | -| state | 无 | Docker中容器的运行状态 | -| status | 无 | Docker容器中的更新时间 | - -#### 指标集合:stats - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------------|------|--------------------| -| name | 无 | Docker容器中的名字 | -| available_memory | MB | Docker容器可以利用的内存大小 | -| used_memory | MB | Docker容器已经使用的内存大小 | -| memory_usage | 无 | Docker容器的内存使用率 | -| cpu_delta | 无 | Docker容器已经使用的CPU数量 | -| number_cpus | 无 | Docker容器可以使用的CPU数量 | -| cpu_usage | 无 | Docker容器CPU使用率 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md deleted file mode 100644 index 1abcb732289..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/dynamic_tp.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: dynamic_tp -title: 监控:DynamicTp 线程池监控 -sidebar_label: DynamicTp线程池监控 -keywords: [开源监控系统, 开源中间件监控, DynamicTp线程池监控] ---- - -> 对DynamicTp actuator 暴露的线程池性能指标进行采集监控。 - -### 前提 - -1. 集成使用 `DynamicTp` - -`DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 - -集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) - -2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 - -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` - -测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: - -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4 GB", - "totalMemory": "444 MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81 GB" - } -] -``` - -3. 
在HertzBeat中间件监控下添加DynamicTp监控即可 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|-----------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:thread_pool - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------------|------|--------------------------| -| pool_name | 无 | 线程池名称 | -| core_pool_size | 无 | 核心线程数 | -| maximum_pool_size | 无 | 最大线程数 | -| queue_type | 无 | 任务队列类型 | -| queue_capacity | MB | 任务队列容量 | -| queue_size | 无 | 任务队列当前占用大小 | -| fair | 无 | 队列模式,SynchronousQueue会用到 | -| queue_remaining_capacity | MB | 任务队列剩余大小 | -| active_count | 无 | 活跃线程数 | -| task_count | 无 | 任务总数 | -| completed_task_count | 无 | 已完成任务数 | -| largest_pool_size | 无 | 历史最大线程数 | -| pool_size | 无 | 当前线程数 | -| wait_task_count | 无 | 等待执行任务数 | -| reject_count | 无 | 拒绝任务数 | -| reject_handler_name | 无 | 拒绝策略类型 | -| dynamic | 无 | 是否动态线程池 | -| run_timeout_count | 无 | 运行超时任务数 | -| queue_timeout_count | 无 | 等待超时任务数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md deleted file mode 100644 index 54553c1200c..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/fullsite.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: fullsite -title: 监控:全站监控 -sidebar_label: 全站监控 -keywords: [开源监控系统, 开源网站监控, SiteMap监控] ---- - -> 对网站的全部页面监测是否可用 -> 往往一个网站有多个不同服务提供的页面,我们通过采集网站暴露出来的网站地图SiteMap来监控全站。 -> 注意⚠️,此监控需您网站支持SiteMap。我们支持XML和TXT格式的SiteMap。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|---------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 
任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 网站地图 | 网站SiteMap地图地址的相对路径,例如:/sitemap.xml。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:summary - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|-----------------| -| url | 无 | 网页的URL路径 | -| statusCode | 无 | 请求此网页的响应HTTP状态码 | -| responseTime | ms毫秒 | 网站响应时间 | -| errorMsg | 无 | 请求此网站反馈的错误信息 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md deleted file mode 100644 index da07e912f00..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/guide.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -id: guide -title: 帮助中心 -sidebar_label: 帮助入门 ---- - -> 易用友好的实时监控工具,无需Agent,强大自定义监控能力。 -> 使用过程中的帮助文档,辅助信息。 - -## 🔬 监控服务 - -> 定时采集监控对端服务暴露的性能指标,提供可视化界面,处理数据供告警等服务调度。 -> 规划的监控类型:应用服务,数据库,操作系统,云原生,开源中间件 - -### 应用服务监控 - - 👉 [网站监测](website)
- 👉 [HTTP API](api)
- 👉 [PING连通性](ping)
- 👉 [端口可用性](port)
- 👉 [全站监控](fullsite)
- 👉 [SSL证书有效期](ssl_cert)
- 👉 [JVM虚拟机](jvm)
- 👉 [SpringBoot2.0](springboot2)
- -### 数据库监控 - - 👉 [MYSQL数据库监控](mysql)
- 👉 [MariaDB数据库监控](mariadb)
- 👉 [PostgreSQL数据库监控](postgresql)
- 👉 [SqlServer数据库监控](sqlserver)
- 👉 [Oracle数据库监控](oracle)
- 👉 [达梦数据库监控](dm)
- 👉 [OpenGauss数据库监控](opengauss)
- 👉 [IoTDB数据库监控](iotdb)
- -### 操作系统监控 - - 👉 [Linux操作系统监控](linux)
- 👉 [Windows操作系统监控](windows)
- 👉 [Ubuntu操作系统监控](ubuntu)
- 👉 [Centos操作系统监控](centos)
- -### 中间件监控 - - 👉 [Zookeeper](zookeeper)
- 👉 [Kafka](kafka)
- 👉 [Tomcat](tomcat)
- 👉 [ShenYu](shenyu)
- 👉 [DynamicTp](dynamic_tp)
- 👉 [RabbitMQ](rabbitmq)
- 👉 [ActiveMQ](activemq)
- 👉 [Jetty](jetty)
- -### 云原生监控 - - 👉 [Docker](docker)
- 👉 [Kubernetes](kubernetes)
- -## 💡 告警服务 - -> 更自由化的阈值告警配置,支持邮箱,短信,webhook,钉钉,企业微信,飞书机器人等告警通知。 -> 告警服务的定位是阈值准确及时触发,告警通知及时可达。 - -### 告警中心 - -> 已触发的告警信息中心,提供告警删除,告警处理,标记未处理,告警级别状态等查询过滤。 - -### 告警配置 - -> 指标阈值配置,提供表达式形式的指标阈值配置,可设置告警级别,触发次数,告警通知模版和是否启用,关联监控等功能。 - -详见 👉 [阈值告警](alert_threshold)
-   👉 [阈值表达式](alert_threshold_expr) - -### 告警通知 - -> 触发告警信息后,除了显示在告警中心列表外,还可以用指定方式(邮件钉钉微信飞书等)通知给指定接收人。 -> 告警通知提供设置不同类型的通知方式,如邮件接收人,企业微信机器人通知,钉钉机器人通知,飞书机器人通知。 -> 接收人设置后需要设置关联的告警通知策略,来配置哪些告警信息发给哪些接收人。 - - 👉 [配置邮箱通知](alert_email)
- 👉 [配置 Webhook 通知](alert_webhook)
- 👉 [配置 Telegram 通知](alert_telegram)
- 👉 [配置 Discord 通知](alert_discord)
- 👉 [配置 Slack 通知](alert_slack)
- 👉 [配置企业微信机器人通知](alert_wework)
- 👉 [配置钉钉机器人通知](alert_dingtalk)
- 👉 [配置飞书机器人通知](alert_feishu)
- 👉 [配置华为云SMN通知](alert_smn)
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md deleted file mode 100644 index 186baede498..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hadoop.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: hadoop -title: 监控:Hadoop -sidebar_label: Hadoop -keywords: [开源监控系统, 开源JAVA监控, Hadoop的JVM虚拟机监控] ---- - -> 对Hadoop的JVM虚拟机的通用性能指标进行采集监控 - -**使用协议:JMX** - -### 监控前操作 - -> 您需要在 Hadoop 应用开启 `JMX` 服务,HertzBeat 使用 JMX 协议对Hadoop中的 JVM 进行指标采集。 - -#### Hadoop应用开启JMX协议步骤 - -应用启动时添加JVM参数 ⚠️注意可自定义暴露端口,对外IP - -- 1.进入hadoop-env.sh配置文件,在命令行终端输入以下命令: - -```shell -vi $HADOOP_HOME/etc/hadoop/hadoop-env.sh -``` - -- 2.添加如下参数,`` 为自定义暴露端口号。 - -```shell -export HADOOP_OPTS= "$HADOOP_OPTS --Djava.rmi.server.hostname=对外ip地址 --Dcom.sun.management.jmxremote.port=9999 --Dcom.sun.management.jmxremote.ssl=false --Dcom.sun.management.jmxremote.authenticate=false " -``` - -- 3.保存并退出,并在 $HADOOP_HOME/sbin 目录下执行 `start-all.sh` 重启服务。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:memory_pool - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:code_cache (限JDK8及以下) - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:class_loading - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------|------|----------| -| LoadedClassCount | 个 | 已加载类数量 | -| 
TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - -#### 指标集合:thread - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-----------| -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md deleted file mode 100644 index 6e1efde0991..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/hive.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: hive -Title: 监控 Apache Hive -sidebar_label: Apache Hive -keywords: [开源监控工具, 开源 Apache Hive 监控工具, 监控 Apache Hive 指标] ---- - -> 收集和监控由 SpringBoot Actuator 提供的常规性能指标。 - -## 监控前操作 - -如果您想使用此监控类型监控 Apache Hive 的信息,您需要以远程模式启动您的 Hive Server2。 - -**1、启用元数据存储:** - -```shell -hive --service metastore & -``` - -**2. 
启用 Hive Server2:** - -```shell -hive --service hiveserver2 & -``` - -### 配置参数 - -| 参数名称 | 参数描述 | -|----------|--------------------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 10002。 | -| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | - -### 采集指标 - -#### 指标收集: 基本信息 - -| 指标名称 | 指标单位 | 指标描述 | -|--------|------|-----------------------------| -| 虚拟机名称 | 无 | 运行 HiveServer2 的虚拟机(VM)的名称。 | -| 虚拟机供应商 | 无 | 虚拟机的供应商或提供者。 | -| 虚拟机版本 | 无 | 虚拟机的版本。 | -| 允许持续时间 | 无 | HiveServer2 运行的持续时间。 | - -#### 指标收集: 环境信息 - -| 指标名称 | 指标单位 | 指标描述 | -|------------|------|--------------------------------| -| HTTPS代理端口号 | 无 | 用于 HTTPS 代理通信的端口号。 | -| 操作系统 | 无 | 运行 HiveServer2 的操作系统的名称。 | -| 操作系统版本 | 无 | 操作系统的版本。 | -| 操作系统架构 | 无 | 操作系统的架构。 | -| java运行环境 | 无 | HiveServer2 使用的 Java 运行时环境的名称。 | -| java运行环境版本 | 无 | Java 运行时环境的版本。 | - -#### 指标收集: 线程信息 - -| 指标名称 | 指标单位 | 指标描述 | -|--------|------|------------------------------| -| 线程数量 | None | HiveServer2 当前正在使用的线程数。 | -| 总启动线程数 | None | HiveServer2 启动以来启动的线程总数。 | -| 最高线程数 | None | HiveServer2 在任何给定时间使用的最高线程数。 | -| 守护线程数 | None | HiveServer2 当前活动的守护线程数。 | - -#### 指标收集: 代码缓存 - -| 指标名称 | 指标单位 | 指标描述 | -|------------|------|---------------| -| 内存池当前内存 | MB | 当前为内存池分配的内存量。 | -| 内存池初始内存 | MB | 内存池请求的初始内存量。 | -| 内存池可分配最大内存 | MB | 内存池可分配的最大内存量。 | -| 内存池内存使用量 | MB | 内存池已使用内存量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md deleted file mode 100644 index 8bb3bbb25e0..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/iotdb.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -id: iotdb -title: 监控 Apache IoTDB 物联网时序数据库 -sidebar_label: IoTDB数据库 -keywords: [开源监控系统, 开源数据库监控, 
IoTDB数据库监控] ---- - -> 对 Apache IoTDB 物联网时序数据库的运行状态(JVM相关),内存任务集群等相关指标进行监测。 - -## 监控前操作 - -您需要在 IoTDB 开启`metrics`功能,他将提供 prometheus metrics 形式的接口数据。 - -开启`metrics`功能, 参考 [官方文档](https://iotdb.apache.org/zh/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) - -主要如下步骤: - -1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server - -``` -# 是否启动监控模块,默认为false -enableMetric: true - -# 是否启用操作延迟统计 -enablePerformanceStat: false - -# 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 -metricReporterList: - - JMX - - PROMETHEUS - -# 底层使用的metric架构,可选参数:[MICROMETER, DROPWIZARD] -monitorType: MICROMETER - -# 初始化metric的级别,可选参数: [CORE, IMPORTANT, NORMAL, ALL] -metricLevel: IMPORTANT - -# 预定义的指标集, 可选参数: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] -predefinedMetrics: - - JVM - - FILE -``` - -2. 重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 - -3. 在 HertzBeat 添加对应 IoTDB 监控即可。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | IoTDB指标接口对外提供的端口,默认为9091。 | -| 超时时间 | HTTP请求查询超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:cluster_node_status - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-------------------------| -| name | 无 | 节点名称IP | -| status | 无 | 节点状态,1=online 2=offline | - -#### 指标集合:jvm_memory_committed_bytes - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------| -| area | 无 | heap内存或nonheap内存 | -| id | 无 | 内存区块 | -| value | MB | 当前向JVM申请的内存大小 | - -#### 指标集合:jvm_memory_used_bytes - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------| -| area | 无 | heap内存或nonheap内存 | -| id | 无 | 内存区块 | -| value | MB | JVM已使用内存大小 | - -#### 指标集合:jvm_threads_states_threads - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------| -| state | 无 | 线程状态 | -| count | 无 | 线程状态对应线程数量 | - 
-#### 指标集合:quantity 业务数据 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--|------|----------------| -| name | 无 | 业务名称 timeSeries/storageGroup/device/deviceUsingTemplate | -| type | 无 | 类型 total/normal/template/template | -| value | 无 | 当前时间timeSeries/storageGroup/device/激活了模板的device的数量 | - -#### 指标集合:cache_hit 缓存 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------------------------------| -| name | 无 | 缓存名称 chunk/timeSeriesMeta/bloomFilter | -| value | % | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | - -#### 指标集合:queue 任务队列 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|----------------------------------------------| -| name | 无 | 队列名称 compaction_inner/compaction_cross/flush | -| status | 无 | 状态 running/waiting | -| value | 无 | 当前时间任务数 | - -#### 指标集合:thrift_connections - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|-------------| -| name | 无 | 名称 | -| connection | 无 | thrift当前连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md deleted file mode 100644 index 3d06e0346d2..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/issue.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: issue -title: 常见问题 -sidebar_label: 常见问题 ---- - -### 监控常见问题 - -1. **页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名** - -> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http - -2. **网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK** - -> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) - -3. 安装包部署的hertzbeat下ping连通性监控异常 - 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 - -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 - -4. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖服务TDengine时序数据库] - -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - TDengine数据库 -> 安装初始化此数据库参考 [TDengine安装初始化](../start/tdengine-init) - -5. 
配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 - 请参考下面几点排查问题: - -> 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 -> 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 -> 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine -> 6.配置http api监控,用于进行业务接口探测,确保业务可以用,另外接口有进行token鉴权校验,"Authorization:Bearer eyJhbGciOiJIUzI1....",配置后测试,提示“StatusCode 401”。服务端应用收到的token为"Authorization:Bearer%20eyJhbGciOiJIUzI1....",hertzbeat对空格进行转义为“%20”,服务器没有转义导致鉴权失败,建议转义功能作为可选项。 - -### Docker部署常见问题 - -1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** - 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 - -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` - -2. **按照流程部署,访问 无界面** - 请参考下面几点排查问题: - -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 - -3. **日志报错TDengine连接或插入SQL失败** - -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter - -### 安装包部署常见问题 - -1. **按照流程部署,访问 无界面** - 请参考下面几点排查问题: - -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 - -2. 
**日志报错TDengine连接或插入SQL失败** - -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md deleted file mode 100644 index 31e297703fc..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jetty.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -id: jetty -title: 监控:Jetty应用服务器 -sidebar_label: Jetty应用服务器 -keywords: [开源监控系统, 开源中间件监控, Jetty应用服务器监控] ---- - -> 对Jetty应用服务器的通用性能指标进行采集监控 - -**使用协议:JMX** - -### 监控前操作 - -> 您需要在 JVM 应用开启 `JMX` 服务,HertzBeat 使用 JMX 协议对 JVM 进行指标采集。 - -#### Jetty应用服务器开启JMX协议步骤 - -[参考官方文档](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) - -1. 在 Jetty 启动 JMX JMX-REMOTE 模块 - -```shell -java -jar $JETTY_HOME/start.jar --add-module=jmx -java -jar $JETTY_HOME/start.jar --add-module=jmx-remote -``` - -命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 - -2. 编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 - -**`localhost` 需修改为对外暴露 IP** - -```text -## The host/address to bind the RMI server to. -# jetty.jmxremote.rmiserverhost=localhost - -## The port the RMI server listens to (0 means a random port is chosen). -# jetty.jmxremote.rmiserverport=1099 - -## The host/address to bind the RMI registry to. -# jetty.jmxremote.rmiregistryhost=localhost - -## The port the RMI registry listens to. -# jetty.jmxremote.rmiregistryport=1099 - -## The host name exported in the RMI stub. --Djava.rmi.server.hostname=localhost -``` - -3. 
重启 Jetty Server 即可。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:memory_pool - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:class_loading - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------|------|----------| -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - -#### 指标集合:thread - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-----------| -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md deleted file mode 100644 index 3d9e96e55e1..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/jvm.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -id: jvm -title: 监控:JVM虚拟机监控 -sidebar_label: JVM虚拟机 -keywords: [开源监控系统, 开源JAVA监控, JVM虚拟机监控] ---- - -> 对JVM虚拟机的通用性能指标进行采集监控 - -**使用协议:JMX** - -### 监控前操作 - -> 您需要在 JVM 应用开启 `JMX` 服务,HertzBeat 使用 JMX 协议对 JVM 进行指标采集。 - -#### JVM应用开启JMX协议步骤 - -应用启动时添加JVM参数 ⚠️注意可自定义暴露端口,对外IP - -参考文档: - -```shell --Djava.rmi.server.hostname=对外ip地址 --Dcom.sun.management.jmxremote.port=9999 --Dcom.sun.management.jmxremote.ssl=false --Dcom.sun.management.jmxremote.authenticate=false -``` - -### 配置参数 - -| 参数名称 | 参数帮助描述 | 
-|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置JVM连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:memory_pool - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:code_cache (限JDK8及以下) - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:class_loading - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------|------|----------| -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - -#### 指标集合:thread - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-----------| -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md deleted file mode 100644 index a79bb0e91c2..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kafka.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: kafka -title: 监控:Kafka监控 -sidebar_label: Kafka监控 -keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] ---- - -> 对Kafka的通用性能指标进行采集监控 - -**使用协议:JMX** - -### 监控前操作 - -> 您需要在 Kafka 开启 `JMX` 服务,HertzBeat 使用 JMX 协议对 Kafka 进行指标采集。 - -1. 安装部署 Kafka 服务 - -2. 
修改 Kafka 启动脚本 - -修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` -在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 - -```shell -export JMX_PORT=9999; -export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management.jmxremote.rmi.port=9999 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"; - -# 这是最后一行本来就存在的 -# exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" -``` - -3. 重启 Kafka 服务 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:server_info - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------|------|---------| -| Version | 无 | Kafka版本 | -| StartTimeMs | ms | 运行时间 | -| CommitId | 无 | 版本提交ID | - -#### 指标集合:code_cache - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:active_controller_count - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------| -| Value | 个 | 活跃监控器数量 | - -#### 指标集合:broker_partition_count - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------| -| Value | 个 | 分区数量 | - -#### 指标集合:broker_leader_count - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------| -| Value | 个 | 领导者数量 | - -#### 指标集合:broker_handler_avg_percent 请求处理器空闲率 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------|----------|---------| -| EventType | 无 | 类型 | -| RateUnit | 具体情况具体分析 | 单位 | -| Count | 个 | 数量 | -| OneMinuteRate | % | 一分钟处理率 | -| FiveMinuteRate | % | 五分钟处理率 | -| MeanRate | 无 | 平均处理率 | -| FifteenMinuteRate | 无 | 十五分钟处理率 | - -> 其他指标见文知意,欢迎贡献一起优化文档。 diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md deleted file mode 100644 index 4f0363f621d..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/kubernetes.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -id: kubernetes -title: 监控:Kubernetes 监控 -sidebar_label: Kubernetes 监控 -keywords: [开源监控系统, 开源Kubernetes监控] ---- - -> 对kubernetes的通用性能指标进行采集监控。 - -## 监控前操作 - -如果想要监控 `Kubernetes` 中的信息,则需要获取到可访问Api Server的授权TOKEN,让采集请求获取到对应的信息。 - -参考获取token步骤 - -#### 方式一 - -1. 创建service account并绑定默认cluster-admin管理员集群角色 - -```kubectl create serviceaccount dashboard-admin -n kube-system``` - -2. 用户授权 - -```shell -kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin -kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' -kubectl describe secret {secret} -n kube-system -``` - -#### 方式二 - -``` -kubectl create serviceaccount cluster-admin - -kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-admin --serviceaccount=default:cluster-admin - -kubectl create token --duration=1000h cluster-admin - -``` - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|-------------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| APiServer端口 | K8s APiServer端口,默认6443 | -| token | 授权Access Token | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:nodes - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|--------| -| node_name | 无 | 节点名称 | -| is_ready | 无 | 节点状态 | -| capacity_cpu | 无 | CPU容量 | -| allocatable_cpu | 无 | 已分配CPU | -| capacity_memory | 无 | 内存容量 | -| allocatable_memory | 无 | 已分配内存 | -| 
creation_time | 无 | 节点创建时间 | - -#### 指标集合:namespaces - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|-------------| -| namespace | 无 | namespace名称 | -| status | 无 | 状态 | -| creation_time | 无 | 创建时间 | - -#### 指标集合:pods - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|----------------| -| pod | 无 | pod名称 | -| namespace | 无 | pod所属namespace | -| status | 无 | pod状态 | -| restart | 无 | 重启次数 | -| host_ip | 无 | 所在主机IP | -| pod_ip | 无 | pod ip | -| creation_time | 无 | pod创建时间 | -| start_time | 无 | pod启动时间 | - -#### 指标集合:services - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|--------------------------------------------------------| -| service | 无 | service名称 | -| namespace | 无 | service所属namespace | -| type | 无 | service类型 ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | 无 | cluster ip | -| selector | 无 | tag selector匹配 | -| creation_time | 无 | 创建时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md deleted file mode 100644 index abd87de1ef8..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/linux.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: linux -title: 监控:Linux操作系统监控 -sidebar_label: Linux操作系统 -keywords: [开源监控系统, 开源操作系统监控, Linux操作系统监控] ---- - -> 对Linux操作系统的通用性能指标进行采集监控。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------|------|--------| -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | - -#### 指标集合:cpu - -| 指标名称 | 指标单位 | 指标帮助描述 | 
-|----------------|------|--------------------| -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - -#### 指标集合:memory - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|----------| -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | - -#### 指标集合:disk - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|-----------| -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | - -#### 指标集合:interface - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------|------|---------------| -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | - -#### 指标集合:disk_free - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|---------| -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md deleted file mode 100644 index 4690b5500ef..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mariadb.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: mariadb -title: 监控:MariaDB数据库监控 -sidebar_label: MariaDB数据库 -keywords: [开源监控系统, 开源数据库监控, MariaDB数据库监控] ---- - -> 对MariaDB数据库的通用性能指标进行采集监控。支持MariaDB5+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 
数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|------------| -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | - -#### 指标集合:status - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------|------|------------------| -| threads_created | 无 | MariaDB已经创建的总连接数 | -| threads_connected | 无 | MariaDB已经连接的连接数 | -| threads_cached | 无 | MariaDB当前缓存的连接数 | -| threads_running | 无 | MariaDB当前活跃的连接数 | - -#### 指标集合:innodb - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------|------|-------------------------| -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md deleted file mode 100644 index f3c1ddfab55..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/memcached.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: memcached -title: Monitoring Memcached -sidebar_label: Memcached Monitor -keywords: [ open source monitoring tool, open source Memcached monitoring tool, monitoring memcached metrics ] ---- - -> Collect and monitor the general performance Metrics of Memcached. - -**Protocol Use:Memcached** - -```text -The default YML configuration for the memcache version is in compliance with 1.4.15. -You need to use the stats command to view the parameters that your memcache can monitor -``` - -### - -**1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. 
- -```shell -# telnet ip port -[root@server ~]# telnet localhost 11211 -Trying ::1... -Connected to localhost. -Escape character is '^]'. -stats -STAT pid 15168 -STAT uptime 11691 -STAT time 1702569246 -STAT version 1.4.15 -... -``` - -**There is help_doc: ** - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Memcached | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:server_info - -| Metric name | Metric unit | Metric help description | -|------------------|-------------|---------------------------------------------------| -| pid | | Memcache server process ID | -| uptime | s | The number of seconds the server has been running | -| version | | Memcache version | -| curr_connections | | Current number of connections | -| auth_errors | | Number of authentication failures | -| threads | | Current number of threads | -| item_size | byte | The size of the item | -| item_count | | Number of items | -| curr_items | | The total number of data currently stored | -| total_items | | The total number of data stored since startup | -| bytes | byte | The current number of bytes occupied by storage | -| cmd_get | | Get command request count | -| cmd_set | | Set command request count | -| cmd_flush | | Flush command request count | -| get_misses | | Get command misses | -| delete_misses | | Delete command misses | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md deleted file mode 100644 index 47087c88f34..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/mysql.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: mysql -title: 监控:MYSQL数据库监控 -sidebar_label: MYSQL数据库 -keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ---- - -> 对MYSQL数据库的通用性能指标进行采集监控。支持MYSQL5+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为3306。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 
| -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|------------| -| version | 无 | 数据库版本 | -| port | 无 | 数据库暴露服务端口 | -| datadir | 无 | 数据库存储数据盘地址 | -| max_connections | 无 | 数据库最大连接数 | - -#### 指标集合:status - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------|------|----------------| -| threads_created | 无 | MySql已经创建的总连接数 | -| threads_connected | 无 | MySql已经连接的连接数 | -| threads_cached | 无 | MySql当前缓存的连接数 | -| threads_running | 无 | MySql当前活跃的连接数 | - -#### 指标集合:innodb - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------|------|-------------------------| -| innodb_data_reads | 无 | innodb平均每秒从文件中读取的次数 | -| innodb_data_writes | 无 | innodb平均每秒从文件中写入的次数 | -| innodb_data_read | KB | innodb平均每秒钟读取的数据量,单位为KB | -| innodb_data_written | KB | innodb平均每秒钟写入的数据量,单位为KB | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md deleted file mode 100644 index 9faed580e1b..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nebulagraph.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -id: nebulaGraph -title: 监控 NebulaGraph 数据库 -sidebar_label: NebulaGraph 数据库 -keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGraph 指标 ] ---- - -> 收集和监控 NebulaGraph 的常规性能指标。 - -**使用协议:nebulaGraph** - -```text -监控分为两个部分,nebulaGraph_stats 和 rocksdb_stats。 -nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 -``` - -### - -**1、通过 stats 和 rocksdb stats 接口获取可用参数。** - -1.1、如果只需要获取 nebulaGraph_stats,需要确保可以访问 stats,否则会出现错误。 - -默认端口是 19669,访问地址为 - -1.2、如果需要获取 rocksdb stats 的附加参数,需要确保可以访问 rocksdb stats,否则会报错。 - -首次连接 NebulaGraph 时,必须先注册 Storage 服务,以便正确查询数据。 - -**有帮助文档:** - -**** - -默认端口是 
19779,访问地址为: - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|-------------|--------------------------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️没有协议头(例如:https://、http://) | -| 监控名称 | 识别此监控的名称。名称需要唯一 | -| graphPort | Nebula Graph 提供的 Graph 服务的端口 | -| timePeriod | 可以是 5 秒、60 秒、600 秒或 3600 秒,分别表示最近 5 秒、最近 1 分钟、最近 10 分钟和最近 1 小时的时间段 | -| storagePort | Nebula Graph 提供的 Storage 服务的端口 | -| 超时 | 允许收集响应时间 | -| 收集间隔 | 监控周期性数据收集的间隔时间,单位:秒,最小可设置的间隔为 30 秒 | -| 是否检测 | 是否检测和验证添加监控之前的可用性。只有检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用于识别和描述此监控的更多信息,用户可以在此处记录信息 | - -### 收集指标 - -#### 指标集:nebulaGraph_stats - -指标太多,相关链接如下 -**** - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------------------------------------------------------|------|--------| -| 达到内存水位线的语句的数量(rate) | | | -| 达到内存水位线的语句的数量(sum) | | | -| 服务端主动回收的过期的会话数量(rate) | | | -| 服务端主动回收的过期的会话数量(sum) | | | -| 慢查询延迟时间(avg) | | | -| 慢查询延迟时间(p75) | | | -| 慢查询延迟时间(p95) | | | -| 慢查询延迟时间(p99) | | | -| 慢查询延迟时间(p999) | | | -| 查询延迟时间(avg) | | | -| 查询延迟时间(p75) | | | -| 查询延迟时间(p95) | | | -| 查询延迟时间(p99) | | | -| 查询延迟时间(p999) | | | -| 因用户名密码错误导验证失败的会话数量(rate) | | | -| 因用户名密码错误导验证失败的会话数量(sum) | | | -| 查询次数(rate) | | | -| 查询次数(sum) | | | -| 排序(Sort)算子执行时间(rate) | | | -| 排序(Sort)算子执行时间(sum) | | | -| Graphd 服务发给 Storaged 服务的 RPC 请求失败的数量(rate) | | | -| Graphd 服务发给 Storaged 服务的 RPC 请求失败的数量(sum) | | | -| 登录验证失败的会话数量(rate) | | | -| 登录验证失败的会话数量(sum) | | | -| 查询报错语句数量(rate) | | | -| 查询报错语句数量(sum) | | | -| 被终止的查询数量(rate) | | | -| 被终止的查询数量(sum) | | | -| 因查询错误而导致的 Leader 变更的次数(rate) | | | -| 因查询错误而导致的 Leader 变更的次数(sum) | | | -| Graphd 服务发给 Metad 服务的 RPC 请求数量(rate) | | | -| Graphd 服务发给 Metad 服务的 RPC 请求数量(sum) | | | -| 慢查询次数(rate) | | | -| 慢查询次数(sum) | | | -| 活跃的会话数的变化数(sum) | | | -| 活跃的查询语句数的变化数(sum) | | | -| Graphd 服务接收的语句数(rate) | | | -| Graphd 服务接收的语句数(sum) | | | -| 聚合(Aggregate)算子执行时间(rate) | | | -| 聚合(Aggregate)算子执行时间(sum) | | | -| 优化器阶段延迟时间(avg) | | | -| 优化器阶段延迟时间(p75) | | | -| 优化器阶段延迟时间(p95) | | | -| 优化器阶段延迟时间(p99) | | | -| 优化器阶段延迟时间(p999) | | | -| 
Graphd 服务发给 Metad 的 RPC 请求失败的数量(rate) | | | -| Graphd 服务发给 Metad 的 RPC 请求失败的数量(sum) | | | -| 索引扫描(IndexScan)算子执行时间(rate) | | | -| 索引扫描(IndexScan)算子执行时间(sum) | | | -| 服务端建立过的会话数量(rate) | | | -| 服务端建立过的会话数量(sum) | | | -| 因为超过FLAG_OUT_OF_MAX_ALLOWED_CONNECTIONS参数导致的验证登录的失败的会话数量(rate) | | | -| 因为超过FLAG_OUT_OF_MAX_ALLOWED_CONNECTIONS参数导致的验证登录的失败的会话数量(sum) | | | -| Graphd 服务发给 Storaged 服务的 RPC 请求数量(rate) | | | -| Graphd 服务发给 Storaged 服务的 RPC 请求数量(sum) | | | - -#### 指标集:rocksdb_stats - -指标太多,相关链接如下 -**** - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------------------|------|------------------------| -| rocksdb.backup.read.bytes | | 备份 RocksDB 数据库期间读取的字节数 | -| rocksdb.backup.write.bytes | | 指标名称 | -| ... | | ... | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md deleted file mode 100644 index 8c81c5a82c2..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/nginx.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -id: nginx -title: 监控 Nginx -sidebar_label: Nginx 监控 -keywords: [开源监控工具, 开源Java监控工具, 监控Nginx指标] ---- - -> 收集和监控 Nginx 的常规性能指标。 - -**使用的协议:Nginx** - -### 需要启用 Nginx 的 `ngx_http_stub_status_module` 和 `ngx_http_reqstat_module` 监控模块 - -如果你想使用这种监控方式监控 'Nginx' 的信息,你需要修改你的 Nginx 配置文件以启用监控模块。 - -### 启用 ngx_http_stub_status_module - -1. 检查是否已添加 `ngx_http_stub_status_module` - -```shell -nginx -V -``` - -查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 - -2. 编译安装 Nginx, 添加 `ngx_http_stub_status_module` 模块 - -下载 Nginx 并解压,在目录下执行 - -```shell -./configure --prefix=/usr/local/nginx --with-http_stub_status_module - -make && make install -``` - -3. 
修改 Nginx 配置文件 - -修改 `nginx.conf` 文件,添加监控模块暴露端点,如下配置: - -```shell -# modify nginx.conf -server { - listen 80; # port - server_name localhost; - location /nginx-status { - stub_status on; - access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } -} -``` - -4. 重新加载 Nginx - -```shell -nginx -s reload -``` - -5. 在浏览器访问 `http://localhost/nginx-status` 即可查看 Nginx 监控状态信息。 - -### 启用 `ngx_http_reqstat_module` - -1. 安装 `ngx_http_reqstat_module` 模块 - -```shell -# install `ngx_http_reqstat_module` -wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip - -unzip ngx_req_status.zip - -patch -p1 < ../ngx_req_status-master/write_filter.patch - -./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master - -make -j2 - -make install -``` - -2. 修改 Nginx 配置文件 - -修改 `nginx.conf` 文件,添加状态模块暴露端点,如下配置: - -```shell -# modify nginx.conf -http { - req_status_zone server_name $server_name 256k; - req_status_zone server_addr $server_addr 256k; - - req_status server_name server_addr; - - server { - location /req-status { - req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } - } -} -``` - -3. 重新加载 Nginx - -```shell -nginx -s reload -``` - -4. 
在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 - -**参考文档: ** - -**⚠️注意监控模块的端点路径为 `/nginx-status` `/req-status`** - -### 配置参数 - -| 参数名 | 参数描述 | -|--------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | Nginx 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | - -### 收集指标 - -#### 指标收集:nginx_status - -| 指标名称 | 指标单位 | 指标描述 | -|-------|------|------------| -| 接收连接数 | | 已接受的连接 | -| 处理连接数 | | 成功处理的连接 | -| 活动连接数 | | 当前活动连接 | -| 丢弃连接数 | | 丢弃的连接 | -| 请求连接数 | | 客户端请求 | -| 读连接数 | | 正在执行读操作的连接 | -| 写连接数 | | 正在执行写操作的连接 | -| 等待连接数 | | 等待连接 | - -#### 指标集:req_status - -| 指标名称 | 指标单位 | 指标描述 | -|---------|------|---------| -| 分组类别 | | 分组类别 | -| 分组名称 | | 分组名称 | -| 最大并发连接数 | | 最大并发连接数 | -| 最大带宽 | kb | 最大带宽 | -| 总流量 | kb | 总流量 | -| 总请求数 | | 总请求数 | -| 当前并发连接数 | | 当前并发连接数 | -| 当前带宽 | kb | 当前带宽 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md deleted file mode 100644 index a160f2501e4..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: ntp -title: 监控 NTP 服务器 -sidebar_label: NTP 服务器 -keywords: [ open source monitoring tool, open source NTP monitoring tool, monitoring NTP metrics ] ---- - -NTP监控的中文文档如下: - -# NTP监控 - -> 收集和监控NTP的常规性能指标。 - -**协议使用:NTP** - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|------|--------------------------------------------------| -| 监控主机 | 被监控的IPv4、IPv6或域名。注意⚠️不包含协议头(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要是唯一的 | -| 采集间隔 | 监控周期性数据采集的时间间隔,单位:秒,最小可设置为30秒 | -| 是否检测 | 是否在添加监控之前检测和检查监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用于更多关于标识和描述此监控的信息,用户可以在此处添加备注信息 | - -### 采集指标 - -#### 指标集:概要 - -| 指标名称 | 指标单位 | 指标帮助描述 | 
-|------|------|--------------------------| -| 响应时间 | 毫秒 | NTP服务器响应请求所需的时间。 | -| 时间 | 毫秒 | NTP服务器报告的当前时间。 | -| 日期 | | 与NTP服务器报告的当前时间对应的日期。 | -| 偏移量 | 毫秒 | NTP服务器的时钟与客户端时钟之间的时间差。 | -| 延迟 | 毫秒 | 请求到达NTP服务器并返回响应所需的时间。 | -| 版本号 | | 服务器使用的NTP协议的版本号。 | -| 模式 | | NTP服务器的操作模式,如客户端、服务器或广播。 | -| 层级 | | NTP服务器的层级,表示其与参考时钟的距离。 | -| 参考ID | | 指示NTP服务器使用的参考时钟或时间源的标识符。 | -| 精度 | | NTP服务器时钟的精度,表示其准确性。 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md deleted file mode 100644 index 964fc909c33..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/opengauss.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: opengauss -title: 监控:OpenGauss数据库监控 -sidebar_label: OpenGauss数据库 -keywords: [开源监控系统, 开源数据库监控, OpenGauss数据库监控] ---- - -> 对PostgreSQL数据库的通用性能指标进行采集监控。支持PostgreSQL 10+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|---------------| -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | - -#### 指标集合:state - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------|------|--------------------------------------------------------------------------| -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 
在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - -#### 指标集合:activity - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|----------| -| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md deleted file mode 100644 index d6bb80eb98d..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/oracle.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: oracle -title: 监控:ORACLE数据库监控 -sidebar_label: ORACLE数据库 -keywords: [开源监控系统, 开源数据库监控, Oracle数据库监控] ---- - -> 对ORACLE数据库的通用性能指标进行采集监控。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1521。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------------|------|---------| -| database_version | 无 | 数据库版本 | -| database_type | 无 | 数据库类型 | -| hostname | 无 | 主机名称 | -| instance_name | 无 | 数据库实例名称 | -| startup_time | 无 | 数据库启动时间 | -| status | 无 | 数据库状态 | - -#### 指标集合:tablespace - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|---------| -| file_id | 无 | 文件ID | -| file_name | 无 | 文件名称 | -| tablespace_name | 无 | 所属表空间名称 | -| status | 无 | 状态 | -| bytes | MB | 大小 | -| blocks | 无 | 区块数量 | - -#### 指标集合:user_connect - -| 指标名称 | 指标单位 | 指标帮助描述 | 
-|----------|------|--------| -| username | 无 | 用户名 | -| counts | 个数 | 当前连接数量 | - -#### 指标集合:performance - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------|------|---------------------------------------| -| qps | QPS | I/O Requests per Second 每秒IO请求数量 | -| tps | TPS | User Transaction Per Sec 每秒用户事物处理数量 | -| mbps | MBPS | I/O Megabytes per Second 每秒 I/O 兆字节数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md deleted file mode 100644 index 59ac237ed34..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ping.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: ping -title: 监控:PING连通性 -sidebar_label: PING连通性 -keywords: [开源监控系统, 开源网络监控, 网络PING监控] ---- - -> 对对端HOST地址进行PING操作,判断其连通性 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|----------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| Ping超时时间 | 设置PING未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:summary - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|--------| -| responseTime | ms毫秒 | 网站响应时间 | - -### 常见问题 - -1. 
安装包部署的hertzbeat下ping连通性监控异常 - 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 - -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md deleted file mode 100644 index 7a55a98df3e..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/pop3.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -id: pop3 -title: 监控POP3 -sidebar_label: POP3监控 -keywords: [开源监控工具,开源Java监控工具,监控POP3指标] ---- - -> 收集和监控POP3的常规性能指标。 - -**使用的协议:POP3** - -### 启用POP3服务 - -如果您想使用此监控类型来监控"POP3"的信息,请在您的邮件服务器上开启POP3服务。 - -**1、开启POP3服务:** - -```text -以qq邮箱为例【其它邮箱类似】: - 1. 点击`设置`选项 - 2. 选择`账号` - 3. 找到开启SMTP/POP3/IMAP选项,并开启 - 4. 得到POP3服务器域名,端口号,以及授权码【开启SMTP/POP3/IMAP服务后,qq邮箱提供】 - 5. 通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 -``` - -### 配置参数 - -| 参数名 | 参数描述 | -|--------|-----------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️不需要协议头部(例如:https://,http://) | -| 监控名称 | 标识此监控的名称。名称需要唯一 | -| 端口 | POP3 提供的端口 | -| 超时时间 | 允许收集响应时间 | -| 收集间隔时间 | 监控周期性数据收集的间隔时间,单位为秒,最小可设置的间隔时间为30秒 | -| 是否检测 | 是否在添加监控之前检测和确认监控的可用性。只有在检测成功后,添加和修改操作才会继续进行 | -| 描述备注 | 用户可以在此处注明有关标识和描述此监控的更多信息 | - -### 采集指标 - -#### 指标集:email_status - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------| -| 邮件数量 | | 邮件数量 | -| 邮箱总大小 | kb | 邮箱中邮件的总大小 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md deleted file mode 100644 index e774ac53254..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/port.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: port -title: 监控:端口可用性 -sidebar_label: 端口可用性 -keywords: [开源监控系统, 开源网络监控, 端口可用性监控] ---- - -> 判断对端服务暴露端口是否可用,进而判断对端服务是否可用,采集响应时间等指标进行监测 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | 
-|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 连接超时时间 | 端口连接的等待超时时间,单位毫秒,默认3000毫秒。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:summary - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|--------| -| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md deleted file mode 100644 index 12485e62ffa..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/postgresql.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: postgresql -title: 监控:PostgreSQL数据库监控 -sidebar_label: PostgreSQL数据库 -keywords: [开源监控系统, 开源数据库监控, PostgreSQL数据库监控] ---- - -> 对PostgreSQL数据库的通用性能指标进行采集监控。支持PostgreSQL 10+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为5432。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|---------------| -| server_version | 无 | 数据库服务器的版本号 | -| port | 无 | 数据库服务器端暴露服务端口 | -| server_encoding | 无 | 数据库服务器端的字符集编码 | -| data_directory | 无 | 数据库存储数据盘地址 | -| max_connections | 连接数 | 数据库最大连接数 | - -#### 指标集合:state - -| 指标名称 | 指标单位 | 指标帮助描述 | 
-|----------------|------|--------------------------------------------------------------------------| -| name | 无 | 数据库名称,或share-object为共享对象。 | -| conflicts | 次数 | 由于与恢复冲突而在这个数据库中被取消的查询的数目 | -| deadlocks | 个数 | 在这个数据库中被检测到的死锁数 | -| blks_read | 次数 | 在这个数据库中被读取的磁盘块的数量 | -| blks_hit | 次数 | 磁盘块被发现已经在缓冲区中的次数,这样不需要一次读取(这只包括 PostgreSQL 缓冲区中的命中,而不包括在操作系统文件系统缓冲区中的命中) | -| blk_read_time | ms | 在这个数据库中后端花费在读取数据文件块的时间 | -| blk_write_time | ms | 在这个数据库中后端花费在写数据文件块的时间 | -| stats_reset | 无 | 这些统计信息上次被重置的时间 | - -#### 指标集合:activity - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|----------| -| running | 连接数 | 当前客户端连接数 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md deleted file mode 100644 index 2210a2452e0..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/rabbitmq.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -id: rabbitmq -title: 监控 RabbitMQ 消息中间件 -sidebar_label: RabbitMQ消息中间件 -keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间件监控] ---- - -> 对 RabbitMQ 消息中间件的运行状态,节点,队列等相关指标进行监测。 - -### 监控前操作 - -> HertzBeat 使用 RabbitMQ Management 的 Rest Api 对 RabbitMQ 进行指标数据采集。 -> 故需要您的 RabbitMQ 环境开启 Management 插件 - -1. 开启 Management 插件,或使用自开启版本 - -```shell -rabbitmq-plugins enable rabbitmq_management -``` - -2. 浏览器访问 ,默认账户密码 `guest/guest`. 成功登录即开启成功。 - -3. 
在 HertzBeat 添加对应 RabbitMQ 监控即可,参数使用 Management 的 IP 端口,默认账户密码。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | RabbitMQ Management 对外提供的HTTP端口,默认为15672。 | -| 用户名 | 接口Basic认证时使用的用户名 | -| 密码 | 接口Basic认证时使用的密码 | -| 超时时间 | HTTP请求查询超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:overview - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|--------------------------| -| product_version | 无 | 产品版本 | -| product_name | 无 | 产品名称 | -| rabbitmq_version | 无 | rabbitmq 版本 | -| management_version | 无 | rabbitmq management 插件版本 | -| erlang_version | 无 | erlang 版本 | -| cluster_name | 无 | 集群名称 | -| rates_mode | 无 | rates模式 | - -#### 指标集合:object_totals - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------|------|-----------------| -| channels | 无 | channels的总数量 | -| connections | 无 | connections的总数量 | -| consumers | 无 | consumers的总数量 | -| exchanges | 无 | exchanges的总数量 | -| queues | 无 | queues的总数量 | - -#### 指标集合:nodes - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------|------|-------------------------------| -| name | 无 | The node name | -| type | 无 | The node type | -| running | 无 | Running state | -| os_pid | 无 | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | -| fd_total | 无 | File descriptors available | -| fd_used | 无 | File descriptors used | -| sockets_total | 无 | Sockets available | -| sockets_used | 无 | Sockets used | -| proc_total | 无 | Erlang process limit | -| proc_used | 无 | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | -| disk_free | GB | Free disk space | -| gc_num | 无 | GC runs | -| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | -| context_switches | 无 | Context_switches num | -| io_read_count | 无 | 总共读操作的数量 
| -| io_read_bytes | KB | 总共读入磁盘数据大小 | -| io_read_avg_time | ms | 读操作平均时间,毫秒为单位 | -| io_write_count | 无 | 磁盘写操作总量 | -| io_write_bytes | KB | 写入磁盘数据总量 | -| io_write_avg_time | ms | 每个磁盘写操作的平均时间,毫秒为单位 | -| io_seek_count | 无 | seek操作总量 | -| io_seek_avg_time | ms | seek操作的平均时间,毫秒单位 | -| io_sync_count | 无 | fsync操作的总量 | -| io_sync_avg_time | ms | fsync操作的平均时间,毫秒为单位 | -| connection_created | 无 | connection created num | -| connection_closed | 无 | connection closed num | -| channel_created | 无 | channel created num | -| channel_closed | 无 | channel closed num | -| queue_declared | 无 | queue declared num | -| queue_created | 无 | queue created num | -| queue_deleted | 无 | queue deleted num | -| connection_closed | 无 | connection closed num | - -#### 指标集合:queues - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------------------------|------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | 无 | The name of the queue with non-ASCII characters escaped as in C. | -| node | 无 | The queue on the node name | -| state | 无 | The state of the queue. Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | -| type | 无 | Queue type, one of: quorum, stream, classic. | -| vhost | 无 | vhost path | -| auto_delete | 无 | Whether the queue will be deleted automatically when no longer used | -| policy | 无 | Effective policy name for the queue. | -| consumers | 无 | Number of consumers. | -| memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. 
| -| messages_ready | 无 | Number of messages ready to be delivered to clients | -| messages_unacknowledged | 无 | Number of messages delivered to clients but not yet acknowledged | -| messages | 无 | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | 无 | Number of messages from messages_ready which are resident in ram | -| messages_persistent | 无 | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead | -| message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | -| message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | -| message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md deleted file mode 100644 index 0a0c9f77a65..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/redis.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -id: redis -title: 监控:REDIS数据库监控 -sidebar_label: REDIS数据库 -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] ---- - -> 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 
监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:server - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------------|------|-----------------------------------------------| -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制 | -| atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 用于编译Redis服务器的GCC编译器版本 | -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | -| executable | 无 | 服务器可执行文件的路径 | -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | - -#### 指标集合:clients - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------------|------|--------------------------------------------------------------------------------| -| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | -| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | - 
-#### 指标集合:memory - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------|----------|-----------------------------------------------------------------------------------------------| -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak | byte | Redis消耗的峰值内存(字节) | -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | -| used_memory_overhead | byte | 服务器分配用于管理其内部数据结构的所有开销的字节总和 | -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | -| maxmemory | byte | maxmemory配置指令的值 | -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | 
-| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | -| mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | -| lazyfreed_objects | 无 | 已延迟释放的对象数。 | - -#### 指标集合:persistence - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------------------------|--------|-----------------------------------------------------------------------------------------------------| -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | -| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 
标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite | -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - -#### 指标集合:stats - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------------------|------|----------------------------------------------------| -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数 | -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 
主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | -| tracking_total_keys | 无 | key 查询的总数 | -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数 | -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - -#### 指标集合:replication - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------------------|------|-------------------------------------------------------------------------------------| -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串 | -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量 | -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | - -#### 指标集合:cpu - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------|------|------------------------| -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | - -#### 指标集合:errorstats - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------|------|-----------| -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | - -#### 指标集合:cluster - -| 指标名称 | 指标单位 | 指标帮助描述 | 
-|-----------------|------|--------------------| -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | - -#### 指标集合:commandstats - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|---------------------------------------------------------------------------------------------------------------------------| -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | -| cmdstat_get | 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md deleted file mode 100644 index 87bb81b7800..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/shenyu.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -id: shenyu -title: 监控 Apache ShenYu API网关 -sidebar_label: ShenYu网关监控 -keywords: [开源监控系统, 开源消息中间件监控, ShenYu网关监控监控] ---- - -> 对 ShenYu 网关的运行状态(JVM相关),请求响应等相关指标进行监测。 - -## 监控前操作 - -您需要在 ShenYu 网关开启`metrics`插件,暴露对应的 prometheus metrics 接口。 - -开启插件, 参考 [官方文档](https://shenyu.apache.org/zh/docs/plugin-center/observability/metrics-plugin) - -主要如下两步骤: - -1. 在网关的 pom.xml 文件中添加 metrics 的依赖。 - -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` - -2. 
在网关的配置yaml文件中编辑如下内容: - -```yaml -shenyu: - metrics: - enabled: true #设置为 true 表示开启 - name : prometheus - host: 127.0.0.1 #暴露的ip - port: 8090 #暴露的端口 - jmxConfig: #jmx配置 - props: - jvm_enabled: true #开启jvm的监控指标 -``` - -最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网关指标接口对外提供的端口,默认为8090。 | -| 超时时间 | HTTP请求响应超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:shenyu_request_total - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-------------------| -| value | 无 | 收集ShenYu网关的所有请求数量 | - -#### 指标集合:shenyu_request_throw_created - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-------------------| -| value | 无 | 收集ShenYu网关的异常请求数量 | - -#### 指标集合:process_cpu_seconds_total - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------------| -| value | 无 | 用户和系统CPU总计所用的秒数 | - -#### 指标集合:process_open_fds - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-------------| -| value | 无 | 打开的文件描述符的数量 | - -#### 指标集合:process_max_fds - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------| -| value | 无 | 打开的文件描述符的最大数量 | - -#### 指标集合:jvm_info - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|----------| -| runtime | 无 | JVM 版本信息 | -| vendor | 无 | JVM 版本信息 | -| version | 无 | JVM 版本信息 | - -#### 指标集合:jvm_memory_bytes_used - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------| -| area | 无 | JVM 内存区域 | -| value | MB | 给定 JVM 内存区域的已用大小 | - -#### 指标集合:jvm_memory_pool_bytes_used - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已用大小 | - -#### 指标集合:jvm_memory_pool_bytes_committed - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的已提交大小 | - -#### 
指标集合:jvm_memory_pool_bytes_max - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------------| -| pool | 无 | JVM 内存池 | -| value | MB | 给定 JVM 内存池的最大大小 | - -#### 指标集合:jvm_threads_state - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-------------| -| state | 无 | 线程状态 | -| value | 无 | 对应线程状态的线程数量 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md deleted file mode 100644 index 73e9af9ee13..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/smtp.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: smtp -title: SMTP 邮件服务器监控 -sidebar_label: SMTP 监控 -keywords: [ open source monitoring tool, open source SMTP monitoring tool, monitoring SMTP metrics ] ---- - -> 收集和监控 SMTP 邮件服务器的常规性能指标。 - -```text -通过 SMTP 的 hello 命令确定服务器是否可用 -``` - -> 详见 - -**协议使用:SMTP** - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|---------|---------------------------------------------------| -| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️无需协议头(例如:https://、http://) | -| 监控名称 | 标识此监控的名称。名称需要保持唯一 | -| 端口 | SMTP 提供的端口号 | -| 电子邮件 | 您的电子邮件名称,用于 hello 命令的参数 | -| 超时时间 | 允许的收集响应时间 | -| 收集间隔 | 监视定期数据收集的间隔时间,单位:秒,最小可设置的间隔为 30 秒 | -| 是否检测可用性 | 是否在添加监控之前检测和验证监控的可用性。只有在检测成功后,才会继续进行添加和修改操作 | -| 描述备注 | 用于标识和描述此监控的更多信息,用户可以在此处添加备注信息 | - -### 收集的指标 - -#### 指标集:概要 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------|------|-------------------| -| 响应时间 | 毫秒 | SMTP 服务器响应请求所需的时间 | -| 响应状态 | | 响应状态 | -| SMTP 服务器标语 | | SMTP 服务器的标语 | -| helo 命令返回信息 | | helo 命令返回的响应信息 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md deleted file mode 100644 index aaba0dd9841..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/spring_gateway.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: spring_gateway -Title: 监控 Spring Gateway -sidebar_label: Spring Gateway -keywords: [开源监控工具, 
开源 Spring Gateway 监控工具, 监控 Spring Gateway 指标] ---- - -> 收集和监控 SpringBoot Actuator 提供的常规性能指标。 - -## 监控前操作 - -如果您想使用此监控类型监控 'Spring Gateway' 的信息,您需要集成您的 SpringBoot 应用程序并启用 SpringBoot Actuator。 - -**1、添加 POM .XML 依赖:** - -```xml - - org.springframework.boot - spring-boot-starter-actuator - -``` - -**2. 修改 YML 配置以暴露度量接口:** - -```yaml -management: - endpoint: - gateway: - enabled: true # default value - endpoints: - web: - exposure: - include: '*' - enabled-by-default: on -``` - -### 配置参数 - -| 参数名称 | 参数描述 | -|----------|--------------------------------------------------------|-----------------------------------------------| -| 监控主机 | 被监控的目标 IPV4、IPV6 或域名。注意⚠️不要包含协议头(例如:https://,http://)。 | -| 监控名称 | 用于标识此监控的名称,需要保证唯一性。 | -| 端口 | 数据库提供的默认端口为 8080。 | -| 启用 HTTPS | 是否通过 HTTPS 访问网站,请注意⚠️当启用 HTTPS 时,需要将默认端口更改为 443 | -| 采集间隔 | 监控周期性采集数据的时间间隔,单位为秒,最小间隔为 30 秒。 | -| 是否探测 | 在新增监控前是否先进行可用性探测,只有探测成功才会继续新增或修改操作。 | will continue only if the probe is successful | -| 描述备注 | 用于添加关于监控的额外标识和描述信息。 | - -### 采集指标 - -#### 指标收集: 健康状态 - -| 指标名称 | 指标单位 | 指标描述 | -|------|------|-------------------------| -| 服务状态 | 无 | 服务健康状态: UP(正常),Down(异常) | - -#### 指标收集: 环境信息 - -| 指标名称 | 指标单位 | 指标描述 | -|---------|------|----------------------------------------------| -| profile | 无 | 应用程序运行的配置环境: prod(生产环境),dev(开发环境),test(测试环境) | -| 端口号 | 无 | 应用程序暴露的端口 | -| 操作系统 | 无 | 运行操作系统 | -| 操作系统架构 | 无 | 运行操作系统的架构 | -| JDK供应商 | 无 | JDK 供应商 | -| JVM版本 | 无 | JVM 版本 | - -#### 指标收集: 线程信息 - -| 指标名称 | 指标单位 | 指标描述 | -|------|------|-------------| -| 状态 | 无 | 线程状态 | -| 数量 | 无 | 线程状态对应的线程数量 | - -#### 指标收集: 内存使用情况 - -| 指标名称 | 指标单位 | 指标描述 | -|------|------|------------| -| 内存空间 | 无 | 内存空间名称 | -| 内存占用 | MB | 此空间占用的内存大小 | - -#### 指标收集: 路由信息 - -| 指标名称 | 指标单位 | 指标描述 | -|-------|------|---------| -| 路由id | 无 | 路由 ID | -| 匹配规则 | 无 | 路由匹配规则 | -| 资源标识符 | 无 | 服务资源标识符 | -| 优先级 | 无 | 此路由的优先级 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md deleted file mode 100644 index d39b67d3efd..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/springboot2.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: springboot2 -title: 监控:SpringBoot2.0 监控 -sidebar_label: SpringBoot2.0 监控 -keywords: [开源监控系统, 开源消息中间件监控, SpringBoot2.0 监控] ---- - -> 对SpringBoot2.0 actuator 暴露的通用性能指标进行采集监控。 - -## 监控前操作 - -如果想要通过此监控类型监控 `SpringBoot` 中的信息,则需要您的SpringBoot应用集成并开启SpringBoot Actuator。 - -**1、添加POM.XML依赖:** - -```xml - - org.springframework.boot - spring-boot-starter-actuator - -``` - -**2、修改YML配置暴露指标接口:** - -```yaml -management: - endpoints: - web: - exposure: - include: '*' - enabled-by-default: on -``` - -*注意:如果你的项目里还引入了认证相关的依赖,比如springboot-security,那么SpringBoot Actuator暴露出的接口可能会被拦截,此时需要你手动放开这些接口,以springboot-security为例,需要在SecurityConfig配置类中加入以下代码:* - -```java -public class SecurityConfig extends WebSecurityConfigurerAdapter{ - @Override - protected void configure(HttpSecurity httpSecurity) throws Exception{ - httpSecurity - // 配置要放开的接口 ----------------------------------- - .antMatchers("/actuator/**").permitAll() - .antMatchers("/metrics/**").permitAll() - .antMatchers("/trace").permitAll() - .antMatchers("/heapdump").permitAll() - // 。。。 - // 其他接口请参考:https://blog.csdn.net/JHIII/article/details/126601858 ----------------------------------- - } -} -``` - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|-----------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 应用服务对外提供的端口,默认为8080。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| Base Path | 暴露接口路径前缀,默认 /actuator | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:health - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|-----------------| -| status | 无 | 服务健康状态: 
UP,Down | - -#### 指标集合:environment - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------|------|----------------------------| -| profile | 无 | 应用运行profile: prod,dev,test | -| port | 无 | 应用暴露端口 | -| os | 无 | 运行所在操作系统 | -| os_arch | 无 | 运行所在操作系统架构 | -| jdk_vendor | 无 | jdk vendor | -| jvm_version | 无 | jvm version | - -#### 指标集合:threads - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------|------|--------------| -| state | 无 | 线程状态 | -| number | 无 | 此线程状态对应的线程数量 | - -#### 指标集合:memory_used - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------|------|-----------| -| space | 无 | 内存空间名称 | -| mem_used | MB | 此空间占用内存大小 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md deleted file mode 100644 index 847a7775adc..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/sqlserver.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: sqlserver -title: 监控:SqlServer数据库监控 -sidebar_label: SqlServer数据库 -keywords: [开源监控系统, 开源数据库监控, SqlServer数据库监控] ---- - -> 对SqlServer数据库的通用性能指标进行采集监控。支持SqlServer 2017+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 数据库对外提供的端口,默认为1433。 | -| 查询超时时间 | 设置SQL查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| URL | 数据库连接URL,可选,若配置,则URL里面的数据库名称,用户名密码等参数会覆盖上面配置的参数 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|--------------------------------------------------| -| machine_name | 无 | 运行服务器实例的 Windows 计算机名称 | -| server_name | 无 | 与Windows实例关联的服务器和实例信息SQL Server | -| version | 无 | 实例的版本,SQL Server,格式为"major.minor.build.revision" | -| edition | 无 | 已安装的 实例的产品SQL Server版本 | -| 
start_time | 无 | 数据库启动时间 | - -#### 指标集合:performance_counters - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------------------|------|-------------------------------------------------------------------------------------| -| database_pages | 无 | Database pages, 已获得的页面数(缓冲池) | -| target_pages | 无 | Target pages, 缓冲池必须的理想页面数 | -| page_life_expectancy | s,秒 | Page life expectancy, 数据页在缓冲池中驻留的时间,这个时间一般会大于 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, 数据库缓冲池高速缓冲命中率,被请求的数据在缓冲池中被找到的概率,一般会大于 80% 才算正常,否则可能是缓冲池容量太小 | -| checkpoint_pages_sec | 无 | Checkpoint pages/sec, 检查点每秒写入磁盘的脏页个数,如果数据过高,证明缺少内存容量 | -| page_reads_sec | 无 | Page reads/sec, 缓存池中每秒读的页数 | -| page_writes_sec | 无 | Page writes/sec, 缓存池中每秒写的页数 | - -#### 指标集合:connection - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|---------| -| user_connection | 无 | 已连接的会话数 | - -### 常见问题 - -1. SSL连接问题修复 - -jdk版本:jdk11 -问题描述:SQL Server2019使用SA用户连接报错 -错误信息: - -```text -The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". 
ClientConnectionId:xxxxxxxxxxxxxxxxx -``` - -问题截图: -![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) - -解决方案: -添加`SqlServer`监控时使用高级设置,自定义JDBC URL,拼接的jdbc url后面加上参数配置,```;encrypt=true;trustServerCertificate=true;```这个参数true表示无条件信任server端返回的任何根证书。 - -样例:```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` - -参考文档:[microsoft pkix-path-building-failed-unable-to-find-valid-certification](https://techcommunity.microsoft.com/t5/azure-database-support-blog/pkix-path-building-failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md deleted file mode 100644 index e15de6e3e97..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ssl_cert.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: ssl_cert -title: 监控:SSL证书监控 -sidebar_label: SSL证书监控 -keywords: [开源监控系统, 开源网站监控, SSL证书监控监控] ---- - -> 对网站的SSL证书过期时间,响应时间等指标进行监测 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|-------------------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:certificate - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|----------| -| subject | 无 | 证书名称 | -| expired | 无 | 是否过期 | -| start_time | 无 | 有效期开始时间 | -| start_timestamp | ms毫秒 | 有效期开始时间戳 | -| end_time | 无 | 过期时间 | -| end_timestamp | ms毫秒 | 过期时间戳 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md deleted file mode 100644 index e1f112777f7..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/tomcat.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: tomcat -title: 监控:Tomcat监控 -sidebar_label: Tomcat监控 -keywords: [开源监控系统, 开源网站监控, Tomcat监控] ---- - -> 对Tomcat的通用性能指标进行采集监控 - -**使用协议:JMX** - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 查询超时时间 | 设置Tomcat连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | JMX连接用户名 | -| 密码 | JMX连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:memory_pool - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| name | 无 | 指标名称 | -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:code_cache - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|--------| -| committed | kb | 总量 | -| init | kb | 初始化大小 | -| max | kb | 最大 | -| used | kb | 已使用 | - -#### 指标集合:class_loading - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------------|------|----------| -| LoadedClassCount | 个 | 已加载类数量 | -| TotalLoadedClassCount | 个 | 历史已加载类总量 | -| UnloadedClassCount | 个 | 未加载类数量 | - -#### 指标集合:thread - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------|------|-----------| -| TotalStartedThreadCount | 个 | 已经开始的线程数量 | -| ThreadCount | 个 | 线程数 | -| PeakThreadCount | 个 | 未加载类数量 | -| DaemonThreadCount | 个 | 守护进程数 | -| CurrentThreadUserTime | ms | 使用时间 | -| CurrentThreadCpuTime | ms | 使用CPU时间 | - -### Tomcat开启JMX协议步骤 - -1. 搭建好tomcat后,进入tomcat下的bin目录,修改catalina.sh文件 注意⚠️替换IP地址 - -2. 
vim catalina.sh - -```aidl -CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" -``` - -参考: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md deleted file mode 100644 index 4425f1c2c06..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ubuntu.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: ubuntu -title: 监控:Ubuntu操作系统监控 -sidebar_label: Ubuntu操作系统 -keywords: [开源监控系统, 开源操作系统监控, Ubuntu监控] ---- - -> 对Ubuntu操作系统的通用性能指标进行采集监控。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Linux SSH对外提供的端口,默认为22。 | -| 用户名 | SSH连接用户名,可选 | -| 密码 | SSH连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:basic - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------|------|--------| -| hostname | 无 | 主机名称 | -| version | 无 | 操作系统版本 | -| uptime | 无 | 系统运行时间 | - -#### 指标集合:cpu - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------|------|--------------------| -| info | 无 | CPU型号 | -| cores | 核数 | CPU内核数量 | -| interrupt | 个数 | CPU中断数量 | -| load | 无 | CPU最近1/5/15分钟的平均负载 | -| context_switch | 个数 | 当前上下文切换数量 | -| usage | % | CPU使用率 | - -#### 指标集合:memory - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|----------| -| total | Mb | 总内存容量 | -| used | Mb | 用户程序内存量 | -| free | Mb | 空闲内存容量 | -| buff_cache | Mb | 缓存占用内存 | -| available | Mb | 剩余可用内存容量 | -| usage | % | 内存使用率 | - -#### 指标集合:disk - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|-----------| -| disk_num | 块数 | 磁盘总数 | -| partition_num | 分区数 | 分区总数 | -| block_write | 块数 | 
写入磁盘的总块数 | -| block_read | 块数 | 从磁盘读出的块数 | -| write_rate | iops | 每秒写磁盘块的速率 | - -#### 指标集合:interface - -| 指标名称 | 指标单位 | 指标帮助描述 | -|----------------|------|---------------| -| interface_name | 无 | 网卡名称 | -| receive_bytes | byte | 入站数据流量(bytes) | -| transmit_bytes | byte | 出站数据流量(bytes) | - -#### 指标集合:disk_free - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|---------| -| filesystem | 无 | 文件系统的名称 | -| used | Mb | 已使用磁盘大小 | -| available | Mb | 可用磁盘大小 | -| usage | % | 使用率 | -| mounted | 无 | 挂载点目录 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md deleted file mode 100644 index 7403f255aec..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/website.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: website -title: 监控:网站监测 -sidebar_label: 网站监测 -keywords: [开源监控系统, 开源网站监控] ---- - -> 对网站是否可用,响应时间等指标进行监测 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|---------|-------------------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | 网站对外提供的端口,http一般默认为80,https一般默认为443。 | -| 相对路径 | 网站地址除IP端口外的后缀路径,例如 `www.tancloud.io/console` 网站的相对路径为 `/console`。 | -| 启用HTTPS | 是否通过HTTPS访问网站,注意⚠️开启HTTPS一般默认对应端口需要改为443 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:summary - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|--------| -| responseTime | ms毫秒 | 网站响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md deleted file mode 100644 index 0b1791435f4..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/windows.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: windows -title: 监控:Windows操作系统监控 
-sidebar_label: Windows操作系统 -keywords: [开源监控系统, 开源操作系统监控, Windows操作系统监控] ---- - -> 通过SNMP协议对Windows操作系统的通用性能指标进行采集监控。 -> 注意⚠️ Windows服务器需开启SNMP服务 - -参考资料: -[什么是SNMP协议1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[什么是SNMP协议2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win配置SNMP英文](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win配置SNMP中文](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|----------|----------------------------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Windows SNMP服务对外提供的端口,默认为 161。 | -| SNMP 版本 | SNMP协议版本 V1 V2c V3 | -| SNMP 团体字 | SNMP 协议团体名(Community Name),用于实现SNMP网络管理员访问SNMP管理代理时的身份验证。类似于密码,默认值为 public | -| 超时时间 | 协议连接超时时间 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:system - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|--------| -| name | 无 | 主机名称 | -| descr | 无 | 操作系统描述 | -| uptime | 无 | 系统运行时间 | -| numUsers | 个数 | 当前用户数 | -| services | 个数 | 当前服务数量 | -| processes | 个数 | 当前进程数量 | -| responseTime | ms | 采集响应时间 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md deleted file mode 100644 index 9752c22bc4e..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/zookeeper.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -id: zookeeper -title: 监控:Zookeeper监控 -sidebar_label: Zookeeper监控 -keywords: [开源监控系统, Zookeeper监控监控] ---- - -> 对Zookeeper的通用性能指标进行采集监控 - -### 监控前操作 - -> 监控 zookeeper 目前的实现方案使用的是 zookeeper 提供的四字命令 + netcat 来收集指标数据 -> 需要用户自己将zookeeper的四字命令加入白名单 - -1. 
加白名单步骤 - -> 1.找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` -> -> 2.配置文件中加入以下命令 - -```shell -# 将需要的命令添加到白名单中 -4lw.commands.whitelist=stat, ruok, conf, isro - -# 将所有命令添加到白名单中 -4lw.commands.whitelist=* -``` - -> 3.重启服务 - -```shell -zkServer.sh restart -``` - -2. netcat 协议 - -目前实现方案需要我们部署zookeeper的linux服务器,安装netcat的命令环境 - -> netcat安装步骤 -> -> ```shell -> yum install -y nc -> ``` - -如果终端显示以下信息则说明安装成功 - -```shell -Complete! -``` - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | Zookeeper的Linux服务器SSH端口。 | -| 查询超时时间 | 设置Zookeeper连接的超时时间,单位ms毫秒,默认3000毫秒。 | -| 用户名 | Zookeeper所在Linux连接用户名 | -| 密码 | Zookeeper所在Linux连接密码 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:conf - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------|------|----------------------------------------| -| clientPort | 无 | 端口 | -| dataDir | 无 | 数据快照文件目录,默认10万次操作生成一次快照 | -| dataDirSize | kb | 数据快照文件大小 | -| dataLogDir | 无 | 事务日志文件目录,生产环境放在独立磁盘上 | -| dataLogSize | kb | 事务日志文件大小 | -| tickTime | ms | 服务器之间或客户端与服务器之间维持心跳的时间间隔 | -| minSessionTimeout | ms | 最小session超时时间 心跳时间x2 指定时间小于该时间默认使用此时间 | -| maxSessionTimeout | ms | 最大session超时时间 心跳时间x20 指定时间大于该时间默认使用此时间 | -| serverId | 无 | 服务器编号 | - -#### 指标集合:stats - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------------------|------|------------| -| zk_version | 无 | 服务器版本 | -| zk_server_state | 无 | 服务器角色 | -| zk_num_alive_connections | 个 | 连接数 | -| zk_avg_latency | ms | 平均延时 | -| zk_outstanding_requests | 个 | 堆积请求数 | -| zk_znode_count | 个 | znode结点数量 | -| zk_packets_sent | 个 | 发包数 | -| zk_packets_received | 个 | 收包数 | -| zk_watch_count | 个 | watch数量 | -| zk_max_file_descriptor_count | 个 | 最大文件描述符数量 | -| zk_approximate_data_size | kb | 数据大小 | -| zk_open_file_descriptor_count | 个 | 打开的文件描述符数量 | -| zk_max_latency | ms | 最大延时 | -| 
zk_ephemerals_count | 个 | 临时节点数 | -| zk_min_latency | ms | 最小延时 | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md deleted file mode 100644 index e1184fcc2ab..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/introduce.md +++ /dev/null @@ -1,309 +0,0 @@ ---- -id: introduce -title: HertzBeat赫兹跳动 -sidebar_label: 介绍 -slug: / ---- - -> 实时监控系统,无需Agent,性能集群,兼容Prometheus,自定义监控和状态页构建能力。 - -[![Discord](https://img.shields.io/badge/Chat-Discord-7289DA?logo=discord)](https://discord.gg/Fb6M73htGr) -[![Reddit](https://img.shields.io/badge/Reddit-Community-7289DA?logo=reddit)](https://www.reddit.com/r/hertzbeat/) -[![Twitter](https://img.shields.io/twitter/follow/hertzbeat1024?logo=twitter)](https://twitter.com/hertzbeat1024) -[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8139/badge)](https://www.bestpractices.dev/projects/8139) -[![Docker Pulls](https://img.shields.io/docker/pulls/apache/hertzbeat?style=%20for-the-badge&logo=docker&label=DockerHub%20Download)](https://hub.docker.com/r/apache/hertzbeat) -[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/hertzbeat)](https://artifacthub.io/packages/search?repo=hertzbeat) -[![QQ](https://img.shields.io/badge/QQ-630061200-orange)](https://qm.qq.com/q/FltGGGIX2m) -[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCri75zfWX0GHqJFPENEbLow?logo=youtube&label=YouTube%20Channel)](https://www.youtube.com/channel/UCri75zfWX0GHqJFPENEbLow) - -## 🎡 介绍 - -[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是一个易用友好的开源实时监控告警系统,无需 Agent,高性能集群,兼容 Prometheus,提供强大的自定义监控和状态页构建能力。 - -### 特点 - -- 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控阈值告警通知一步到位。 -- 易用友好,无需 `Agent`,全 `WEB` 页面操作,鼠标点一点就能监控告警,零上手学习成本。 -- 将 `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` 等协议规范可配置化,只需在浏览器配置监控模版 `YML` 
就能使用这些协议去自定义采集想要的指标。您相信只需配置下就能立刻适配一款 `K8s` 或 `Docker` 等新的监控类型吗? -- 兼容 `Prometheus` 的系统生态并且更多,只需页面操作就可以监控 `Prometheus` 所能监控的。 -- 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 -- 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱` 等方式消息及时送达。 -- 提供强大的状态页构建能力,轻松向用户传达您产品服务的实时状态。 - -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 - ----- - -### 强大的监控模版 - -> 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 - -HertzBeat 自身并没有去创造一种采集数据协议让监控对端来适配它。而是充分使用了现有的生态,`SNMP协议`采集网络交换机路由器信息,`JMX规范`采集JAVA应用信息,`JDBC规范`采集数据集信息,`SSH`直连执行脚本获取回显信息,`HTTP+(JsonPath | prometheus等)`解析API接口信息,`IPMI协议`采集服务器信息等等。 -HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标数据。 - -![hertzbeat](/img/blog/multi-protocol.png) - -你相信用户只需在UI页面编写一个监控模版,点击保存后,就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? - -![hertzbeat](/img/home/9.png) - -### 内置监控类型 - -**官方内置了大量的监控模版类型,方便用户直接在页面添加使用,一款监控类型对应一个YML监控模版** - -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP 
Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml), [SpringBoot3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml), - [Udp Port](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-udp.yml), [Dns](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dns.yml), - [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), - [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), - [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), - [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), 
[MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), - [DM](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml), [OpenGauss](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml), - [ClickHouse](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml), - [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) - [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), - [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), [NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), - [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora 
CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), - [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), - [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), - [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), - [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), - [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml), 
[ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml), - [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), - [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), - [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), - [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), - [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) -- 和更多自定义监控模版。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook` `Server酱`。 - -### 强大自定义功能 - -> 由前面的**监控模版**介绍,大概清楚了 `HertzBeat` 拥有的强大自定义功能。 -> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 -> 
模版里面包含各个协议的使用配置,环境变量,指标转换,指标计算,单位转换,指标采集等一系列功能,帮助用户能采集到自己想要的监控指标。 - -![hertzbeat](/img/docs/custom-arch.png) - -### 无需 Agent - -> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 `agent` 的安装部署调试升级了。 -> 每台主机得装个 `agent`,为了监控不同应用中间件可能还得装几个对应的 `agent`,监控数量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 -> `agent` 的版本是否与主应用兼容, `agent` 与主应用的通讯调试, `agent` 的同步升级等等等等,这些全是头大的点。 - -`HertzBeat` 的原理就是使用不同的协议去直连对端系统,采用 `PULL` 的形式去拉取采集数据,无需用户在对端主机上部署安装 `Agent` | `Exporter` 等。 - -- 比如监控 `linux操作系统`, 在 `HertzBeat` 端输入IP端口账户密码或密钥即可。 -- 比如监控 `mysql数据库`, 在 `HertzBeat` 端输入IP端口账户密码即可。 -**密码等敏感信息全链路加密** - -### 高性能集群 - -> 当监控数量指数级上升,采集性能下降或者环境不稳定容易造成采集器单点故障时,这时我们的采集器集群就出场了。 - -- `HertzBeat` 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 -- 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 -- 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 - -![hertzbeat](/img/docs/cluster-arch.png) - -### 云边协同 - -> 两地三中心,多云环境,多隔离网络,这些场景名词可能大家略有耳闻。当需要用一套监控系统统一监控不同隔离网络的IT资源时,这时我们的云边协同就来啦。 - -- `HertzBeat` 支持部署边缘采集器集群,与主 `HertzBeat` 服务云边协同提升采集能力。 - -在多个网络不相通的隔离网络中,在以往方案中我们需要在每个网络都部署一套监控系统,这导致数据不互通,管理部署维护都不方便。 -`HertzBeat` 提供的云边协同能力,可以在多个隔离网络部署边缘采集器,采集器在隔离网络内部进行监控任务采集,采集数据上报,由主服务统一调度管理展示。 - -![hertzbeat](/img/docs/cluster-arch.png) - -### 易用友好 - -- 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 -- 全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 -- 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 -- 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 -- 阈值告警通知友好,基于表达式阈值配置,多种告警通知渠道,支持告警静默,时段标签告警级别过滤等。 - -### 完全开源 - -- Dromara开源社区顶级项目,Gitee GVP,使用`Apache2`协议,由自由开放的开源社区主导维护的开源协作产品。 -- 无监控数量`License`,监控类型等伪开源限制。 -- 基于`Java+SpringBoot+TypeScript+Angular`主流技术栈构建,方便的二次开发。 -- 开源不等同于免费,基于HertzBeat二次开发需保留logo,名称,页面脚注,版权等。 - -**HertzBeat 已被 [CNCF云原生全景图](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category) 收录** - -![cncf](/img/home/cncf-landscape-left-logo.svg) - ---- -**`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** - ------ - -## 即刻体验一波 - -Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` -浏览器访问 
`http://localhost:1157` 默认账户密码 `admin/hertzbeat` - -### 登陆页面 - -- HertzBeat 的用户管理统一由配置文件 `sureness.yml` 维护,用户可以通过修改此文件来新增删除修改用户信息,用户角色权限等。默认账户密码 admin/hertzbeat - -![hertzbeat](/img/home/0.png) - -### 概览页面 - -- 全局概览页面,分类展示了当前监控大类别数量分布,用户可直观查看当前的监控类型与数量并点击跳转至对应监控类型进行维护管理。 -- 展示当前注册的采集器集群状态,包括采集器的上线状态,监控任务,启动时间,IP地址,名称等。 -- 下发展示了最近告警信息列表,告警级别分布情况,告警处理率情况。 - -![hertzbeat](/img/home/1.png) - -### 监控中心 - -- 监控入口,支持对应用服务,数据库,操作系统,中间件,网络,自定义等监控的管理。 -- 以列表的形式展示当前已添加的监控,支持对监控的新增,修改,删除,取消监控,导入导出,批量管理等。 -- 支持标签分组,查询过滤,查看监控详情入口等。 - -内置支持的监控类型包括: - -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml), [SpringBoot3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml), - [Udp Port](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-udp.yml), 
[Dns](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dns.yml), - [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), - [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), - [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -- [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), - [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), - [DM](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml), [OpenGauss](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml), - 
[ClickHouse](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml), - [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) - [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), - [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), [NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -- [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), - [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), - [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), - [Red 
Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), - [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), - [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), - [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml), [ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml), - [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), - 
[AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), - [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), - [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), - [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) - -![hertzbeat](/img/home/2.png) - -### 新增修改监控 - -- 新增或修改指定监控类型的监控实例,配置对端监控的IP,端口等参数,设置采集周期,采集任务调度方式,支持提前探测可用性等。 -- 页面上配置的监控参数由对应监控类型的监控模版所定义,用户可以通过修改监控模版来修改页面配置参数。 -- 支持关联标签,用标签来管理监控分组,告警匹配等。 - -![hertzbeat](/img/home/10.png) - -### 监控详情 - -- 监控的数据详情页面,展示了当前监控的基本参数信息,监控指标数据信息。 -- 监控实时数据报告,以小卡片列表的形式展示了当前监控的所有指标实时值,用户可根据实时值参考配置告警阈值规则。 -- 监控历史数据报告,以趋势图表的形式展示了当前监控数值类型的指标的历史值,支持查询小时,天,月的历史数据,支持配置页面刷新时间。 -- ⚠️注意监控历史图表需配置外置时序数据库才能获取完整功能,时序数据库支持: IOTDB, TDengine, InfluxDB, GreptimeDB - -![hertzbeat](/img/home/3.png) - -![hertzbeat](/img/home/4.png) - -### 告警中心 - -- 已触发告警消息的管理展示页面,使用户有直观的展示当前告警情况。 -- 支持告警处理,告警标记未处理,告警删除清空等批量操作。 - 
-![hertzbeat](/img/home/7.png) - -### 阈值规则 - -- 对于监控的可用性状态设置阈值规则,特定指标的值超过我们预期范围时发出告警,这些都可以在阈值规则这里配置。 -- 告警级别分为三级:通知告警,严重告警,紧急告警。 -- 阈值规则支持可视化页面配置或表达式规则配置,灵活性更高。 -- 支持配置触发次数,告警级别,通知模版,关联指定监控等。 - -![hertzbeat](/img/home/6.png) - -![hertzbeat](/img/home/11.png) - -### 告警收敛 - -- 当通过阈值规则判断触发告警后,会进入到告警收敛,告警收敛会根据规则对特定时间段的重复告警消息去重收敛,已避免大量重复性告警导致接收人告警麻木。 -- 告警收敛规则支持重复告警生效时间段,标签匹配和告警级别匹配过滤。 - -![hertzbeat](/img/home/12.png) - -![hertzbeat](/img/home/13.png) - -### 告警静默 - -- 当通过阈值规则判断触发告警后,会进入到告警静默,告警静默会根据规则对特定一次性时间段或周期性时候段的告警消息屏蔽静默,此时间段不发送告警消息。 -- 此应用场景如用户在系统维护中,无需发已知告警。用户在工作日时间才会接收告警消息,用户在晚上需避免打扰等。 -- 告警静默规则支持一次性时间段或周期性时间段,支持标签匹配和告警级别匹配。 - -![hertzbeat](/img/home/14.png) - -![hertzbeat](/img/home/15.png) - -### 消息通知 - -- 消息通知功能是把告警消息通过不同媒体渠道通知给指定的接收人,告警消息及时触达。 -- 功能包含接收人信息管理和通知策略管理,接收人管理维护接收人信息以其通知方式信息,通知策略管理维护把哪些告警信息通知给哪些接收人的策略规则。 -- 通知方式支持 `邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式。 -- 通知策略支持标签匹配和告警级别匹配,方便的使不同标签的告警和告警级别分派给不同的接收处理人。 -- 支持通知模版,用户可以自定义通过模版内容格式来满足自己的个性化通知展示需求。 - -![hertzbeat](/img/home/16.png) - -![hertzbeat](/img/home/17.png) - -![hertzbeat](/img/home/8.png) - -### 监控模版 - -- HertzBeat 将 `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` 等协议规范可配置化,只需在浏览器配置监控模版 `YML` 就能使用这些协议去自定义采集想要的指标。您相信只需配置下就能立刻适配一款 `K8s` 或 `Docker` 等新的监控类型吗? 
-- 同理我们内置的所有监控类型(mysql,website,jvm,k8s)也一一映射为对应的监控模版,用户可以新增修改监控模版来自定义监控功能。 - -![hertzbeat](/img/home/9.png) - ---- - -**还有更多强大的功能快去探索呀。Have Fun!** - ------ - -**官网: ** -**Github: ** -**Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md deleted file mode 100644 index 974009005cd..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contact.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: contact -title: 交流联系 -sidebar_label: 交流联系 ---- - -> 如果您在使用过程有任何需要帮助或者想交流建议,可以通过 群 ISSUE 讨论交流。 - -[GITHUB ISSUES](https://github.com/apache/hertzbeat/issues) - -[Chat On Discord](https://discord.gg/Fb6M73htGr) - -[Follow Us Twitter](https://twitter.com/hertzbeat1024) - -**微信交流群** 加微信号 ahertzbeat 邀请您进微信群。 - -**QQ交流群** 加QQ群号 630061200 - -##### Github Discussion - -[Github Discussion](https://github.com/apache/hertzbeat/discussions) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md deleted file mode 100644 index 6913149bab1..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/contributing.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -id: contributing -title: 贡献者指南 -sidebar_label: 贡献者指南 ---- - -> 非常欢迎参与项目贡献,我们致力于维护一个互相帮助的快乐社区。 - -### 贡献方式 - -> 在 HertzBeat 社区,贡献方式有很多: - -- 💻**代码**:可以帮助社区完成一些任务、编写新的feature或者是修复一些bug; - -- ⚠️**测试**:可以来参与测试代码的编写,包括了单元测试、集成测试、e2e测试; - -- 📖**文档**:可以编写或完善文档,来帮助用户更好地了解和使用 HertzBeat; - -- 📝**博客**:可以撰写 HertzBeat 的相关文章,来帮助社区更好地推广; - -- 🤔**讨论**:可以参与 HertzBeat 新的feature的讨论,将您的想法跟 HertzBeat 融合; - -- 💡**布道**:可以帮助宣传或推广 HertzBeat 社区,在 meetup 或 summit 中演讲; - -- 💬**建议**:也可以对项目或者社区提出一些建议,促进社区的良性发展; - -更多贡献方式参见 [Contribution Types](https://allcontributors.org/docs/en/emoji-key) - -即便是小到错别字的修正我们也都非常欢迎 :) - -### 让 HertzBeat 运行起来 - -> 让 HertzBeat 
的代码在您的开发工具上运行起来,并且能够断点调试。 -> 此为前后端分离项目,本地代码启动需将后端 [manager](https://github.com/apache/hertzbeat/tree/master/manager) 和前端 [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) 分别启动生效。 - -- 后端启动 - -1. 需要`maven3+`, `java11`和`lombok`环境 -2. (可选)修改配置文件配置信息-`manager/src/main/resources/application.yml` -3. 启动`springboot manager`服务 `manager/src/main/java/org/apache/hertzbeat/manager/Manager.java` - -- 前端启动 - -1. 需要`nodejs npm angular-cli`环境 -2. 安装yarn `npm install -g yarn` -3. 在前端工程目录web-app下执行 `yarn install` -4. 全局安装angular-cli `npm install -g @angular/cli@14 --registry=https://registry.npm.taobao.org` -5. 待本地后端启动后,在web-app目录下启动本地前端 `ng serve --open` -6. 浏览器访问 localhost:4200 即可开始,默认账号密码 admin/hertzbeat - -### 寻找任务 - -寻找您感兴趣的Issue!在我们的GitHub仓库和邮件列表中,我们经常会发布一些带有 good first issue 或者 status: volunteer wanted 标签的issue,这些issue都欢迎贡献者的帮助。 -其中good first issue往往门槛较低、适合新手。 - -当然,如果您有好的想法,也可以直接在GitHub Discussion 中提出或者联系社区。 - -### 提交 Pull Request - -1. 首先您需要 Fork 目标仓库 [hertzbeat repository](https://github.com/apache/hertzbeat). -2. 然后 用git命令 将代码下载到本地: - -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` - -3. 下载完成后,请参考目标仓库的入门指南或者 README 文件对项目进行初始化。 -4. 接着,您可以参考如下命令进行代码的提交, 切换新的分支, 进行开发: - -```shell -git checkout -b a-feature-branch #Recommended -``` - -5. 提交 commit , commit 描述信息需要符合约定格式: [module name or type name]feature or bugfix or doc: custom message. - -```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` - -6. 推送到远程仓库 - -```shell -git push origin a-feature-branch -``` - -7. 
然后您就可以在 GitHub 上发起新的 PR (Pull Request)。 - -请注意 PR 的标题需要符合我们的规范,并且在 PR 中写上必要的说明,来方便 Committer 和其他贡献者进行代码审查。 - -### 等待PR代码被合并 - -在提交了 PR 后,Committer 或者社区的小伙伴们会对您提交的代码进行审查(Code Review),会提出一些修改建议,或者是进行一些讨论,请及时关注您的PR。 - -若后续需要改动,不需要发起一个新的 PR,在原有的分支上提交 commit 并推送到远程仓库后,PR会自动更新。 - -另外,我们的项目有比较规范和严格的 CI 检查流程,在提交 PR 之后会触发 CI,请注意是否通过 CI 检查。 - -最后,Committer 可以将 PR 合并入 master 主分支。 - -### 代码被合并后 - -在代码被合并后,您就可以在本地和远程仓库删除这个开发分支了: - -```shell -git branch -d a-dev-branch -git push origin --delete a-dev-branch -``` - -在主分支上,您可以执行以下操作来同步上游仓库: - -```shell -git remote add upstream https://github.com/apache/hertzbeat.git #Bind the remote warehouse, if it has been executed, it does not need to be executed again -git checkout master -git pull upstream master -``` - -### 如何成为Committer? - -通过上述步骤,您就是 HertzBeat 的贡献者了。重复前面的步骤,在社区中保持活跃,坚持下去,您就能成为 Committer! - -### 加入讨论交流 - -[Github Discussion](https://github.com/apache/hertzbeat/discussions) - -加微信号 `ahertzbeat` 拉您进微信交流群 - -### 模块 - -- **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** 提供监控管理,系统管理基础服务 - -> 提供对监控的管理,监控应用配置的管理,系统用户租户后台管理等。 -> -> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** 提供监控数据采集服务 -> 使用通用协议远程采集获取对端指标数据。 -> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** 提供监控数据仓储服务 -> 采集指标结果数据管理,数据落盘,查询,计算统计。 -> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** 提供告警服务 -> 告警计算触发,任务状态联动,告警配置,告警通知。 -> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** 提供可视化控制台页面 -> 监控告警系统可视化控制台前端 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md deleted file mode 100644 index 08bd383d97d..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/design.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: design -title: 设计文档 -sidebar_label: 设计文档 ---- - -### HertzBeat架构 - 
-![architecture](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/docs/hertzbeat-arch.svg) - -### TanCloud架构 - -TanCloud是基于HertzBeat的公网SAAS集群版本,采用多集群,多租户的架构模式。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md deleted file mode 100644 index 9492da3f9ca..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/developer.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -id: developer -title: 开发者们 -sidebar_label: 开发者们 ---- - -## ✨ HertzBeat的成员们 - - - - - - - - - - - - - - - - - - - -
tomsun28
tomsun28

💻 📖 🎨
会编程的王学长
会编程的王学长

💻 📖 🎨
zcx
zcx

💻 🐛 🎨
进击的阿晨
进击的阿晨

💻 🎨 🐛
铁甲小宝
铁甲小宝

🐛 💻 📖
cuipiheqiuqiu
cuipiheqiuqiu

💻 ⚠️ 🎨
hudongdong129
hudongdong129

💻 ⚠️ 📖
zqr10159
Logic

📖 💻🎨
vinci
vinci

💻 📖 🎨
淞筱
淞筱

💻 📖 🎨
东风
东风

💻 🎨 📖
- -cert - -## ✨ HertzBeat的开发者们 - -Thanks these wonderful people, welcome to join us: [贡献者指南](contributing) - -cert - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
tomsun28
tomsun28

💻 📖 🎨
会编程的王学长
会编程的王学长

💻 📖 🎨
MaxKey
MaxKey

💻 🎨 🤔
观沧海
观沧海

💻 🎨 🐛
yuye
yuye

💻 📖
jx10086
jx10086

💻 🐛
winnerTimer
winnerTimer

💻 🐛
goo-kits
goo-kits

💻 🐛
brave4Time
brave4Time

💻 🐛
WalkerLee
WalkerLee

💻 🐛
jianghang
jianghang

💻 🐛
ChineseTony
ChineseTony

💻 🐛
wyt199905
wyt199905

💻
卫傅庆
卫傅庆

💻 🐛
zklmcookle
zklmcookle

💻
DevilX5
DevilX5

📖 💻
tea
tea

💻
yangshihui
yangshihui

💻 🐛
DreamGirl524
DreamGirl524

💻 📖
gzwlly
gzwlly

📖
cuipiheqiuqiu
cuipiheqiuqiu

💻 ⚠️ 🎨
lambert
lambert

💻
mroldx
mroldx

📖
woshiniusange
woshiniusange

📖
VampireAchao
VampireAchao

💻
zcx
zcx

💻 🐛 🎨
CharlieXCL
CharlieXCL

📖
Privauto
Privauto

💻 📖
emrys
emrys

📖
SxLiuYu
SxLiuYu

🐛
All Contributors
All Contributors

📖
铁甲小宝
铁甲小宝

💻 📖
click33
click33

📖
蒋小小
蒋小小

📖
Kevin Huang
Kevin Huang

📖
铁甲小宝
铁甲小宝

🐛 💻 📖
Captain Jack
Captain Jack

📖
haibo.duan
haibo.duan

⚠️ 💻
assassin
assassin

🐛 💻
Reverse wind
Reverse wind

⚠️ 💻
luxx
luxx

💻
Ikko Ashimine
Ikko Ashimine

📖
leizenan
leizenan

💻
BKing
BKing

📖
xingshuaiLi
xingshuaiLi

📖
wangke6666
wangke6666

📖
刺猬
刺猬

🐛 💻
Haste
Haste

💻
zhongshi.yi
zhongshi.yi

📖
Qi Zhang
Qi Zhang

📖
MrAndyMing
MrAndyMing

📖
idongliming
idongliming

💻
Zichao Lin
Zichao Lin

💻 📖
liudonghua
liudonghua

💻 🤔
Jerry
Jerry

💻 ⚠️ 🤔
yanhom
yanhom

📖
fsl
fsl

💻
xttttv
xttttv

📖
NavinKumarBarnwal
NavinKumarBarnwal

💻
Zakkary
Zakkary

📖
sunxinbo
sunxinbo

💻 ⚠️
ldzbook
ldzbook

📖 🐛
余与雨
余与雨

💻 ⚠️
MysticalDream
MysticalDream

💻 ⚠️
zhouyoulin12
zhouyoulin12

💻 ⚠️
jerjjj
jerjjj

💻
wjl110
wjl110

💻
Sean
Sean

📖
chenyiqin
chenyiqin

💻 ⚠️
hudongdong129
hudongdong129

💻 ⚠️ 📖
TherChenYang
TherChenYang

💻 ⚠️
HattoriHenzo
HattoriHenzo

💻 ⚠️
ycilry
ycilry

📖
aoshiguchen
aoshiguchen

📖 💻
蔡本祥
蔡本祥

💻
浮游
浮游

💻
Grass-Life
Grass-Life

💻
xiaohe428
xiaohe428

💻 📖
TableRow
TableRow

📖 💻
ByteIDance
ByteIDance

💻
Jangfe
Jangfe

💻
zqr10159
zqr10159

📖 💻
vinci
vinci

💻 📖 🎨
js110
js110

💻
CrazyLionLi
CrazyLionLi

📖
banmajio
banmajio

💻
topsuder
topsuder

💻
richar2022
richar2022

💻
fcb-xiaobo
fcb-xiaobo

💻
wenkyzhang
wenkyzhang

📖
ZangJuxy
ZangJuxy

📖
l646505418
l646505418

💻
Carpe-Wang
Carpe-Wang

💻
莫枢
莫枢

💻
huangcanda
huangcanda

💻
世纪末的架构师
世纪末的架构师

💻
ShuningWan
ShuningWan

📖
MrYZhou
MrYZhou

📖
suncqujsj
suncqujsj

📖
sunqinbo
sunqinbo

💻
haoww
haoww

📖
i-mayuan
i-mayuan

📖
fengruge
fengruge

📖
zhanghuan
zhanghuan

💻
shenymin
shenymin

💻
Dhruva Chandra
Dhruva Chandra

💻
miss_z
miss_z

📖
wyt990
wyt990

💻
licocon
licocon

💻
Mi Na
Mi Na

💻
Kylin-Guo
Kylin-Guo

📖
Mr灬Dong先生
Mr灬Dong先生

💻
Pratyay Banerjee
Pratyay Banerjee

📖 💻
yujianzhong520
yujianzhong520

💻
SPPan
SPPan

💻
ZhangJiashu
ZhangJiashu

💻
impress
impress

💻
凌晨一点半
凌晨一点半

📖
Eeshaan Sawant
Eeshaan Sawant

💻
nandofromthebando
nandofromthebando

💻
caiboking
caiboking

💻
baixing99
baixing99

💻
Yang Chuang
Yang Chuang

💻
wlin20
wlin20

💻
guojing1983
guojing1983

💻
moxi
moxi

📖
qq471754603
qq471754603

💻
渭雨
渭雨

💻
liuxuezhuo
liuxuezhuo

💻
lisongning
lisongning

💻
YutingNie
YutingNie

💻 📖 🎨
Mike Zhou
Mike Zhou

💻 📖 🎨
小笨蛋
小笨蛋

💻
littlezhongzer
littlezhongzer

💻
ChenXiangxxxxx
ChenXiangxxxxx

💻
Mr.zhou
Mr.zhou

💻
姚贤丰
姚贤丰

💻
lingluojun
lingluojun

💻
1ue
1ue

💻
qyaaaa
qyaaaa

💻
novohit
novohit

💻
zhuoshangyi
zhuoshangyi

💻
ruanliang
ruanliang

📖 💻
Eden4701
Eden4701

💻 📖 🎨
XiaTian688
XiaTian688

📖
liyinjiang
liyinjiang

💻
ZhangJiashu
ZhangJiashu

📖
moghn
moghn

📖
xiaoguolong
xiaoguolong

💻
Smliexx
Smliexx

💻
Naruse
Naruse

📖 💻
Bala Sukesh
Bala Sukesh

💻
Jinyao Ma
Jinyao Ma

💻
Rick
Rick

💻 ⚠️
东风
东风

💻 🎨 📖
sonam singh
sonam singh

💻
ZhangZixuan1994
ZhangZixuan1994

💻
SHIG
SHIG

💻
泰上老菌
泰上老菌

💻
ldysdu
ldysdu

💻
梁同学
梁同学

💻
avv
avv

💻
yqxxgh
yqxxgh

📖
CharlieShi46
CharlieShi46

💻
Nctllnty
Nctllnty

💻
Wang-Yonghao
Wang-Yonghao

📖
- - - - diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md deleted file mode 100644 index bc2c4f50c96..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/huaweicloud.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: huaweicloud -title: HertzBeat & HuaweiCloud -sidebar_label: HertzBeat & HuaweiCloud ---- - -### HertzBeat 与 HuaweiCloud 的开源合作需求Issue - -> 欢迎大家对感兴趣的Issue领取贡献。 - -- [Task] support using Huawei Cloud OBS to store custom define yml file [#841](https://github.com/apache/hertzbeat/issues/841) -- [Task] support Huawei Cloud CCE metrics monitoring [#839](https://github.com/apache/hertzbeat/issues/839) -- [Task] support EulerOS metrics monitoring [#838](https://github.com/apache/hertzbeat/issues/838) -- [Task] support using Huawei Cloud SMN send alarm notification message [#837](https://github.com/apache/hertzbeat/issues/837) -- [Task] support using GaussDB For Influx store history metrics data [#836](https://github.com/apache/hertzbeat/issues/836) - -### 关于 HuaweiCloud 开源活动 - -HuaweiCloud 华为云将面向开源软件工具链与环境、开源应用构建和开源生态组件构建这三大重点场景,提供技术支持、奖金支持、活动支持,邀请更多的开发者,携手构建开源for HuaweiCloud。 - -开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx for HuaweiCloud”。 - -感兴趣的开发者可以查看:华为云开源项目仓库 了解更多。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md deleted file mode 100644 index 3cdc25e6196..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/images-deploy.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -id: images-deploy -title: HertzBeat 华为云镜像部署 -sidebar_label: HertzBeat 华为云镜像部署快速指引 ---- - -> 易用友好的开源实时监控告警工具,无需Agent,强大自定义监控能力。 - 
-[![discord](https://img.shields.io/badge/chat-on%20discord-brightgreen)](https://discord.gg/Fb6M73htGr) -[![QQ](https://img.shields.io/badge/qq-630061200-orange)](https://qm.qq.com/q/FltGGGIX2m) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/web-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/ping-connect.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/port-available.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/database-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/os-monitor.svg) -![hertzbeat](https://img.shields.io/badge/monitor-cloud%20native-brightgreen) -![hertzbeat](https://img.shields.io/badge/monitor-middleware-blueviolet) -![hertzbeat](https://img.shields.io/badge/monitor-network-red) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/custom-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/alert.svg) - -## 🎡 介绍 - -> [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 -> 更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 -> -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 -> 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
-> -> `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -> 当然我们也提供了对应的 **[SAAS版本监控云](https://console.tancloud.cn)**,中小团队和个人无需再为了监控自己的网站资源,而去部署学习一套繁琐的监控系统,**[登录即可免费开始](https://console.tancloud.cn)**。 - ----- - -![hertzbeat](/img/home/1.png) - -![hertzbeat](/img/home/9.png) - -## ⛄ Supported - -- [网站监控](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-website.yml), [端口可用性](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-api.yml), [Ping连通性](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap全站](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl证书](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP服务器](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ftp.yml) -- [Mysql](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-sqlserver.yml), - 
[Oracle](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-mongodb.yml), - [达梦](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-dm.yml), [OpenGauss](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-opengauss.yml), - [ClickHouse](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-iotdb.yml) -- [Linux](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-windows.yml) -- [Tomcat](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-rabbitmq.yml), - [Flink](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-dynamic_tp.yml), - 
[Jetty](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-jetty.yml), [ActiveMQ](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-activemq.yml) -- [Kubernetes](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-docker.yml) -- 和更多的自定义监控。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 - -## 镜像部署 - -> HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 - -1. 开通服务器时选用 HertzBeat 镜像 -2. 启动服务器 -3. 配置HertzBeat的配置文件(可选) - - 修改位于 `/opt/hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 - - 若需使用邮件发送告警,需替换`application.yml`里面的邮件服务器参数 - - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](../start/mysql-change)) - - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](../start/tdengine-init) - - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](../start/iotdb-init) - -4. 配置用户配置文件(可选,自定义配置用户密码) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 - 具体参考 [配置修改账户密码](../start/account-modify) - -5. 部署启动 - 执行位于安装目录/opt/hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - - ``` - ./startup.sh - ``` - -6. 开始探索HertzBeat - 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 - -**HAVE FUN** - -### 部署常见问题 - -**最多的问题就是网络问题,请先提前排查** - -1. **按照流程部署,访问 无界面** - 请参考下面几点排查问题: - -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 - -2. 
**监控历史图表长时间都一直无数据** - -> 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 -> 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md deleted file mode 100644 index 8b712eb562f..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/resource.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: resource -title: 相关资源 -sidebar_label: 相关资源 ---- - -## HertzBeat PDF介绍文档 - -下载: [PDF](http://cdn.hertzbeat.com/hertzbeat.pdf) - -## 图标资源 - -### HertzBeat LOGO - -![logo](/img/hertzbeat-logo.svg) - -下载: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.png) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.jpg) - -### HertzBeat Brand LOGO - -![logo](/img/hertzbeat-brand.svg) - -下载: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-brand.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-brand.png) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-brand.png) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md deleted file mode 100644 index 269c63417e6..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/others/sponsor.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -id: sponsor -title: 赞助我们 -sidebar_label: 赞助我们 ---- - -**HertzBeat对个人或企业完全免费,我们也在全职做这件事情,如果您喜欢这个项目并且愿意提供帮助,请我们喝杯咖啡吧** - -planet - -感谢[吉实信息(构建全新的微波+光交易网络)](https://www.flarespeed.com) 赞助服务器采集节点 -感谢[蓝易云(全新智慧上云)](https://www.tsyvps.com/aff/BZBEGYLX) 赞助服务器采集节点 diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md deleted file mode 100644 index decef8a5b5b..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/account-modify.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -id: account-modify -title: 配置修改账户密码 -sidebar_label: 配置修改账户密码 ---- - -HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat -若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 -修改位于安装目录下的 `/hertzbeat/config/sureness.yml` 的配置文件,docker环境目录为`opt/hertzbeat/config/sureness.yml`,建议提前挂载映射 -配置文件内容参考 项目仓库[/script/sureness.yml](https://github.com/apache/hertzbeat/blob/master/script/sureness.yml) - -```yaml - -resourceRole: - - /api/account/auth/refresh===post===[admin,user,guest] - - /api/apps/**===get===[admin,user,guest] - - /api/monitor/**===get===[admin,user,guest] - - /api/monitor/**===post===[admin,user] - - /api/monitor/**===put===[admin,user] - - /api/monitor/**===delete==[admin] - - /api/monitors/**===get===[admin,user,guest] - - /api/monitors/**===post===[admin,user] - - /api/monitors/**===put===[admin,user] - - /api/monitors/**===delete===[admin] - - /api/alert/**===get===[admin,user,guest] - - /api/alert/**===post===[admin,user] - - /api/alert/**===put===[admin,user] - - /api/alert/**===delete===[admin] - - /api/alerts/**===get===[admin,user,guest] - - /api/alerts/**===post===[admin,user] - - /api/alerts/**===put===[admin,user] - - /api/alerts/**===delete===[admin] - - /api/notice/**===get===[admin,user,guest] - - /api/notice/**===post===[admin,user] - - /api/notice/**===put===[admin,user] - - /api/notice/**===delete===[admin] - - /api/tag/**===get===[admin,user,guest] - - /api/tag/**===post===[admin,user] - - /api/tag/**===put===[admin,user] - - /api/tag/**===delete===[admin] - - /api/summary/**===get===[admin,user,guest] - - /api/summary/**===post===[admin,user] - - 
/api/summary/**===put===[admin,user] - - /api/summary/**===delete===[admin] - -# 需要被过滤保护的资源,不认证鉴权直接访问 -# /api/v1/source3===get 表示 /api/v1/source3===get 可以被任何人访问 无需登录认证鉴权 -excludedResource: - - /api/account/auth/**===* - - /api/i18n/**===get - - /api/apps/hierarchy===get - # web ui 前端静态资源 - - /===get - - /dashboard/**===get - - /monitors/**===get - - /alert/**===get - - /account/**===get - - /setting/**===get - - /passport/**===get - - /**/*.html===get - - /**/*.js===get - - /**/*.css===get - - /**/*.ico===get - - /**/*.ttf===get - - /**/*.png===get - - /**/*.gif===get - - /**/*.jpg===get - - /**/*.svg===get - - /**/*.json===get - # swagger ui 资源 - - /swagger-resources/**===get - - /v2/api-docs===get - - /v3/api-docs===get - -# 用户账户信息 -# 下面有 admin tom lili 三个账户 -# eg: admin 拥有[admin,user]角色,密码为hertzbeat -# eg: tom 拥有[user],密码为hertzbeat -# eg: lili 拥有[guest],明文密码为lili, 加盐密码为1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin,user] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] -``` - -修改`sureness.yml`的如下**部分参数**:**[注意⚠️sureness配置的其它默认参数需保留]** - -```yaml - -# 用户账户信息 -# 下面有 admin tom lili 三个账户 -# eg: admin 拥有[admin,user]角色,密码为hertzbeat -# eg: tom 拥有[user],密码为hertzbeat -# eg: lili 拥有[guest],明文密码为lili, 加盐密码为1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin,user] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] -``` - -## 更新安全密钥 - -> 此密钥为账户安全加密管理的密钥,需要更新为相同长度的你自定义密钥串。 - -更新 `config` 目录下的 `application.yml` 文件,修改 `sureness.jwt.secret` 参数为你自定义的相同长度的密钥串。 - -```yaml -sureness: - jwt: - secret: 'CyaFv0bwq2Eik0jdrKUtsA6bx3sDJeFV643R - LnfKefTjsIfJLBa2YkhEqEGtcHDTNe4CU6+9 - 8tVt4bisXQ13rbN0oxhUZR73M6EByXIO+SV5 - dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' -``` - -**重启 HertzBeat 浏览器访问 即可探索使用 HertzBeat** diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md deleted file mode 100644 index 95bedddc350..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/custom-config.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: custom-config -title: 常见参数配置 -sidebar_label: 常见参数配置 ---- - -这里描述了如果配置短信服务器,内置可用性告警触发次数等。 - -**`hertzbeat`的配置文件`application.yml`** - -### 配置HertzBeat的配置文件 - -修改位于 `hertzbeat/config/application.yml` 的配置文件 -注意⚠️docker容器方式需要将application.yml文件挂载到主机本地 -安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - -1. 配置短信发送服务器 - -> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 - -在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) - -```yaml -common: - sms: - tencent: - secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY - secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA - app-id: 1435441637 - sign-name: 赫兹跳动 - template-id: 1343434 -``` - -1.1 腾讯云短信创建签名(sign-name) -![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) - -1.2 腾讯云短信创建正文模板(template-id) - -``` -监控:{1},告警级别:{2}。内容:{3} -``` - -![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) - -1.3 腾讯云短信创建应用(app-id) -![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) - -1.4 腾讯云访问管理(secret-id、secret-key) -![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) - -2. 配置告警自定义参数 - -```yaml -alerter: - # 自定义控制台地址 - console-url: https://console.tancloud.io -``` - -3. 
使用外置redis代替内存存储实时指标数据 - -> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 - -注意⚠️ `memory.enabled: false, redis.enabled: true` - -```yaml -warehouse: - store: - memory: - enabled: false - init-size: 1024 - redis: - enabled: true - host: 127.0.0.1 - port: 6379 - password: 123456 -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md deleted file mode 100644 index 1a042ae672e..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/docker-deploy.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -id: docker-deploy -title: 通过 Docker 方式安装 HertzBeat -sidebar_label: Docker方式部署 ---- - -> 推荐使用Docker部署HertzBeat - -1. 下载安装Docker环境 - Docker 工具自身的下载请参考以下资料: - [Docker官网文档](https://docs.docker.com/get-docker/) - [菜鸟教程-Docker教程](https://www.runoob.com/docker/docker-tutorial.html) - 安装完毕后终端查看Docker版本是否正常输出。 - - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. 拉取HertzBeat Docker镜像 - 镜像版本TAG可查看 [dockerhub 官方镜像仓库](https://hub.docker.com/r/apache/hertzbeat/tags) - 或者使用 [quay.io 镜像仓库](https://quay.io/repository/apache/hertzbeat) - - ```shell - docker pull apache/hertzbeat - docker pull apache/hertzbeat-collector - ``` - - 若网络超时或者使用 - - ```shell - docker pull quay.io/tancloud/hertzbeat - docker pull quay.io/tancloud/hertzbeat-collector - ``` - -3. 部署HertzBeat您可能需要掌握的几条命令 - - ```shell - #查看所有容器(在运行和已经停止运行的容器) - $ docker ps -a - #启动/终止/重启/运行状态 - $ docker start/stop/restart/stats 容器id或者容器名 - #进入容器并打开容器的shell终端 - $ docker exec -it 容器id或者容器名 /bin/bash - #退出容器终端 - ctrl+p然后ctrl+q - #完全退出容器的终端 - ctrl+d或者 - $ exit - ``` - -4. 
挂载并配置HertzBeat的配置文件(可选) - 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml - 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 或 [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) - - 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数 - - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) -5. 挂载并配置HertzBeat用户配置文件,自定义用户密码(可选) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 - 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml - 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) 或 [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) - 具体修改步骤参考 [配置修改账户密码](account-modify) -6. 
启动HertzBeat Docker容器 - -```shell -$ docker run -d -p 1157:1157 -p 1158:1158 \ - -e LANG=zh_CN.UTF-8 \ - -e TZ=Asia/Shanghai \ - -v $(pwd)/data:/opt/hertzbeat/data \ - -v $(pwd)/logs:/opt/hertzbeat/logs \ - -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ - -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ - --restart=always \ - --name hertzbeat apache/hertzbeat -``` - - 这条命令启动一个运行HertzBeat的Docker容器,并且将容器的1157端口映射到宿主机的1157端口上。若宿主机已有进程占用该端口,则需要修改主机映射端口。 - -- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 -- `-e LANG=zh_CN.UTF-8` : 设置系统语言 -- `-e TZ=Asia/Shanghai` : 设置系统时区 -- `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口,请注意,前面是宿主机的端口号,后面是容器的端口号。1157是WEB端口,1158是集群端口。 -- `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 -- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 -- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选,不需要可删除)挂载上上一步修改的本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。我们需要修改此配置文件的MYSQL,TDengine配置信息来连接外部服务。 -- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选,不需要可删除)挂载上一步修改的账户配置文件到容器中,若无修改账户需求可删除此命令参数。 -- 注意⚠️ 挂载文件时,前面参数为你自定义本地文件地址,后面参数为docker容器内文件地址(固定) -- `--name hertzbeat` : 命名容器名称 hertzbeat -- `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 - - ```shell - docker update --restart=always hertzbeat - ``` - -- `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** - -7. 开始探索HertzBeat - 浏览器访问 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 - -8. 
部署采集器集群(可选) - -```shell -$ docker run -d \ - -e IDENTITY=custom-collector-name \ - -e MODE=public \ - -e MANAGER_HOST=127.0.0.1 \ - -e MANAGER_PORT=1158 \ - --name hertzbeat-collector apache/hertzbeat-collector -``` - -这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 - -- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 -- `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 -- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 -- `-e MANAGER_HOST=127.0.0.1` : 重要⚠️ 设置连接的主HertzBeat服务地址IP。 -- `-e MANAGER_PORT=1158` : (可选) 设置连接的主HertzBeat服务地址端口,默认 1158. -- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 -- `--name hertzbeat-collector` : 命名容器名称 hertzbeat-collector -- `apache/hertzbeat-collector` : 使用拉取最新的的HertzBeat采集器官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat-collector`代替。** - -8. 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 - -**HAVE FUN** - -### Docker部署常见问题 - -**最多的问题就是网络问题,请先提前排查** - -1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** - 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 - -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` - -2. **按照流程部署,访问 无界面** - 请参考下面几点排查问题: - -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 - -3. **日志报错TDengine连接或插入SQL失败** - -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter - -4. **监控历史图表长时间都一直无数据** - -> 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 -> 二:Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB或Tdengine IP账户密码等配置是否正确 - -5. 
监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] - -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - -> 安装初始化此数据库参考 [TDengine安装初始化](tdengine-init) 或 [IoTDB安装初始化](iotdb-init) - -6. 安装配置了时序数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] - -> 请检查配置参数是否正确 -> iot-db 或td-engine enable 是否设置为true -> 注意⚠️若hertzbeat和IotDB,TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md deleted file mode 100644 index 908e03e0702..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/greptime-init.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: greptime-init -title: 依赖时序数据库服务GreptimeDB安装初始化 -sidebar_label: 使用GreptimeDB存储指标数据(可选) ---- - -HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) -我们推荐使用并长期支持VictoriaMetrics - -GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. -It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. - -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** - -### 通过Docker方式安装GreptimeDB - -> 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装GreptimeDB - -```shell -$ docker run -p 4000-4004:4000-4004 \ - -p 4242:4242 -v /opt/greptimedb:/tmp/greptimedb \ - --name greptime \ - greptime/greptimedb standalone start \ - --http-addr 0.0.0.0:4000 \ - --rpc-addr 0.0.0.0:4001 -``` - -`-v /opt/greptimedb:/tmp/greptimedb` 为greptimedb数据目录本地持久化挂载,需将`/opt/greptimedb`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 - -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 - -1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - greptime: - enabled: true - endpoint: localhost:4001 -``` - -2. 重启 HertzBeat - -### 常见问题 - -1. 时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 - -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md deleted file mode 100644 index fb144a25b08..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/influxdb-init.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: influxdb-init -title: 依赖时序数据库服务InfluxDB安装初始化 -sidebar_label: 使用InfluxDB存储指标数据(可选) ---- - -HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) -我们推荐使用并长期支持VictoriaMetrics - -InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海量时序数据的高性能读、高性能写、高效存储与实时分析等。 注意支持⚠️ 1.x版本。 - -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** - -### 1. 直接使用华为云服务 GaussDB For Influx - -> 开通使用[华为云云数据库 GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html) -> -> 获取云数据库对外暴露连接地址,账户密码即可 - -⚠️注意云数据库默认开启了SSL,云数据库地址应使用 `https:` - -### 2. 通过Docker方式安装InfluxDB - -> 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. 
Docker安装InfluxDB 1.x - -```shell -$ docker run -p 8086:8086 \ - -v /opt/influxdb:/var/lib/influxdb \ - influxdb:1.8 -``` - -`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 - -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 - -1. 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - influxdb: - enabled: true - server-url: http://localhost:8086 - username: root - password: root - expire-time: '30d' - replication: 1 -``` - -2. 重启 HertzBeat - -### 常见问题 - -1. 时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 - -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md deleted file mode 100644 index 9a5e9e4c51e..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/iotdb-init.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -id: iotdb-init -title: 依赖时序数据库服务IoTDB安装初始化 -sidebar_label: 使用IoTDB存储指标数据(可选) ---- - -HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) -我们推荐使用并长期支持VictoriaMetrics - -Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据的软件系统,我们使用其存储分析采集到的监控指标历史数据。支持V0.12 - V0.13版本,推荐使用V0.13.*版本。 - -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** - -> 如果您已有IoTDB环境,可直接跳到YML配置那一步。 - -### 通过Docker方式安装IoTDB - -> 可参考官方网站[安装教程](https://iotdb.apache.org/zh/UserGuide/V0.13.x/QuickStart/WayToGetIoTDB.html) -> -> 1. 
下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装IoTDB - -```shell -$ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ - -v /opt/iotdb/data:/iotdb/data \ - --name iotdb \ - apache/iotdb:0.13.3-node -``` - -`-v /opt/iotdb/data:/iotdb/data` 为IoTDB数据目录本地持久化挂载,需将`/iotdb/data`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 - -3. 在hertzbeat的`application.yml`配置文件配置IoTDB数据库连接 - - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用IotDB - iot-db: - enabled: true - host: 127.0.0.1 - rpc-port: 6667 - username: root - password: root - # V_O_12 || V_0_13 - version: V_0_13 - # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 - query-timeout-in-ms: -1 - # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) - expire-time: '7776000000' -``` - -4. 重启 HertzBeat - -### 常见问题 - -1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 - -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - -2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] - -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 - -3. 
安装配置了IotDB数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] - -> 请检查配置参数是否正确 -> iot-db enable是否设置为true -> 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md deleted file mode 100644 index ca028f18a63..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/mysql-change.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: mysql-change -title: 关系型数据库使用 Mysql 替换依赖的 H2 存储系统元数据 -sidebar_label: 元数据使用Mysql存储(可选) ---- - -MYSQL是一款值得信赖的关系型数据库,HertzBeat除了支持使用默认内置的H2数据库外,还可以切换为使用MYSQL存储监控信息,告警信息,配置信息等结构化关系数据。 - -注意⚠️ 使用外置Mysql数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 - -> 如果您已有MYSQL环境,可直接跳到数据库创建那一步。 - -### 通过Docker方式安装MYSQL - -1. 下载安装Docker环境 - Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 - 安装完毕后终端查看Docker版本是否正常输出。 - - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. Docker安装MYSQl - - ``` - $ docker run -d --name mysql \ - -p 3306:3306 \ - -v /opt/data:/var/lib/mysql \ - -e MYSQL_ROOT_PASSWORD=123456 \ - --restart=always \ - mysql:5.7 - ``` - - `-v /opt/data:/var/lib/mysql` 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 - 使用```$ docker ps```查看数据库是否启动成功 - -### 数据库创建 - -1. 进入MYSQL或使用客户端连接MYSQL服务 - `mysql -uroot -p123456` -2. 创建名称为hertzbeat的数据库 - `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` -3. 查看hertzbeat数据库是否创建成功 - `show databases;` - -### 修改hertzbeat的配置文件application.yml切换数据源 - -1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 - ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) - -需修改部分原参数: - -```yaml -spring: - datasource: - driver-class-name: org.h2.Driver - username: sa - password: 123456 - url: jdbc:h2:./data/hertzbeat;MODE=MYSQL -``` - -具体替换参数如下,需根据mysql环境配置账户密码IP: - -```yaml -spring: - datasource: - driver-class-name: com.mysql.cj.jdbc.Driver - username: root - password: 123456 - url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false -``` - -2. 通过docker启动时,需要修改host为宿主机的外网Ip,包括mysql连接字符串和redis。 - -**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** - -### 常见问题 - -1. 缺少hibernate的mysql方言,导致启动异常 Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set - -如果上述配置启动系统,出现`Caused by: org.hibernate.HibernateException: Access to DialectResolutionInfo cannot be null when 'hibernate.dialect' not set`异常, -需要在`application.yml`文件中增加以下配置: - -```yaml -spring: - jpa: - hibernate: - ddl-auto: update - properties: - hibernate: - dialect: org.hibernate.dialect.MySQL5InnoDBDialect -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md deleted file mode 100644 index 09aa1e2de3f..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/package-deploy.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -id: package-deploy -title: 通过安装包安装 HertzBeat -sidebar_label: 安装包方式部署 ---- - -> HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 - -1. 
下载HertzBeat安装包 - 下载您系统环境对应的安装包 `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` - - 从[GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) 仓库下载 - - 从[Download](https://hertzbeat.apache.org/docs/download) 仓库下载 -2. 配置HertzBeat的配置文件(可选) - 解压安装包到主机 eg: /opt/hertzbeat - - ``` - $ tar zxvf hertzbeat-xx.tar.gz - or - $ unzip -o hertzbeat-xx.zip - ``` - - 修改位于 `hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 - - 若需使用邮件发送告警,需替换`application.yml`里面的邮件服务器参数 - - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)) - - **强烈推荐** 以后我们将主要支持VictoriaMetrics作为时序数据库,若需使用时序数据库VictoriaMetrics来存储指标数据,需替换`application.yml`里面的`warehouse.store.victoria-metrics`参数 具体步骤参见 [使用VictoriaMetrics存储指标数据](victoria-metrics-init) - - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](tdengine-init) - - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](iotdb-init) - -3. 配置用户配置文件(可选,自定义配置用户密码) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过修改位于 `hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 - 具体参考 [配置修改账户密码](account-modify) - -4. 部署启动 - 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - - ``` - ./startup.sh - ``` - -5. 开始探索HertzBeat - 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 -6. 
部署采集器集群(可选) - - 下载解压您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` - - ```yaml - collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} - ``` - - - 启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` - - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 - -**HAVE FUN** - -### 安装包部署常见问题 - -**最多的问题就是网络环境问题,请先提前排查** - -1. **若您使用的是不含JDK的安装包,需您提前准备JAVA运行环境** - -安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) -要求:JAVA11环境 -下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) -安装后命令行检查是否成功安装 - -``` -$ java -version -java version "11.0.12" -Java(TM) SE Runtime Environment 18.9 (build 11.0.12+8-LTS-237) -Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) - -``` - -2. **按照流程部署,访问 无界面** - 请参考下面几点排查问题: - -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 - -3. **日志报错TDengine连接或插入SQL失败** - -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter - -4. 
**监控历史图表长时间都一直无数据** - -> 一:时序数据库是否配置,未配置则无历史图表数据 -> 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md deleted file mode 100644 index f3b30ac066d..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/postgresql-change.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: postgresql-change -title: 关系型数据库使用 PostgreSQL 替换依赖的 H2 存储系统元数据 -sidebar_label: 元数据使用PostgreSQL存储(可选) ---- - -PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBMS)。HertzBeat除了支持使用默认内置的H2数据库外,还可以切换为使用PostgreSQL存储监控信息,告警信息,配置信息等结构化关系数据。 - -注意⚠️ 使用外置PostgreSQL数据库替换内置H2数据库为可选项,但建议生产环境配置,以提供更好的性能 - -> 如果您已有PostgreSQL环境,可直接跳到数据库创建那一步。 - -### 通过Docker方式安装PostgreSQL - -1. Download and install the Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 - After the installation you can check if the Docker version normally output at the terminal. - - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. Docker安装 PostgreSQL - - ``` - docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 - ``` - - 使用```$ docker ps```查看数据库是否启动成功 - -3. Create database in container manually or with [script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgresql-iotdb/conf/sql/schema.sql). - -### 数据库创建 - -1. 进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 - - ``` - su - postgres - psql - ``` - -2. 创建名称为hertzbeat的数据库 - `CREATE DATABASE hertzbeat;` -3. 查看hertzbeat数据库是否创建成功 - `\l` - -### 修改hertzbeat的配置文件application.yml切换数据源 - -1. 
配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - 替换里面的`spring.database`数据源参数,IP端口账户密码驱动 - ⚠️注意`application.yml`文件内容需完整,除下方修改内容外其他参数需保留,完整内容见[/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml) - -```yaml -spring: - datasource: - driver-class-name: org.h2.Driver - username: sa - password: 123456 - url: jdbc:h2:./data/hertzbeat;MODE=MYSQL -``` - -具体替换参数如下,需根据 PostgreSQL 环境配置账户密码IP: - -```yaml -spring: - config: - activate: - on-profile: prod - datasource: - driver-class-name: org.postgresql.Driver - username: root - password: 123456 - url: jdbc:postgresql://127.0.0.1:5432/hertzbeat - hikari: - max-lifetime: 120000 - - jpa: - database: postgresql - hibernate: - ddl-auto: update - properties: - hibernate: - dialect: org.hibernate.dialect.PostgreSQLDialect -``` - -**启动 HertzBeat 浏览器访问 开始使用HertzBeat进行监控告警,默认账户密码 admin/hertzbeat** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md deleted file mode 100644 index 21c956521a9..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/quickstart.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -id: quickstart -title: 快速开始 -sidebar_label: 快速开始 ---- - -### 🐕 开始使用 - -- 如果您是想将HertzBeat部署到本地搭建监控系统,请参考下面的部署文档进行操作。 - -### 🍞 HertzBeat安装 - -> HertzBeat支持通过源码安装启动,Docker容器运行和安装包方式安装部署,CPU架构支持X86/ARM64。 - -#### 方式一:Docker方式快速安装 - -1. `docker` 环境仅需一条命令即可开始 - -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` - -```或者使用 quay.io (若 dockerhub 网络链接超时)``` - -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` - -2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` - -3. 
部署采集器集群 - -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` - -- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 -- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 -- `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 -- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 - -更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) - -#### 方式二:通过安装包安装 - -1. 下载您系统环境对应的安装包`hertzbeat-xx.tar.gz` [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) -2. 配置 HertzBeat 的配置文件 `hertzbeat/config/application.yml`(可选) -3. 部署启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` -4. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` -5. 部署采集器集群 - - 下载您系统环境对应采集器安装包`hertzbeat-collector-xx.tar.gz`到规划的另一台部署主机上 [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - - 配置采集器的配置文件 `hertzbeat-collector/config/application.yml` 里面的连接主HertzBeat服务的对外IP,端口,当前采集器名称(需保证唯一性)等参数 `identity` `mode` (public or private) `manager-host` `manager-port` - - ```yaml - collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} - ``` - - - 启动 `$ ./bin/startup.sh` 或 `bin/startup.bat` - - 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 - -更多配置详细步骤参考 [通过安装包安装HertzBeat](package-deploy) - -#### 方式三:本地代码启动 - -1. 此为前后端分离项目,本地代码调试需要分别启动后端工程`manager`和前端工程`web-app` -2. 后端:需要`maven3+`, `java11`和`lombok`环境,修改`YML`配置信息并启动`manager`服务 -3. 前端:需要`nodejs npm angular-cli`环境,待本地后端启动后,在`web-app`目录下启动 `ng serve --open` -4. 
浏览器访问 `http://localhost:4200` 即可开始,默认账号密码 `admin/hertzbeat` - -详细步骤参考 [参与贡献之本地代码启动](../others/contributing) - -##### 方式四:Docker-Compose 统一安装 hertzbeat+mysql+iotdb/tdengine - -通过 [docker-compose部署脚本](https://github.com/apache/hertzbeat/tree/master/script/docker-compose) 一次性把 mysql 数据库, iotdb/tdengine 时序数据库和 hertzbeat 安装部署。 - -详细步骤参考 [docker-compose部署方案](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/README.md) - -##### 方式五:Kubernetes Helm Charts 部署 hertzbeat+collector+mysql+iotdb - -通过 Helm Chart 一次性将 HertzBeat 集群组件部署到 Kubernetes 集群中。 - -详细步骤参考 [Artifact Hub](https://artifacthub.io/packages/helm/hertzbeat/hertzbeat) - -**HAVE FUN** - -### 🐵 依赖服务部署(可选) - -> HertzBeat依赖于 **关系型数据库** H2(默认已内置无需安装) 或 [Mysql](mysql-change) 和 **时序性数据库** [TDengine2+](tdengine-init) 或 [IOTDB](iotdb-init) (可选) - -**注意⚠️ 若需要部署时序数据库,IotDB 和 TDengine 任选其一即可!** - -##### 安装Mysql(可选) - -1. docker安装Mysql - `$ docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7` - `-v /opt/data:/var/lib/mysql` - 为mysql数据目录本地持久化挂载,需将`/opt/data`替换为实际本地存在的目录 -2. 创建名称为hertzbeat的数据库 - `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;` -3. 在hertzbeat的配置文件`application.yml`配置Mysql数据库替换H2内置数据库连接参数 - -详细步骤参考 [使用Mysql替换内置H2数据库](mysql-change) - -##### 安装TDengine(可选) - -1. docker安装TDengine - `docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp --name tdengine tdengine/tdengine:3.0.4.0` -2. 创建名称为hertzbeat的数据库 -3. 在hertzbeat的配置文件`application.yml`配置tdengine连接 - -详细步骤参考 [使用时序数据库TDengine存储指标数据(可选)](tdengine-init) - -##### 安装IotDB(可选) - -1. 
Docker安装IoTDB - -```shell -$ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ - -v /opt/iotdb/data:/iotdb/data \ - --name iotdb \ - apache/iotdb:0.13.3-node -``` - -详细步骤参考 [使用时序数据库IoTDB存储指标数据(可选)](iotdb-init) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md deleted file mode 100644 index 83afd21fc92..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/rainbond-deploy.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: rainbond-deploy -title: 使用 Rainbond 部署 HertzBeat -sidebar_label: Rainbond方式部署 ---- - -如果你不熟悉 Kubernetes,想在 Kubernetes 中安装 HertzBeat,可以使用 Rainbond 来部署。Rainbond 是一个基于 Kubernetes 构建的云原生应用管理平台,可以很简单的将你的应用部署到 Kubernetes中。 - -## 前提 - -安装 Rainbond,请参阅 [Rainbond 快速安装](https://www.rainbond.com/docs/quick-start/quick-install)。 - -## 部署 HertzBeat - -登录 Rainbond 后,点击左侧菜单中的 `应用市场`,切换到开源应用商店,在搜索框中搜索 `HertzBeat`,点击安装按钮。 - -![](/img/docs/start/install-to-rainbond.png) - -填写以下信息,然后点击确认按钮进行安装。 - -* 团队:选择现有团队或创建新的团队 -* 集群:选择对应的集群 -* 应用:选择现有应用或创建新的应用 -* 版本:选择要安装的 HertzBeat 版本 - -等待安装完成,即可访问 HertzBeat 应用。 - -![](/img/docs/start/hertzbeat-topology.png) - -:::tip -通过 Rainbond 安装的 HertzBeat,默认使用了外部的 Mysql 数据库 和 Redis 以及 IoTDB。同时也挂载了 HertzBeat 的配置文件,可以在 `组件 -> 环境配置 -> 配置文件设置` 中修改配置文件。 -::: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md deleted file mode 100644 index 835c4625638..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/sslcert-practice.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -id: ssl-cert-practice -title: SSL证书过期监控使用案例 -sidebar_label: 使用案例 ---- - -现在大部分网站都默认支持 HTTPS,我们申请的证书一般是3个月或者1年,很容易随着时间的流逝SSL证书过期了我们却没有第一时间发现,或者在过期之前没有及时更新证书。 - -这篇文章介绍如果使用 hertzbeat 监控系统来检测我们网站的SSL证书有效期,当证书过期时或证书快过期前几天,给我们发告警消息。 - -#### HertzBeat是什么 
- -HertzBeat 一个拥有强大自定义监控能力,无需Agent的实时监控工具。网站监测,PING连通性,端口可用性,数据库,操作系统,中间件,API监控,阈值告警,告警通知(邮件微信钉钉飞书)。 - -**官网: | ** - -github: -gitee: - -#### 安装 HertzBeat - -1.如果不想安装可以直接使用云服务 [TanCloud探云 console.tancloud.cn](https://console.tancloud.cn) - -2. `docker` 环境仅需一条命令即可安装 - -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` - -3. 安装成功浏览器访问 `localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` - -#### 监控SSL证书 - -1. 点击新增SSL证书监控 - -> 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 - -![](/img/docs/start/ssl_1.png) - -2. 配置监控网站 - -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 -> 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 - -![](/img/docs/start/ssl_2.png) - -3. 查看检测指标数据 - -> 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 - -![](/img/docs/start/ssl_3.png) - -![](/img/docs/start/ssl_11.png) - -4. 设置阈值(证书过期时触发) - -> 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 - -![](/img/docs/start/ssl_4.png) - -> 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 - -![](/img/docs/start/ssl_5.png) - -> 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 - -![](/img/docs/start/ssl_6.png) - -5. 设置阈值(证书过期前一周触发) - -> 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 - -![](/img/docs/start/ssl_7.png) - -> 最终可以在告警中心看到已触发的告警。 - -![](/img/docs/start/ssl_8.png) - -6. 告警通知(通过钉钉微信飞书等及时通知) - -> 监控系统 -> 告警通知 -> 新增接收人 - -![](/img/docs/start/ssl_9.png) - -钉钉微信飞书等token配置可以参考帮助文档 - - - - -> 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 - -![](/img/docs/start/ssl_10.png) - -7. OK 当阈值触发后我们就可以收到对应告警消息啦,如果没有配通知,也可以在告警中心查看告警信息。 - ----- - -#### 完 - -监控SSL证书的实践就到这里,当然对hertzbeat来说这个功能只是冰山一角,如果您觉得hertzbeat这个开源项目不错的话欢迎给我们在GitHub Gitee star哦,灰常感谢。感谢老铁们的支持。笔芯! 
- -**github: ** - -**gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md deleted file mode 100644 index 757cb363256..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/tdengine-init.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -id: tdengine-init -title: 依赖时序数据库服务TDengine安装初始化 -sidebar_label: 使用TDengine存储指标数据(可选) ---- - -HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) -我们推荐使用并长期支持VictoriaMetrics - -TDengine是一款开源物联网时序型数据库,我们用其存储采集到的监控指标历史数据。 注意支持⚠️ 3.x版本。 - -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** - -> 如果您已有TDengine环境,可直接跳到创建数据库实例那一步。 - -### 通过Docker方式安装TDengine - -> 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装TDengine - -```shell -$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ - -v /opt/taosdata:/var/lib/taos \ - --name tdengine -e TZ=Asia/Shanghai \ - tdengine/tdengine:3.0.4.0 -``` - -`-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 -`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 -使用```$ docker ps```查看数据库是否启动成功 - -### 创建数据库实例 - -> [TDengine CLI 小技巧](https://docs.taosdata.com/develop/model/) - -1. 进入数据库Docker容器 - - ``` - docker exec -it tdengine /bin/bash - ``` - -2. 修改账户密码 - - > 建议您修改密码。TDengine默认的账户密码是 root/taosdata - > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: - - ``` - root@tdengine-server:~/TDengine-server# taos - Welcome to the TDengine shell from Linux, Client Version - Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> - ``` - - > 在 TDengine CLI 中使用 alter user 命令可以修改用户密码,缺省密码为 taosdata - -3. 
创建名称为hertzbeat的数据库 - - 执行创建数据库命令 - - ``` - taos> show databases; - taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; - ``` - - 上述语句将创建一个名为 hertzbeat 的库,这个库的数据将保留90天(超过90天将被自动删除),每 10 天一个数据文件,每个 VNode 的写入内存池的大小为 16 MB - -4. 查看hertzbeat数据库是否成功创建 - - ``` - taos> show databases; - taos> use hertzbeat; - ``` - -5. 退出TDengine CLI - - ``` - 输入 q 或 quit 或 exit 回车 - ``` - -**注意⚠️若是安装包安装的TDengine** - -> 除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter - -### 在hertzbeat的`application.yml`配置文件配置此数据库连接 - -1. 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - td-engine: - enabled: true - driver-class-name: com.taosdata.jdbc.rs.RestfulDriver - url: jdbc:TAOS-RS://localhost:6041/hertzbeat - username: root - password: taosdata -``` - -2. 重启 HertzBeat - -### 常见问题 - -1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 - -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 - -2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] - -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 - -3. 监控详情历史图片不展示或无数据,已经配置了TDengine - -> 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 - -4. 
安装配置了TDengine数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] - -> 请检查配置参数是否正确 -> td-engine enable是否设置为true -> 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md deleted file mode 100644 index ed85c460e76..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/upgrade.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -id: upgrade -title: HertzBeat 新版本更新指引 -sidebar_label: 版本更新指引 ---- - -**HertzBeat 的发布版本列表** - -- [Download](https://hertzbeat.apache.org/docs/download) -- [Gitee Release](https://gitee.com/hertzbeat/hertzbeat/releases) -- [DockerHub Release](https://hub.docker.com/r/apache/hertzbeat/tags) -- [Docker quay.io Release](https://quay.io/repository/apache/hertzbeat?tab=tags) - -HertzBeat 的元数据信息保存在 H2 或 Mysql, PostgreSQL 关系型数据库内, 采集指标数据存储在 TDengine, IotDB 等时序数据库内。 - -**升级前您需要保存备份好数据库的数据文件和监控模版文件** - -### Docker部署方式的升级 - -1. 若使用了自定义监控模版 - - 需要备份 `docker cp hertzbeat:/opt/hertzbeat/define ./define` 当前运行 docker 容器里面的 `/opt/hertzbeat/define` 目录到当前主机下 - - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - - 然后在后续升级启动 docker 容器的时候需要挂载上这个 define 目录,`-v $(pwd)/define:/opt/hertzbeat/define` - - `-v $(pwd)/define:/opt/hertzbeat/define` -2. 若使用内置默认 H2 数据库 - - 需挂载或备份 `-v $(pwd)/data:/opt/hertzbeat/data` 容器内的数据库文件目录 `/opt/hertzbeat/data` - - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,注意需要将数据库文件目录挂载 `-v $(pwd)/data:/opt/hertzbeat/data` -3. 若使用外置关系型数据库 Mysql, PostgreSQL - - 无需挂载备份容器内的数据库文件目录 - - 停止并删除容器,删除本地 HertzBeat docker 镜像,拉取新版本镜像 - - 参考 [Docker安装HertzBeat](docker-deploy) 使用新镜像创建新的容器,`application.yml`配置数据库连接即可 - -### 安装包部署方式的升级 - -1. 
若使用内置默认 H2 数据库 - - 备份安装包下的数据库文件目录 `/opt/hertzbeat/data` - - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动 -2. 若使用外置关系型数据库 Mysql, PostgreSQL - - 无需备份安装包下的数据库文件目录 - - 若有自定义监控模版,需备份 `/opt/hertzbeat/define` 下的模版YML - - `bin/shutdown.sh` 停止 HertzBeat 进程,下载新安装包 - - 参考 [安装包安装HertzBeat](package-deploy) 使用新安装包启动,`application.yml`配置数据库连接即可 - -**HAVE FUN** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md deleted file mode 100644 index 9b0bc7a6565..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/start/victoria-metrics-init.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: victoria-metrics-init -title: 依赖时序数据库服务VictoriaMetrics安装初始化 -sidebar_label: 使用VictoriaMetrics存储指标数据(推荐) ---- - -HertzBeat的历史数据存储依赖时序数据库,任选其一安装初始化即可,也可不安装(注意⚠️但强烈建议生产环境配置) -我们推荐使用并长期支持VictoriaMetrics - -VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决方案和时序数据库,兼容 Prometheus 生态。推荐版本(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) - -**注意⚠️ 时序数据库安装配置为可选项,但强烈建议生产环境配置,以提供更完善的历史图表功能,高性能和稳定性** -**⚠️ 若不配置时序数据库,则只会留最近一小时历史数据** - -> 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 - -### 通过Docker方式安装VictoriaMetrics - -> 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装VictoriaMetrics - -```shell -$ docker run -d -p 8428:8428 \ - -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ - --name victoria-metrics \ - victoriametrics/victoria-metrics:v1.95.1 -``` - -`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 -使用```$ docker ps```查看数据库是否启动成功 - -3. 
在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 - - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 - 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 - -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用 victoria-metrics - victoria-metrics: - enabled: true - url: http://localhost:8428 - username: root - password: root -``` - -4. 重启 HertzBeat - -### 常见问题 - -1. 时序数据库是否都需要配置,能不能都用 - -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md deleted file mode 100644 index 5002aa52f7d..00000000000 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/template.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: template -title: 监控模版中心 -sidebar_label: 监控模版 ---- - -> Hertzbeat 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 -> 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? - -这是它的架构原理: - -![hertzBeat](/img/docs/hertzbeat-arch.png) - -**我们将所有监控采集类型(mysql,website,jvm,k8s)都定义为yml模版,用户可以导入这些模版到hertzbeat系统中,使其支持对应类型的监控,非常方便!** - -![](/img/docs/advanced/extend-point-1.png) - -**欢迎大家一起贡献你使用过程中自定义的通用监控类型YML模版,可用的模板如下:** - -### 应用服务监控模版 - - 👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
- 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
- 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
- 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
- 👉 [SpringBoot2.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml)
- 👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
- 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
- -### 数据库监控模版 - - 👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
- 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
- 👉 [PostgreSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml)
- 👉 [SqlServer database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml)
- 👉 [Oracle database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml)
- 👉 [DM database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml)
- 👉 [OpenGauss database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml)
- 👉 [IoTDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml)
- 👉 [ElasticSearch database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml)
- 👉 [MongoDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml)
- 👉 [ClickHouse database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml)
- 👉 [Redis database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml)
- 👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
- 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
- -### 操作系统监控模版 - - 👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
- 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
- 👉 [Ubuntu operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml)
- 👉 [Centos operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml)
- 👉 [EulerOS operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml)
- 👉 [Fedora CoreOS operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml)
- 👉 [OpenSUSE operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml)
- 👉 [Rocky Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml)
- 👉 [Red Hat operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml)
- 👉 [FreeBSD operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml)
- 👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
- 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- -### 中间件监控模版 - - 👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
- 👉 [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml)
- 👉 [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml)
- 👉 [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml)
- 👉 [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml)
- 👉 [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml)
- 👉 [ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml)
- 👉 [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml)
- 👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
- 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- -### 云原生监控模版 - - 👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
- 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
- -### 网络监控模版 - - 👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
- 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
- 👉 [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml)
- 👉 [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml)
- 👉 [H3CSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml)
- ---- - -**Have Fun!** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md index 5eabbc561ed..5402bc696aa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md @@ -168,6 +168,8 @@ $ svn ci -m "add gpg key for muchunjin" ## 3. 准备物料 & 发布 +### 准备发布物料 + #### 3.1 基于 master 分支,创建一个名为 release-${release_version}-rcx 的分支,例如 release-1.6.0-rc1。并基于 release-1.6.0-rc1 分支创建一个名为 v1.6.0-rc1 的标签,并将此标签设置为预发布 ```shell @@ -228,6 +230,8 @@ release-1.6.0-rc1 生成的代码归档文件在 `dist/apache-hertzbeat-1.6.0-incubating-src.tar.gz` +### 签名发布物料 + #### 3.5 对二进制和源码包进行签名 将上步骤生成的三个文件包放到`dist`目录下(若无则新建目录),然后对文件包进行签名和SHA512校验和生成。 @@ -335,7 +339,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" ## 4. 进入社区投票阶段 -#### 4.1 发送社区投票邮件 +### 4.1 发送社区投票邮件 发送社区投票邮件需要至少三个`+1`,且无`-1`。 @@ -426,7 +430,7 @@ ChunJin Mu 邮件内容中的一项是`Vote thread`,在 查看获取 -#### 3.2 发送孵化社区投票邮件 +### 3.2 发送孵化社区投票邮件 发送孵化社区投票邮件需要至少三个`+1`,且无`-1`。 @@ -529,13 +533,13 @@ ChunJin Mu ## 4. 完成最终发布步骤 -#### 4.1 迁移源代码和二进制包 +### 4.1 迁移源代码和二进制包 ```shell svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 https://dist.apache.org/repos/dist/release/incubator/hertzbeat/1.6.0 -m "transfer packages for 1.6.0-RC1" ``` -#### 4.2 添加新版本下载地址到官网 +### 4.2 添加新版本下载地址到官网 @@ -544,7 +548,7 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http > 需要注意的是,下载链接可能需要一个小时后才会生效,请注意。 -#### 4.3 Github 生成 Release +### 4.3 Github 生成 Release 基于 release-1.6.0-rc1 分支修改创建一个名为 v1.6.0 的标签,并将此标签设置为 latest release。 @@ -571,23 +575,29 @@ release note: xxx 然后将 release-1.6.0-rc1 分支重命名 为 release-1.6.0。 -#### 4.4 发送新版本公告邮件 +### 4.4 发送新版本公告邮件 > `Send to`:
> `cc`: <dev@hertzbeat.apache.org>、<announce@apache.org>
-> `Title`: [ANNOUNCE] Release Apache HertzBeat (incubating) 1.6.0
+> `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: ``` -Hi Incubator Community, +Hi Community, We are glad to announce the release of Apache HertzBeat (incubating) 1.6.0. -Once again I would like to express my thanks to your help. +Thanks again for your help. + +Apache HertzBeat (https://hertzbeat.apache.org/) - a real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. + +Download Link: +https://hertzbeat.apache.org/docs/download/ -Apache HertzBeat(https://hertzbeat.apache.org/) - a real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. +Release Note: +https://github.com/apache/hertzbeat/releases/tag/v1.6.0 -Download Links: https://hertzbeat.apache.org/download/ -Release Notes: https://github.com/apache/hertzbeat/releases/tag/v1.6.0 +Website: +https://hertzbeat.apache.org/ HertzBeat Resources: - Issue: https://github.com/apache/hertzbeat/issues diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md index 386ec0e91e1..e012e347c20 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dns.md @@ -5,7 +5,7 @@ sidebar_label: DNS服务器 keywords: [ 开源监控系统, 开源DNS监控工具, 监控DNS指标 ] --- -# 监控:DNS服务器 +## 监控:DNS服务器 > 收集和监控DNS的常规性能指标。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md index 4f0363f621d..cc4c7254afe 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md @@ -13,7 +13,7 @@ keywords: [开源监控系统, 开源Kubernetes监控] 参考获取token步骤 -#### 方式一 +### 方式一 1. 
创建service account并绑定默认cluster-admin管理员集群角色 @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### 方式二 +### 方式二 ``` kubectl create serviceaccount cluster-admin diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md index a160f2501e4..0806232cee5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/ntp.md @@ -7,7 +7,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito NTP监控的中文文档如下: -# NTP监控 +## NTP监控 > 收集和监控NTP的常规性能指标。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md index 85756d3df46..07d5a8b8ed3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md @@ -87,7 +87,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN **HAVE FUN** -### 🐵 依赖服务部署(可选) +#### 🐵 依赖服务部署(可选) > HertzBeat依赖于 **关系型数据库** H2(默认已内置无需安装) 或 [Mysql](mysql-change) 和 **时序性数据库** [TDengine2+](tdengine-init) 或 [IOTDB](iotdb-init) (可选) diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md deleted file mode 100644 index 1030e382685..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-default.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -id: extend-http-default -title: HTTP Protocol System Default Parsing Method -sidebar_label: Default Parsing Method ---- - -> After calling the HTTP api to obtain the response data, use the default parsing method of hertzbeat to parse the 
response data. - -**The interface response data structure must be consistent with the data structure rules specified by hertzbeat** - -### HertzBeat data format specification - -Note⚠️ The response data is JSON format. - -Single layer format :key-value - -```json -{ - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" -} -``` - -Multilayer format:Set key value in the array - -```json -[ - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - }, - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - } -] -``` - -eg: -Query the CPU information of the custom system. The exposed interface is `/metrics/cpu`. We need `hostname,core,useage` Metric. -If there is only one virtual machine, its single-layer format is : - -```json -{ - "hostname": "linux-1", - "core": 1, - "usage": 78.0, - "allTime": 200, - "runningTime": 100 -} -``` - -If there are multiple virtual machines, the multilayer format is: : - -```json -[ - { - "hostname": "linux-1", - "core": 1, - "usage": 78.0, - "allTime": 200, - "runningTime": 100 - }, - { - "hostname": "linux-2", - "core": 3, - "usage": 78.0, - "allTime": 566, - "runningTime": 34 - }, - { - "hostname": "linux-3", - "core": 4, - "usage": 38.0, - "allTime": 500, - "runningTime": 20 - } -] -``` - -**The corresponding monitoring template yml can be configured as follows** - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# Monitoring application type(consistent with the file name) eg: linux windows tomcat mysql aws... 
-app: example -name: - zh-CN: 模拟应用类型 - en-US: EXAMPLE APP -params: - # field-field name identifier - - field: host - # name-parameter field display name - name: - zh-CN: 主机Host - en-US: Host - # type-field type, style(most mappings are input label type attribute) - type: host - # required or not true-required false-optional - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # When type is number, range is used to represent the range. - range: '[0,65535]' - required: true - # port default - defaultValue: 80 - # Prompt information of parameter input box - placeholder: 'Please enter the port' -# Metric group list -metrics: -# The first monitoring Metric group cpu -# Note:the built-in monitoring Metrics have (responseTime - response time) - - name: cpu - # The smaller Metric group scheduling priority(0-127), the higher the priority. After completion of the high priority Metric group collection,the low priority Metric group will then be scheduled. Metric groups with the same priority will be scheduled in parallel. - # Metric group with a priority of 0 is an availability group which will be scheduled first. If the collection succeeds, the scheduling will continue otherwise interrupt scheduling. - priority: 0 - # metrics fields list - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: hostname - type: 1 - label: true - - field: usage - type: 0 - unit: '%' - - field: core - type: 0 -# protocol for monitoring and collection eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http -# Specific collection configuration when the protocol is HTTP protocol - http: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - # url request interface path - url: /metrics/cpu - # request mode: GET POST PUT DELETE PATCH - method: GET - # enable ssl/tls or not, that is to say, HTTP or HTTPS. 
The default is false - ssl: false - # parsing method for reponse data: default-system rules, jsonPath-jsonPath script, website-website availability Metric monitoring - # Hertzbeat default parsing is used here - parseType: default -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md deleted file mode 100644 index 810556ea95d..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-hertzbeat.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -id: extend-http-example-hertzbeat -title: Tutorial 1 Adapting a monitoring type based on HTTP protocol -sidebar_label: Tutorial 1 Adapting an HTTP protocol monitoring ---- - -Through this tutorial, we describe step by step how to add a monitoring type based on the http protocol under the hertzbeat monitoring tool. - -Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [http Protocol Customization](extend-http). - -### HTTP protocol parses the general response structure to obtain metric data - -> In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. - -``` -{ - "code": 200, - "msg": "success", - "data": {} -} - -``` - -As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. 
Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API -`http://localhost:1157/api/summary`, the response data is: - -``` -{ - "msg": null, - "code": 0, - "data": { - "apps": [ - { - "category": "service", - "app": "jvm", - "status": 0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - }, - { - "category": "service", - "app": "website", - "status": 0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - } - ] - } -} -``` - -**This time we get the metric data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** - -### Add custom monitoring template `hertzbeat` - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. - -**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# The monitoring type eg: linux windows tomcat mysql aws... 
-app: hertzbeat -# The monitoring i18n name -name: - zh-CN: HertzBeat监控系统 - en-US: HertzBeat Monitor -# Input params define for monitoring(render web ui by the definition) -params: - # field-param field key - - field: host - # name-param field display i18n name - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - type: host - # required-true or false - required: true - # field-param field key - - field: port - # name-param field display i18n name - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - type: number - # when type is number, range is required - range: '[0,65535]' - # required-true or false - required: true - # default value - defaultValue: 1157 - - field: ssl - name: - zh-CN: 启用HTTPS - en-US: HTTPS - type: boolean - required: true - - field: timeout - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - type: number - required: false - hide: true - - field: authType - name: - zh-CN: 认证方式 - en-US: Auth Type - type: radio - required: false - hide: true - options: - - label: Basic Auth - value: Basic Auth - - label: Digest Auth - value: Digest Auth - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: false - hide: true - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - hide: true -metrics: - # the first metrics summary - # attention: Built-in monitoring metrics contains (responseTime - Response time) - - name: summary - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue - priority: 0 - # collect metrics content - fields: - # metrics content contains field-metric name, type-metric type:0-number,1-string, label-if is metrics label, unit-metric unit('%','ms','MB') - - field: app - 
type: 1 - label: true - - field: category - type: 1 - - field: status - type: 0 - - field: size - type: 0 - - field: availableSize - type: 0 - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk, we use HTTP protocol here - protocol: http - # the config content when protocol is http - http: - # http host: ipv4 ipv6 domain - host: ^_^host^_^ - # http port - port: ^_^port^_^ - # http url, we don't need to enter a parameter here, just set the fixed value to /api/summary - url: /api/summary - timeout: ^_^timeout^_^ - # http method: GET POST PUT DELETE PATCH, default fixed value is GET - method: GET - # if enabled https, default value is false - ssl: ^_^ssl^_^ - # http auth - authorization: - # http auth type: Basic Auth, Digest Auth, Bearer Token - type: ^_^authType^_^ - basicAuthUsername: ^_^username^_^ - basicAuthPassword: ^_^password^_^ - digestAuthUsername: ^_^username^_^ - digestAuthPassword: ^_^password^_^ - # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-for website monitoring, we use jsonpath to parse response data here - parseType: jsonPath - parseScript: '$.data.apps.*' - -``` - -**The addition is complete, now we save and apply. We can see that the system page has added a `hertzbeat` monitoring type.** - -![](/img/docs/advanced/extend-http-example-1.png) - -### The system page adds the monitoring of `hertzbeat` monitoring type - -> We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. - -![](/img/docs/advanced/extend-http-example-2.png) - -![](/img/docs/advanced/extend-http-example-3.png) - -> After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! - -![](/img/docs/advanced/extend-http-example-4.png) - -### Set threshold alarm notification - -> Next, we can set the threshold normally. 
After the alarm is triggered, we can view it in the alarm center, add recipients, set alarm notifications, etc. Have Fun!!! - ----- - -#### over - -This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! - -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. - -**github: ** - -**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md deleted file mode 100644 index d53e304500d..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-example-token.md +++ /dev/null @@ -1,393 +0,0 @@ ---- -id: extend-http-example-token -title: Tutorial 2 Obtain TOKEN index value based on HTTP protocol for subsequent collection and authentication -sidebar_label: Tutorial 2 Get TOKEN for subsequent authentication ---- - -Through this tutorial, we will describe step by step how to modify on the basis of tutorial 1, add an metrics, first call the authentication interface to obtain the TOKEN, and use the TOKEN as a parameter for the subsequent metrics collection and authentication. - -Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [http Protocol Customization](extend-http). - -### Request process - -【**Authentication information metrics (highest priority)**】【**HTTP interface carries account password call**】->【**Response data analysis**】->【**Analysis and issuance of TOKEN-accessToken as an metric**] -> [**Assign accessToken as a variable parameter to other collection index groups**] - -> Here we still use the hertzbeat monitoring example of Tutorial 1! 
The hertzbeat background interface not only supports the basic direct account password authentication used in Tutorial 1, but also supports token authentication. - -**We need `POST` to call the login interface `/api/account/auth/form` to get `accessToken`, the request body (json format) is as follows**: - -```json -{ - "credential": "hertzbeat", - "identifier": "admin" -} -``` - -**The response structure data is as follows**: - -```json -{ - "data": { - "token": "xxxx", - "refreshToken": "xxxx" - }, - "msg": null, - "code": 0 -} -``` - -### Add custom monitoring type `hertzbeat_token` - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -1. The custom monitoring type needs to add a new configuration monitoring template yml. We directly reuse the `hertzbeat` monitoring type in Tutorial 1 and modify it based on it - -A monitoring configuration definition file named after the monitoring type - hertzbeat_token - -We directly reuse the definition content in `hertzbeat` and modify it to our current monitoring type `hertzbeat_auth` configuration parameters, such as `app, category, etc`. - -```yaml -# This monitoring type belongs to the category: service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# Monitoring application type (consistent with the file name) eg: linux windows tomcat mysql aws... 
-app: hertzbeat_token -name: - en-GB: HertzBeat Monitoring (Token) - en-US: Hertz Beat Monitor (Token) -params: - # field - field name identifier - - field: host - # name - parameter field display name - name: - en-CN: Host Host - en-US: Host - # type-field type, style (mostly map input tag type attribute) - type: host - # Whether it is a required item true-required false-optional - required: true - - field: port - name: - en-CN: port - en-US: Port - type: number - # When the type is number, use range to represent the range - range: '[0,65535]' - required: true - # port default - defaultValue: 1157 - # Parameter input box prompt information - placeholder: 'Please enter the port' - - field: ssl - name: - en-CN: Enable SSL - en-US: SSL - # When the type is boolean, the front end uses switch to display the switch - type: boolean - required: false - - field: contentType - name: - en-CN: Content-Type - en-US: Content-Type - type: text - placeholder: 'Request Body Type' - required: false - - field: payload - name: - en-CN: request BODY - en-US: BODY - type: textarea - placeholder: 'Available When POST PUT' - required: false -# Index group list configuration under todo -metrics: .... - -``` - -### Define metrics `auth` login request to get `token` - -1. Add an index group definition `auth` in `hertzbeat_token`, set the collection priority to the highest 0, and collect the index `token`. - -```yaml - -# This monitoring type belongs to the category: service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# Monitoring application type (consistent with the file name) eg: linux windows tomcat mysql aws... 
-app: hertzbeat_token -name: - en-GB: HertzBeat Monitoring (Token) - en-US: Hertz Beat Monitor (Token) -params: - # field - field name identifier - - field: host - # name - parameter field display name - name: - en-CN: Host Host - en-US: Host - # type-field type, style (mostly map input tag type attribute) - type: host - # Whether it is a required item true-required false-optional - required: true - - field: port - name: - en-CN: port - en-US: Port - type: number - # When the type is number, use range to represent the range - range: '[0,65535]' - required: true - # port default - defaultValue: 1157 - # Parameter input box prompt information - placeholder: 'Please enter the port' - - field: ssl - name: - en-CN: Enable SSL - en-US: SSL - # When the type is boolean, the front end uses switch to display the switch - type: boolean - required: false - - field: contentType - name: - en-CN: Content-Type - en-US: Content-Type - type: text - placeholder: 'Request Body Type' - required: false - - field: payload - name: - en-CN: request BODY - en-US: BODY - type: textarea - placeholder: 'Available When POST PUT' - required: false -# List of metricss -metrics: - # The first monitoring index group auth - # Note: Built-in monitoring metrics have (responseTime - response time) - - name: auth - # The smaller the index group scheduling priority (0-127), the higher the priority, and the index group with low priority will not be scheduled until the collection of index groups with high priority is completed, and the index groups with the same priority will be scheduled and collected in parallel - # The metrics with priority 0 is the availability metrics, that is, it will be scheduled first, and other metricss will continue to be scheduled if the collection is successful, and the scheduling will be interrupted if the collection fails - priority: 0 - # Specific monitoring metrics in the metrics - fields: - # metric information includes field name type field type: 0-number, 1-string , 
label-if is metrics label, unit: metric unit - - field: token - type: 1 - - field: refreshToken - type: 1 - # Monitoring and collection protocol eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http - # When the protocol is the http protocol, the specific collection configuration - http: - host: ^_^host^_^ - # port - port: ^_^port^_^ - # url request interface path - url: /api/account/auth/form - # Request method GET POST PUT DELETE PATCH - method: POST - # Whether to enable ssl/tls, that is, http or https, default false - ssl: ^_^ssl^_^ - payload: ^_^payload^_^ - # request header content - headers: - content-type: ^_^contentType^_^ - # Response data analysis method: default-system rules, jsonPath-jsonPath script, website-website usability metric monitoring - parseType: jsonPath - parseScript: '$.data' - -``` - -**At this time, save and apply, add `hertzbeat_token` type monitoring on the system page, configure input parameters, `content-type` fill in `application/json`, `request Body` fill in the account password json as follows:** - -```json -{ - "credential": "hertzbeat", - "identifier": "admin" -} -``` - -![](/img/docs/advanced/extend-http-example-5.png) - -**After the addition is successful, we can see the `token`, `refreshToken` metric data we collected on the details page.** - -![](/img/docs/advanced/extend-http-example-6.png) - -![](/img/docs/advanced/extend-http-example-7.png) - -### Use `token` as a variable parameter to collect and use the following metricss - -**Add an index group definition `summary` in `app-hertzbeat_token.yml`, which is the same as `summary` in Tutorial 1, and set the collection priority to 1** - -**Set the authentication method in the HTTP protocol configuration of this index group to `Bearer Token`, assign the index `token` collected by the previous index group `auth` as a parameter, and use `^o^` as the internal replacement symbol, that is `^o^token^o^`. 
as follows:** - -```yaml - - name: summary -# When the protocol is the http protocol, the specific collection configuration - http: - # authentication - authorization: - # Authentication methods: Basic Auth, Digest Auth, Bearer Token - type: Bearer Token - bearerTokenToken: ^o^token^o^ -``` - -**The final `hertzbeat_token` template yml is defined as follows:** - -```yaml - -# This monitoring type belongs to the category: service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# Monitoring application type (consistent with the file name) eg: linux windows tomcat mysql aws... -app: hertzbeat_token -name: - en-GB: HertzBeat Monitoring (Token) - en-US: Hertz Beat Monitor (Token) -params: - # field - field name identifier - - field: host - # name - parameter field display name - name: - en-CN: Host Host - en-US: Host - # type-field type, style (mostly map input tag type attribute) - type: host - # Whether it is a required item true-required false-optional - required: true - - field: port - name: - en-CN: port - en-US: Port - type: number - # When the type is number, use range to represent the range - range: '[0,65535]' - required: true - # port default - defaultValue: 1157 - # Parameter input box prompt information - placeholder: 'Please enter the port' - - field: ssl - name: - en-CN: Enable SSL - en-US: SSL - # When the type is boolean, the front end uses switch to display the switch - type: boolean - required: false - - field: contentType - name: - en-CN: Content-Type - en-US: Content-Type - type: text - placeholder: 'Request Body Type' - required: false - - field: payload - name: - en-CN: request BODY - en-US: BODY - type: textarea - placeholder: 'Available When POST PUT' - required: false -# List of metricss -metrics: -# The first monitoring index group cpu -# Note: Built-in monitoring metrics have (responseTime - response time) - - name: auth - # The smaller the index group scheduling 
priority (0-127), the higher the priority, and the index group with low priority will not be scheduled until the collection of index groups with high priority is completed, and the index groups with the same priority will be scheduled and collected in parallel - # The metrics with priority 0 is the availability metrics, that is, it will be scheduled first, and other metricss will continue to be scheduled if the collection is successful, and the scheduling will be interrupted if the collection fails - priority: 0 - # Specific monitoring metrics in the metrics - fields: - # metric information includes field name type field type: 0-number, 1-string , label-if is metrics label, unit: metric unit - - field: token - type: 1 - - field: refreshToken - type: 1 - # Monitoring and collection protocol eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http - # When the protocol is the http protocol, the specific collection configuration - http: - host: ^_^host^_^ - # port - port: ^_^port^_^ - # url request interface path - url: /api/account/auth/form - # Request method GET POST PUT DELETE PATCH - method: POST - # Whether to enable ssl/tls, that is, http or https, default false - ssl: ^_^ssl^_^ - payload: ^_^payload^_^ - # request header content - headers: - content-type: ^_^contentType^_^ - ^_^headers^_^: ^_^headers^_^ - # Request parameter content - params: - ^_^params^_^: ^_^params^_^ - # Response data analysis method: default-system rules, jsonPath-jsonPath script, website-website usability metric monitoring - parseType: jsonPath - parseScript: '$.data' ---- - - name: summary - # The smaller the index group scheduling priority (0-127), the higher the priority, and the index group with low priority will not be scheduled until the collection of index groups with high priority is completed, and the index groups with the same priority will be scheduled and collected in parallel - # The metrics with priority 0 is the availability metrics, that is, it will be scheduled first, 
and other metricss will continue to be scheduled if the collection is successful, and the scheduling will be interrupted if the collection fails - priority: 1 - # Specific monitoring metrics in the metrics - fields: - # metric information includes field name type field type: 0-number, 1-string , label-if is metrics label, unit: metric unit - - field: category - type: 1 - - field: app - type: 1 - - field: size - type: 0 - - field: status - type: 0 -# Monitoring and collection protocol eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http -# When the protocol is the http protocol, the specific collection configuration - http: - host: ^_^host^_^ - # port - port: ^_^port^_^ - # url request interface path - url: /api/summary - # Request method GET POST PUT DELETE PATCH - method: GET - # Whether to enable ssl/tls, that is, http or https, default false - ssl: ^_^ssl^_^ - # authentication - authorization: - # Authentication methods: Basic Auth, Digest Auth, Bearer Token - type: Bearer Token - bearerTokenToken: ^o^token^o^ - # Response data analysis method: default-system rules, jsonPath-jsonPath script, website-website usability metric monitoring - parseType: jsonPath - parseScript: '$.data.apps.*' - -``` - -**After the configuration is complete, save and apply, and check the monitoring details page** - -![](/img/docs/advanced/extend-http-example-8.png) - -![](/img/docs/advanced/extend-http-example-9.png) - -### Set threshold alarm notification - -> Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. Have Fun!!! - ---- - -#### over - -This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! - -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. 
- -**github: ** - -**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md deleted file mode 100644 index 4e12fe86b57..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http-jsonpath.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -id: extend-http-jsonpath -title: HTTP Protocol JsonPath Parsing Method -sidebar_label: JsonPath Parsing Method ---- - -> After calling the HTTP api to obtain the response data, use JsonPath script parsing method to parse the response data. - -Note⚠️ The response data is JSON format. - -**Use the JsonPath script to parse the response data into data that conforms to the data structure rules specified by HertzBeat** - -#### JsonPath Operator - -[JSONPath online verification](https://www.jsonpath.cn) - -| JSONPATH | Help description | -|------------------|----------------------------------------------------------------------------------------| -| $ | Root object or element | -| @ | Current object or element | -| . or [] | Child element operator | -| .. | Recursively match all child elements | -| * | Wildcard. Match all objects or elements | -| [] | Subscript operator, jsonpath index starts from 0 | -| [,] | Join operator, return multiple results as an array. 
Jsonpath allows the use of aliases | -| [start:end:step] | Array slice operator | -| ?() | Filter (script) expression | -| () | Script Expression | - -#### HertzBeat data format specification - -Single layer format :key-value - -```json -{ - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" -} -``` - -Multilayer format:Set key value in the array - -```json -[ - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - }, - { - "metricName1": "metricValue", - "metricName2": "metricValue", - "metricName3": "metricValue", - "metricName4": "metricValue" - } -] -``` - -#### Example - -Query the value information of the custom system, and its exposed interface is `/metrics/person`. We need `type,num` Metric. -The raw data returned by the interface is as follows: - -```json -{ - "firstName": "John", - "lastName" : "doe", - "age" : 26, - "address" : { - "streetAddress": "naist street", - "city" : "Nara", - "postalCode" : "630-0192" - }, - "number": [ - { - "type": "core", - "num": 3343 - }, - { - "type": "home", - "num": 4543 - } - ] -} -``` - -We use the jsonpath script to parse, and the corresponding script is: `$.number[*]`,The parsed data structure is as follows: - -```json -[ - { - "type": "core", - "num": 3343 - }, - { - "type": "home", - "num": 4543 - } -] -``` - -This data structure conforms to the data format specification of HertzBeat, and the Metric `type,num` is successfully extracted. - -**The corresponding monitoring template yml can be configured as follows** - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# Monitoring application type(consistent with the file name) eg: linux windows tomcat mysql aws... 
-app: example -name: - zh-CN: 模拟应用类型 - en-US: EXAMPLE APP -params: - # field-field name identifier - - field: host - # name-parameter field display name - name: - zh-CN: 主机Host - en-US: Host - # type-field type, style(most mappings are input label type attribute) - type: host - # required or not true-required false-optional - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # When type is number, range is used to represent the range. - range: '[0,65535]' - required: true - # port default - defaultValue: 80 - # Prompt information of parameter input box - placeholder: 'Please enter the port' -# Metric group list -metrics: -# The first monitoring Metric group person -# Note:the built-in monitoring Metrics have (responseTime - response time) - - name: cpu - # The smaller Metric group scheduling priority(0-127), the higher the priority. After completion of the high priority Metric group collection,the low priority Metric group will then be scheduled. Metric groups with the same priority will be scheduled in parallel. - # Metric group with a priority of 0 is an availability group which will be scheduled first. If the collection succeeds, the scheduling will continue otherwise interrupt scheduling. - priority: 0 - # metrics fields list - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: type - type: 1 - label: true - - field: num - type: 0 -# protocol for monitoring and collection eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http -# Specific collection configuration when the protocol is HTTP protocol - http: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - # url request interface path - url: /metrics/person - # request mode GET POST PUT DELETE PATCH - method: GET - # enable ssl/tls or not, that is to say, HTTP or HTTPS. 
The default is false - ssl: false - # parsing method for response data: default-system rules, jsonPath-jsonPath script, website-website availability Metric monitoring - # jsonPath parsing is used here - parseType: jsonPath - parseScript: '$.number[*]' -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-http.md b/home/versioned_docs/version-v1.4.x/advanced/extend-http.md deleted file mode 100644 index 58094187429..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-http.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -id: extend-http -title: HTTP Protocol Custom Monitoring -sidebar_label: HTTP Protocol Custom Monitoring ---- - -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use HTTP protocol to customize Metric monitoring - -### HTTP protocol collection process - -【**Call HTTP API**】->【**Response Verification**】->【**Parse Response Data**】->【**Default method parsing|JsonPath script parsing | XmlPath parsing(todo) | Prometheus parsing**】->【**Metric data extraction**】 - -It can be seen from the process that we define a monitoring type of HTTP protocol. We need to configure HTTP request parameters, configure which Metrics to obtain, and configure the parsing method and parsing script for response data. -HTTP protocol supports us to customize HTTP request path, request header, request parameters, request method, request body, etc. - -**System default parsing method**:HTTP interface returns the JSON data structure specified by hertzbeat, that is, the default parsing method can be used to parse the data and extract the corresponding Metric data. For details, refer to [**System Default Parsing**](extend-http-default) -**JsonPath script parsing method**:Use JsonPath script to parse the response JSON data, return the data structure specified by the system, and then provide the corresponding Metric data. 
For details, refer to [**JsonPath Script Parsing**](extend-http-jsonpath) - -### Custom Steps - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -------- - -Configuration usages of the monitoring templates yml are detailed below. Please pay attention to usage annotation. - -### Monitoring Templates YML - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example_http` which use the HTTP protocol to collect data. - -**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# Monitoring application type(consistent with the file name) eg: linux windows tomcat mysql aws... -app: example_http -name: - zh-CN: 模拟应用类型 - en-US: EXAMPLE APP -params: - # field-field name identifier - - field: host - # name-parameter field display name - name: - zh-CN: 主机Host - en-US: Host - # type-field type, style(most mappings are input label type attribute) - type: host - # required or not true-required false-optional - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - # When type is number, range is used to represent the range. 
- range: '[0,65535]' - required: true - # port default - defaultValue: 80 - # Prompt information of parameter input box - placeholder: 'Please enter the port' - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - # When type is text, use limit to indicate the string limit size - limit: 20 - required: false - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - - field: ssl - name: - zh-CN: 启动SSL - en-US: Enable SSL - # When type is boolean, front end uses switch to show the switch - type: boolean - required: false - - field: method - name: - zh-CN: 请求方式 - en-US: Method - type: radio - required: true - # When type is radio or checkbox, option indicates the list of selectable values {name1:value1,name2:value2} - options: - - label: GET request - value: GET - - label: POST request - value: POST - - label: PUT request - value: PUT - - label: DELETE request - value: DELETE -# Metric group list -metrics: -# The first monitoring Metric group cpu -# Note:the built-in monitoring Metrics have (responseTime - response time) - - name: cpu - # The smaller Metric group scheduling priority(0-127), the higher the priority. After completion of the high priority Metric group collection,the low priority Metric group will then be scheduled. Metric groups with the same priority will be scheduled in parallel. - # Metric group with a priority of 0 is an availability group which will be scheduled first. If the collection succeeds, the scheduling will continue otherwise interrupt scheduling. - priority: 0 - # metrics fields list - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: hostname - type: 1 - label: true - - field: usage - type: 0 - unit: '%' - - field: cores - type: 0 - - field: waitTime - type: 0 - unit: s -# (optional)Monitoring Metric alias mapping to the Metric name above. 
The field used to collect interface data is not the final Metric name directly. This alias is required for mapping conversion. - aliasFields: - - hostname - - core1 - - core2 - - usage - - allTime - - runningTime -# (optional)The Metric calculation expression works with the above alias to calculate the final required Metric value. -# eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - hostname=hostname - - cores=core1+core2 - - usage=usage - - waitTime=allTime-runningTime -# protocol for monitoring and collection eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http -# Specific collection configuration when the protocol is HTTP protocol - http: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - # url request interface path - url: /metrics/cpu - # request mode: GET POST PUT DELETE PATCH - method: GET - # enable ssl/tls or not, that is to say, HTTP or HTTPS. The default is false - ssl: false - # request header content - headers: - apiVersion: v1 - # request parameter content - params: - param1: param1 - param2: param2 - # authorization - authorization: - # authorization method: Basic Auth, Digest Auth, Bearer Token - type: Basic Auth - basicAuthUsername: ^_^username^_^ - basicAuthPassword: ^_^password^_^ - # parsing method for reponse data: default-system rules, jsonPath-jsonPath script, website-website availability Metric monitoring - # todo xmlPath-xmlPath script, prometheus-Prometheus data rules - parseType: jsonPath - parseScript: '$' - - - name: memory - priority: 1 - fields: - - field: hostname - type: 1 - label: true - - field: total - type: 0 - unit: kb - - field: usage - type: 0 - unit: '%' - - field: speed - type: 0 - protocol: http - http: - host: ^_^host^_^ - port: ^_^port^_^ - url: /metrics/memory - method: GET - headers: - apiVersion: v1 - params: - param1: param1 - param2: param2 - authorization: - type: Basic Auth - basicAuthUsername: ^_^username^_^ - basicAuthPassword: ^_^password^_^ 
- parseType: default -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md b/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md deleted file mode 100644 index 1f766b08da0..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-jdbc.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -id: extend-jdbc -title: JDBC Protocol Custom Monitoring -sidebar_label: JDBC Protocol Custom Monitoring ---- - -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use JDBC(support mysql,mariadb,postgresql,sqlserver at present) to customize Metric monitoring. -> JDBC protocol custom monitoring allows us to easily monitor Metrics we want by writing SQL query statement. - -### JDBC protocol collection process - -【**System directly connected to MYSQL**】->【**Run SQL query statement**】->【**parse reponse data: oneRow, multiRow, columns**】->【**Metric data extraction**】 - -It can be seen from the process that we define a monitoring type of JDBC protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query SQL statements. - -### Data parsing method - -We can obtain the corresponding Metric data through the data fields queried by SQL and the Metric mapping we need. At present, there are three mapping parsing methods:oneRow, multiRow, columns. - -#### **oneRow** - -> Query a row of data, return the column name of the result set through query and map them to the queried field. - -eg: -queried Metric fields:one two three four -query SQL:select one, two, three, four from book limit 1; -Here the Metric field and the response data can be mapped into a row of collected data one by one. - -#### **multiRow** - -> Query multiple rows of data, return the column names of the result set and map them to the queried fields. 
- -eg: -queried Metric fields:one two three four -query SQL:select one, two, three, four from book; -Here the Metric field and the response data can be mapped into multiple rows of collected data one by one. - -#### **columns** - -> Collect a row of Metric data. By matching the two columns of queried data (key value), key and the queried field, value is the value of the query field. - -eg: -queried fields:one two three four -query SQL:select key, value from book; -SQL response data: - -| key | value | -|-------|-------| -| one | 243 | -| two | 435 | -| three | 332 | -| four | 643 | - -Here by mapping the Metric field with the key of the response data, we can obtain the corresponding value as collection and monitoring data. - -### Custom Steps - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -------- - -Configuration usages of the monitoring templates yml are detailed below. - -### Monitoring Templates YML - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example_sql` which use the JDBC protocol to collect data. - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: db -# Monitoring application type(consistent with the file name) eg: linux windows tomcat mysql aws... 
-app: example_sql -name: - zh-CN: 模拟MYSQL应用类型 - en-US: MYSQL EXAMPLE APP -# Monitoring parameter definition file is used to define required input parameter field structure definition Front-end page render input parameter box according to structure -params: - - field: host - name: - zh-CN: 主机Host - en-US: Host - type: host - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - range: '[0,65535]' - required: true - defaultValue: 80 - placeholder: 'Please enter the port' - - field: database - name: - zh-CN: 数据库名称 - en-US: Database - type: text - required: false - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: false - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - - field: url - name: - zh-CN: Url - en-US: Url - type: text - required: false -# Metric group list -metrics: - - name: basic - # The smaller Metric group scheduling priority(0-127), the higher the priority. After completion of the high priority Metric group collection,the low priority Metric group will then be scheduled. Metric groups with the same priority will be scheduled in parallel. - # Metric group with a priority of 0 is an availability group which will be scheduled first. If the collection succeeds, the scheduling will continue otherwise interrupt scheduling. - priority: 0 - # metrics fields list - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: version - type: 1 - label: true - - field: port - type: 1 - - field: datadir - type: 1 - - field: max_connections - type: 0 - # (optional)Monitoring Metric alias mapping to the Metric name above. The field used to collect interface data is not the final Metric name directly. This alias is required for mapping conversion. 
- aliasFields: - - version - - version_compile_os - - version_compile_machine - - port - - datadir - - max_connections - # (optional)The Metric calculation expression works with the above alias to calculate the final required Metric value. - # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - port=port - - datadir=datadir - - max_connections=max_connections - - version=version+"_"+version_compile_os+"_"+version_compile_machine - protocol: jdbc - jdbc: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - platform: mysql - username: ^_^username^_^ - password: ^_^password^_^ - database: ^_^database^_^ - # SQL query method:oneRow, multiRow, columns - queryType: columns - # sql - sql: show global variables where Variable_name like 'version%' or Variable_name = 'max_connections' or Variable_name = 'datadir' or Variable_name = 'port'; - url: ^_^url^_^ - - - name: status - priority: 1 - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: threads_created - type: 0 - - field: threads_connected - type: 0 - - field: threads_cached - type: 0 - - field: threads_running - type: 0 - # (optional)Monitoring Metric alias mapping to the Metric name above. The field used to collect interface data is not the final Metric name directly. This alias is required for mapping conversion. - aliasFields: - - threads_created - - threads_connected - - threads_cached - - threads_running - # (optional)The Metric calculation expression works with the above alias to calculate the final required Metric value. 
- # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - threads_created=threads_created - - threads_connected=threads_connected - - threads_cached=threads_cached - - threads_running=threads_running - protocol: jdbc - jdbc: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - platform: mysql - username: ^_^username^_^ - password: ^_^password^_^ - database: ^_^database^_^ - # SQL query method: oneRow, multiRow, columns - queryType: columns - # sql - sql: show global status where Variable_name like 'thread%' or Variable_name = 'com_commit' or Variable_name = 'com_rollback' or Variable_name = 'questions' or Variable_name = 'uptime'; - url: ^_^url^_^ - - - name: innodb - priority: 2 - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: innodb_data_reads - type: 0 - unit: times - - field: innodb_data_writes - type: 0 - unit: times - - field: innodb_data_read - type: 0 - unit: kb - - field: innodb_data_written - type: 0 - unit: kb - protocol: jdbc - jdbc: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - platform: mysql - username: ^_^username^_^ - password: ^_^password^_^ - database: ^_^database^_^ - # SQL query method:oneRow, multiRow, columns - queryType: columns - # sql - sql: show global status where Variable_name like 'innodb%'; - url: ^_^url^_^ -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md b/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md deleted file mode 100644 index e354a4152fd..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-jmx.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -id: extend-jmx -title: JMX Protocol Custom Monitoring -sidebar_label: JMX Protocol Custom Monitoring ---- - -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. 
Here we will introduce in detail how to use JMX to customize Metric monitoring. -> JMX protocol custom monitoring allows us to easily monitor Metrics we want by config JMX Mbeans Object. - -### JMX protocol collection process - -【**Peer Server Enable Jmx Service**】->【**HertzBeat Connect Peer Server Jmx**】->【**Query Jmx Mbean Object Data**】->【**Metric data extraction**】 - -It can be seen from the process that we define a monitoring type of JMX protocol. We need to configure JMX request parameters, configure which Metrics to obtain, and configure Mbeans Object. - -### Data parsing method - -By configuring the monitoring template YML metrics `field`, `aliasFields`, `objectName` of the `jmx` protocol to map and parse the `Mbean` object information exposed by the peer system. - -### Custom Steps - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -Configuration usages of the monitoring templates yml are detailed below. - -### Monitoring Templates YML - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example_jvm` which use the JVM protocol to collect data. - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: service -# The monitoring type eg: linux windows tomcat mysql aws... 
-app: example_jvm -# The monitoring i18n name -name: - zh-CN: 自定义JVM虚拟机 - en-US: CUSTOM JVM -# Input params define for monitoring(render web ui by the definition) -params: - # field-param field key - - field: host - # name-param field display i18n name - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - type: host - # required-true or false - required: true - # field-param field key - - field: port - # name-param field display i18n name - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - type: number - # when type is number, range is required - range: '[0,65535]' - # required-true or false - required: true - # default value - defaultValue: 9999 - # field-param field key - - field: url - # name-param field display i18n name - name: - zh-CN: JMX URL - en-US: JMX URL - # type-param field type(most mapping the html input type) - type: text - # required-true or false - required: false - # hide param-true or false - hide: true - # param field input placeholder - placeholder: 'service:jmx:rmi:///jndi/rmi://host:port/jmxrmi' - # field-param field key - - field: username - # name-param field display i18n name - name: - zh-CN: 用户名 - en-US: Username - # type-param field type(most mapping the html input type) - type: text - # when type is text, use limit to limit string length - limit: 20 - # required-true or false - required: false - # hide param-true or false - hide: true - # field-param field key - - field: password - # name-param field display i18n name - name: - zh-CN: 密码 - en-US: Password - # type-param field type(most mapping the html input tag) - type: password - # required-true or false - required: false - # hide param-true or false - hide: true -# collect metrics config list -metrics: - # metrics - basic - - name: basic - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability 
metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue - priority: 0 - # collect metrics content - fields: - # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - - field: VmName - type: 1 - - field: VmVendor - type: 1 - - field: VmVersion - type: 1 - - field: Uptime - type: 0 - unit: ms - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: jmx - # the config content when protocol is jmx - jmx: - # host: ipv4 ipv6 domain - host: ^_^host^_^ - # port - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - # jmx mbean object name - objectName: java.lang:type=Runtime - url: ^_^url^_^ - - - name: memory_pool - priority: 1 - fields: - - field: name - type: 1 - label: true - - field: committed - type: 0 - unit: MB - - field: init - type: 0 - unit: MB - - field: max - type: 0 - unit: MB - - field: used - type: 0 - unit: MB - units: - - committed=B->MB - - init=B->MB - - max=B->MB - - used=B->MB - # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field - aliasFields: - - Name - - Usage->committed - - Usage->init - - Usage->max - - Usage->used - # mapping and conversion expressions, use these and aliasField above to calculate metrics value - # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime - calculates: - - name=Name - - committed=Usage->committed - - init=Usage->init - - max=Usage->max - - used=Usage->used - protocol: jmx - jmx: - # host: ipv4 ipv6 domain - host: ^_^host^_^ - # port - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - objectName: java.lang:type=MemoryPool,name=* - url: ^_^url^_^ -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-point.md b/home/versioned_docs/version-v1.4.x/advanced/extend-point.md deleted file mode 100644 index 3f02f6040f4..00000000000 --- 
a/home/versioned_docs/version-v1.4.x/advanced/extend-point.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -id: extend-point -title: Custom Monitoring -sidebar_label: Custom Monitoring ---- - -> HertzBeat has custom monitoring ability. You only need to configure monitoring template yml to fit a custom monitoring type. -> Custom monitoring currently supports [HTTP protocol](extend-http),[JDBC protocol](extend-jdbc), [SSH protocol](extend-ssh), [JMX protocol](extend-jmx), [SNMP protocol](extend-snmp). And it will support more general protocols in the future. - -### Custom Monitoring Steps - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -------- - -Configuration usages of the monitoring templates yml are detailed below. - -### Monitoring Templates YML - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example2` which use the HTTP protocol to collect data. - -**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# The monitoring type eg: linux windows tomcat mysql aws... 
-app: example2 -# The monitoring i18n name -name: - zh-CN: 模拟网站监测 - en-US: EXAMPLE WEBSITE -# Input params define for monitoring(render web ui by the definition) -params: - # field-param field key - - field: host - # name-param field display i18n name - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - type: host - # required-true or false - required: true - # field-param field key - - field: port - # name-param field display i18n name - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - type: number - # when type is number, range is required - range: '[0,65535]' - # required-true or false - required: true - # default value - defaultValue: 80 - # field-param field key - - field: uri - # name-param field display i18n name - name: - zh-CN: 相对路径 - en-US: URI - # type-param field type(most mapping the html input tag) - type: text - # when type is text, use limit to limit string length - limit: 200 - # required-true or false - required: false - # param field input placeholder - placeholder: 'Website uri path(no ip port) EG:/console' - # field-param field key - - field: ssl - # name-param field display i18n name - name: - zh-CN: 启用HTTPS - en-US: HTTPS - # type-param field type(most mapping the html input type) - type: boolean - # required-true or false - required: true - # field-param field key - - field: timeout - # name-param field display i18n name - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - # type-param field type(most mapping the html input tag) - type: number - # required-true or false - required: false - # hide param-true or false - hide: true - -metrics: - # metrics - summary, inner monitoring metrics (responseTime - response time, keyword - number of keywords) - - name: summary - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled first, 
only availability metrics collect success will the scheduling continue - priority: 0 - # collect metrics content - fields: - # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - - field: responseTime - type: 0 - unit: ms - - field: keyword - type: 0 - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: http - # the config content when protocol is http - http: - # http host: ipv4 ipv6 domain - host: ^_^host^_^ - # http port - port: ^_^port^_^ - # http url - url: ^_^uri^_^ - timeout: ^_^timeout^_^ - # http method: GET POST PUT DELETE PATCH - method: GET - # if enabled https - ssl: ^_^ssl^_^ - # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-for website monitoring, prometheus-prometheus exporter rule - parseType: website - -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-push.md b/home/versioned_docs/version-v1.4.x/advanced/extend-push.md deleted file mode 100644 index 18e190d7f5d..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-push.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: extend-push -title: Push Style Custom Monitoring -sidebar_label: Push Style Custom Monitoring ---- - -> Push style curstom monitor is a type of monitor which allow user to configure metrics format and push metrics to hertzbeat with their own service. -> Here we will introduce how to use this feature. - -### Push style custom monitor collection process - -【Peer Server Start Pushing Metrics】 -> 【HertzBeat Push Module Stage Metrics】-> 【HertzBeat Collect Module collect Metrics Periodically】 - -### Data parsing method - -HertzBeat will parsing metrics with the format configured by user while adding new monitor. 
- -### Create Monitor Steps - -HertzBeat DashBoard -> Service Monitor -> Push Style Monitor -> New Push Style Monitor -> set Push Module Host (hertzbeat server ip, usually 127.0.0.1/localhost) -> set Push Module Port (hertzbeat server port, usually 1157) -> configure metrics field (unit: string, type: 0 number / 1 string) -> end - ---- - -### Monitor Configuration Example - -![](/img/docs/advanced/extend-push-example-1.png) diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md b/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md deleted file mode 100644 index 3dae2b8b6dd..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-snmp.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -id: extend-snmp -title: SNMP Protocol Custom Monitoring -sidebar_label: SNMP Protocol Custom Monitoring ---- - -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. Here we will introduce in detail how to use SNMP to customize Metric monitoring. -> JMX protocol custom monitoring allows us to easily monitor Metrics we want by config SNMP MIB OIDs. - -### SNMP protocol collection process - -【**Peer Server Enable SNMP Service**】->【**HertzBeat Connect Peer Server SNMP**】->【**Query Oids Data**】->【**Metric data extraction**】 - -It can be seen from the process that we define a monitoring type of Snmp protocol. We need to configure Snmp request parameters, configure which Metrics to obtain, and configure oids. - -### Data parsing method - -By configuring the metrics `field`, `aliasFields`, and `oids` under the `snmp` protocol of the monitoring template YML to capture the data specified by the peer and parse the mapping. 
- -### Custom Steps - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -![](/img/docs/advanced/extend-point-1.png) - -------- - -Configuration usages of the monitoring templates yml are detailed below. - -### Monitoring Templates YML - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example_windows` which use the SNMP protocol to collect data. - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring mid-middleware custom-custom monitoring os-operating system monitoring -category: os -# The monitoring type eg: linux windows tomcat mysql aws... 
-app: windows -# The monitoring i18n name -name: - zh-CN: Windows操作系统 - en-US: OS Windows -# Input params define for monitoring(render web ui by the definition) -params: - # field-param field key - - field: host - # name-param field display i18n name - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - type: host - # required-true or false - required: true - # field-param field key - - field: port - # name-param field display i18n name - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - type: number - # when type is number, range is required - range: '[0,65535]' - # required-true or false - required: true - # default value - defaultValue: 161 - # field-param field key - - field: version - # name-param field display i18n name - name: - zh-CN: SNMP 版本 - en-US: SNMP Version - # type-param field type(radio mapping the html radio tag) - type: radio - # required-true or false - required: true - # when type is radio checkbox, use option to show optional values {name1:value1,name2:value2} - options: - - label: SNMPv1 - value: 0 - - label: SNMPv2c - value: 1 - # field-param field key - - field: community - # name-param field display i18n name - name: - zh-CN: SNMP 团体字 - en-US: SNMP Community - # type-param field type(most mapping the html input type) - type: text - # when type is text, use limit to limit string length - limit: 100 - # required-true or false - required: true - # param field input placeholder - placeholder: 'Snmp community for v1 v2c' - # field-param field key - - field: timeout - # name-param field display i18n name - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - # type-param field type(most mapping the html input type) - type: number - # when type is number, range is required - range: '[0,100000]' - # required-true or false - required: false - # hide-is hide this field and put it in advanced layout - hide: true - # default value - defaultValue: 6000 -# collect 
metrics config list -metrics: - # metrics - system - - name: system - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue - priority: 0 - # collect metrics content - fields: - # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-if is metrics label - - field: name - type: 1 - - field: descr - type: 1 - - field: uptime - type: 1 - - field: numUsers - type: 0 - - field: services - type: 0 - - field: processes - type: 0 - - field: responseTime - type: 0 - unit: ms - - field: location - type: 1 - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: snmp - # the config content when protocol is snmp - snmp: - # server host: ipv4 ipv6 domain - host: ^_^host^_^ - # server port - port: ^_^port^_^ - # snmp connect timeout - timeout: ^_^timeout^_^ - # snmp community - community: ^_^community^_^ - # snmp version - version: ^_^version^_^ - # snmp operation: get, walk - operation: get - # metrics oids: metric_name - oid_value - oids: - name: 1.3.6.1.2.1.1.5.0 - descr: 1.3.6.1.2.1.1.1.0 - uptime: 1.3.6.1.2.1.25.1.1.0 - numUsers: 1.3.6.1.2.1.25.1.5.0 - services: 1.3.6.1.2.1.1.7.0 - processes: 1.3.6.1.2.1.25.1.6.0 - location: 1.3.6.1.2.1.1.6.0 -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md b/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md deleted file mode 100644 index 2226a2a6334..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-ssh.md +++ /dev/null @@ -1,214 +0,0 @@ ---- -id: extend-ssh -title: SSH Protocol Custom Monitoring -sidebar_label: SSH Protocol Custom Monitoring ---- - -> From [Custom Monitoring](extend-point), you are familiar with how to customize types, Metrics, protocols, etc. 
Here we will introduce in detail how to use SSH protocol to customize Metric monitoring. -> SSH protocol custom monitoring allows us to easily monitor and collect the Linux Metrics we want by writing sh command script. - -### SSH protocol collection process - -【**System directly connected to Linux**】->【**Run shell command script statement**】->【**parse response data: oneRow, multiRow**】->【**Metric data extraction**】 - -It can be seen from the process that we define a monitoring type of SSH protocol. We need to configure SSH request parameters, configure which Metrics to obtain, and configure query script statements. - -### Data parsing method - -We can obtain the corresponding Metric data through the data fields queried by the SHELL script and the Metric mapping we need. At present, there are two mapping parsing methods:oneRow and multiRow which can meet the needs of most Metrics. - -#### **oneRow** - -> Query out a column of data, return the field value (one value per row) of the result set through query and map them to the field. - -eg: -Metrics of Linux to be queried hostname-host name,uptime-start time -Host name original query command:`hostname` -Start time original query command:`uptime | awk -F "," '{print $1}'` -Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them together): -`hostname; uptime | awk -F "," '{print $1}'` -The data responded by the terminal is: - -``` -tombook -14:00:15 up 72 days -``` - -At last collected Metric data is mapped one by one as: -hostname is `tombook` -uptime is `14:00:15 up 72 days` - -Here the Metric field and the response data can be mapped into a row of collected data one by one - -#### **multiRow** - -> Query multiple rows of data, return the column names of the result set through the query, and map them to the Metric field of the query. 
- -eg: -Linux memory related Metric fields queried:total-Total memory, used-Used memory,free-Free memory, buff-cache-Cache size, available-Available memory -Memory metrics original query command:`free -m`, Console response: - -```shell - total used free shared buff/cache available -Mem: 7962 4065 333 1 3562 3593 -Swap: 8191 33 8158 -``` - -In hertzbeat multiRow format parsing requires a one-to-one mapping between the column name of the response data and the indicaotr value, so the corresponding query SHELL script is: -`free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'` -Console response is: - -```shell -total used free buff_cache available -7962 4066 331 3564 3592 -``` - -Here the Metric field and the response data can be mapped into collected data one by one. - -### Custom Steps - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -------- - -Configuration usages of the monitoring templates yml are detailed below. - -### Monitoring Templates YML - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -eg:Define a custom monitoring type `app` named `example_linux` which use the SSH protocol to collect data. - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: os -# Monitoring application type(consistent with the file name) eg: linux windows tomcat mysql aws... 
-app: example_linux -name: - zh-CN: 模拟LINUX应用类型 - en-US: LINUX EXAMPLE APP -params: - - field: host - name: - zh-CN: 主机Host - en-US: Host - type: host - required: true - - field: port - name: - zh-CN: 端口 - en-US: Port - type: number - range: '[0,65535]' - required: true - defaultValue: 22 - placeholder: 'Please enter the port' - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: true - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: true -# Metric group list -metrics: - # The first monitoring Metric group basic - # Note:: the built-in monitoring Metrics have (responseTime - response time) - - name: basic - # The smaller Metric group scheduling priority(0-127), the higher the priority. After completion of the high priority Metric group collection,the low priority Metric group will then be scheduled. Metric groups with the same priority will be scheduled in parallel. - # Metric group with a priority of 0 is an availability group which will be scheduled first. If the collection succeeds, the scheduling will continue otherwise interrupt scheduling. 
- priority: 0 - # metrics fields list - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: hostname - type: 1 - label: true - - field: version - type: 1 - - field: uptime - type: 1 - # protocol for monitoring and collection eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: ssh - # Specific collection configuration when the protocol is SSH protocol - ssh: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - script: (uname -r ; hostname ; uptime | awk -F "," '{print $1}' | sed "s/ //g") | sed ":a;N;s/\n/^/g;ta" | awk -F '^' 'BEGIN{print "version hostname uptime"} {print $1, $2, $3}' - # parsing method for reponse data:oneRow, multiRow - parseType: multiRow - - - name: cpu - priority: 1 - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics label unit: Metric unit - - field: info - type: 1 - - field: cores - type: 0 - unit: the number of cores - - field: interrupt - type: 0 - unit: number - - field: load - type: 1 - - field: context_switch - type: 0 - unit: number - # protocol for monitoring and collection eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: ssh - # Specific collection configuration when the protocol is SSH protocol - ssh: - # 主机host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - script: "LANG=C lscpu | awk -F: '/Model name/ {print $2}';awk '/processor/{core++} END{print core}' /proc/cpuinfo;uptime | sed 's/,/ /g' | awk '{for(i=NF-2;i<=NF;i++)print $i }' | xargs;vmstat 1 1 | awk 'NR==3{print $11}';vmstat 1 1 | awk 'NR==3{print $12}'" - parseType: oneRow - - - name: memory - priority: 2 - fields: - # Metric information include field: name type: field type(0-number: number, 1-string: string) label-if is metrics 
label unit: Metric unit - - field: total - type: 0 - unit: Mb - - field: used - type: 0 - unit: Mb - - field: free - type: 0 - unit: Mb - - field: buff_cache - type: 0 - unit: Mb - - field: available - type: 0 - unit: Mb - # protocol for monitoring and collection eg: sql, ssh, http, telnet, wmi, snmp, sdk - protocol: ssh - # Specific collection configuration when the protocol is SSH protocol - ssh: - # host: ipv4 ipv6 domain name - host: ^_^host^_^ - # port - port: ^_^port^_^ - username: ^_^username^_^ - password: ^_^password^_^ - script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}' - parseType: multiRow -``` diff --git a/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md b/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md deleted file mode 100644 index cd594f5a378..00000000000 --- a/home/versioned_docs/version-v1.4.x/advanced/extend-tutorial.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -id: extend-tutorial -title: Quick Tutorial Customize and adapt a monitoring based on HTTP protocol -sidebar_label: Tutorial Case ---- - -Through this tutorial, we describe step by step how to customize and adapt a monitoring type based on the http protocol under the hertzbeat monitoring tool. - -Before reading this tutorial, we hope that you are familiar with how to customize types, metrics, protocols, etc. from [Custom Monitoring](extend-point) and [Http Protocol Customization](extend-http). - -### HTTP protocol parses the general response structure to obtain metrics data - -> In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. 
- -``` -{ - "code": 200, - "msg": "success", - "data": {} -} - -``` - -As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API -`http://localhost:1157/api/summary`, the response data is: - -``` -{ - "msg": null, - "code": 0, - "data": { - "apps": [ - { - "category": "service", - "app": "jvm", - "status": 0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - }, - { - "category": "service", - "app": "website", - "status": 0, - "size": 2, - "availableSize": 0, - "unManageSize": 2, - "unAvailableSize": 0, - "unReachableSize": 0 - } - ] - } -} -``` - -**This time we get the metrics data such as `category`, `app`, `status`, `size`, `availableSize` under the app.** - -### Add Monitoring Template Yml - -**HertzBeat Dashboard** -> **Monitoring Templates** -> **New Template** -> **Config Monitoring Template Yml** -> **Save and Apply** -> **Add A Monitoring with The New Monitoring Type** - -> We define all monitoring collection types (mysql,jvm,k8s) as yml monitoring templates, and users can import these templates to support corresponding types of monitoring. -> -> Monitoring template is used to define *the name of monitoring type(international), request parameter mapping, index information, collection protocol configuration information*, etc. - -Here we define a custom monitoring type `app` named `hertzbeat` which use the HTTP protocol to collect data. - -**Monitoring Templates** -> **Config New Monitoring Template Yml** -> **Save and Apply** - -```yaml -# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring -category: custom -# The monitoring type eg: linux windows tomcat mysql aws... 
-app: hertzbeat -# The monitoring i18n name -name: - zh-CN: HertzBeat监控系统 - en-US: HertzBeat Monitor -# Input params define for monitoring(render web ui by the definition) -params: - # field-param field key - - field: host - # name-param field display i18n name - name: - zh-CN: 主机Host - en-US: Host - # type-param field type(most mapping the html input type) - type: host - # required-true or false - required: true - # field-param field key - - field: port - # name-param field display i18n name - name: - zh-CN: 端口 - en-US: Port - # type-param field type(most mapping the html input type) - type: number - # when type is number, range is required - range: '[0,65535]' - # required-true or false - required: true - # default value - defaultValue: 1157 - - field: ssl - name: - zh-CN: 启用HTTPS - en-US: HTTPS - type: boolean - required: true - - field: timeout - name: - zh-CN: 超时时间(ms) - en-US: Timeout(ms) - type: number - required: false - hide: true - - field: authType - name: - zh-CN: 认证方式 - en-US: Auth Type - type: radio - required: false - hide: true - options: - - label: Basic Auth - value: Basic Auth - - label: Digest Auth - value: Digest Auth - - field: username - name: - zh-CN: 用户名 - en-US: Username - type: text - limit: 20 - required: false - hide: true - - field: password - name: - zh-CN: 密码 - en-US: Password - type: password - required: false - hide: true -metrics: - # the first metrics summary - # attention: Built-in monitoring metrics contains (responseTime - Response time) - - name: summary - # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel - # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue - priority: 0 - # collect metrics content - fields: - # metrics content contains field-metric name, type-metric type:0-number,1-string, label-if is metrics label, unit-metric unit('%','ms','MB') - - field: app - 
type: 1 - label: true - - field: category - type: 1 - - field: status - type: 0 - - field: size - type: 0 - - field: availableSize - type: 0 - # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk, we use HTTP protocol here - protocol: http - # the config content when protocol is http - http: - # http host: ipv4 ipv6 domain - host: ^_^host^_^ - # http port - port: ^_^port^_^ - # http url, we don't need to enter a parameter here, just set the fixed value to /api/summary - url: /api/summary - timeout: ^_^timeout^_^ - # http method: GET POST PUT DELETE PATCH, default fixed value is GET - method: GET - # if enabled https, default value is false - ssl: ^_^ssl^_^ - # http auth - authorization: - # http auth type: Basic Auth, Digest Auth, Bearer Token - type: ^_^authType^_^ - basicAuthUsername: ^_^username^_^ - basicAuthPassword: ^_^password^_^ - digestAuthUsername: ^_^username^_^ - digestAuthPassword: ^_^password^_^ - # http response data parse type: default-hertzbeat rule, jsonpath-jsonpath script, website-for website monitoring, we use jsonpath to parse response data here - parseType: jsonPath - parseScript: '$.data.apps.*' -``` - -**The addition is complete, now we restart the hertzbeat system. We can see that the system page has added a `hertzbeat` monitoring type.** - -![](/img/docs/advanced/extend-http-example-1.png) - -### The system page adds the monitoring of `hertzbeat` monitoring type - -> We click Add `HertzBeat Monitoring Tool`, configure monitoring IP, port, collection cycle, account password in advanced settings, etc., click OK to add monitoring. - -![](/img/docs/advanced/extend-http-example-2.png) - -![](/img/docs/advanced/extend-http-example-3.png) - -> After a certain period of time (depending on the collection cycle), we can see the specific metric data and historical charts in the monitoring details! 
- -![](/img/docs/advanced/extend-http-example-4.png) - -### Set threshold alarm notification - -> Next, we can set the threshold normally. After the alarm is triggered, we can view it in the alarm center, add a new recipient, set alarm notification, etc. Have Fun!!! - ----- - -#### over - -This is the end of the practice of custom monitoring of the HTTP protocol. The HTTP protocol also has other parameters such as headers and params. We can define it like postman, and the playability is also very high! - -If you think hertzbeat is a good open source project, please star us on GitHub Gitee, thank you very much. Thanks for the old iron support. Refill! - -**github: ** - -**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/help/activemq.md b/home/versioned_docs/version-v1.4.x/help/activemq.md deleted file mode 100644 index ef3cc911969..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/activemq.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -id: activemq -title: Monitoring Apache ActiveMQ -sidebar_label: Apache ActiveMQ -keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] ---- - -> Monitoring the running status of Apache ActiveMQ message middleware, nodes, topics and other related metrics. - -**Use Protocol: JMX** - -### Pre-monitoring Operations - -> You need to enable the `JMX` service on ActiveMQ, HertzBeat uses the JMX protocol to collect metrics from ActiveMQ. - -1. Modify the `conf/activemq.xml` file in the installation directory to enable JMX - -> Add `userJmx="true"` attribute in `broker` tag - -```xml - - - -``` - -2. Modify the `bin/env` file in the installation directory, configure the JMX port IP, etc. 
- -The original configuration information will be as follows - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -Update to the following configuration, ⚠️ pay attention to modify `local external IP` - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -3. Restart the ACTIVEMQ service, and add the corresponding ActiveMQ monitoring in HertzBeat. The parameters use the IP port configured by JMX. - -### Configuration parameters - -| Parameter name | Parameter help description | -|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). 
| -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| JMX port | The HTTP port provided by JMX, the default is 11099. | -| JMX URL | Optional, customize the JMX URL connection | -| Username | Username used for authentication | -| password | password used for authentication | -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | - -### Collect Metrics - -#### metrics: broker - -| Metric Name | Unit | Description | -|-------------------------|------|-----------------------------------------------------------------------| -| BrokerName | None | The name of the broker. | -| BrokerVersion | None | The version of the broker. | -| Uptime | None | Uptime of the broker. | -| UptimeMillis | ms | Uptime of the broker in milliseconds. | -| Persistent | None | Messages are synchronized to disk. | -| MemoryPercentUsage | % | Percent of memory limit used. | -| StorePercentUsage | % | Percent of store limit used. | -| TempPercentUsage | % | Percent of temp limit used. | -| CurrentConnectionsCount | None | Attribute exposed for management | -| TotalConnectionsCount | None | Attribute exposed for management | -| TotalEnqueueCount | None | Number of messages that have been sent to the broker. | -| TotalDequeueCount | None | Number of messages that have been acknowledged on the broker. | -| TotalConsumerCount | None | Number of message consumers subscribed to destinations on the broker. | -| TotalProducerCount | None | Number of message producers active on destinations on the broker. 
| -| TotalMessageCount | None | Number of unacknowledged messages on the broker. | -| AverageMessageSize | None | Average message size on this broker | -| MaxMessageSize | None | Max message size on this broker | -| MinMessageSize | None | Min message size on this broker | - -#### metrics: topic - -| Metric Name | Unit | Description | -|--------------------|------|-------------------------------------------------------------------------------------------| -| Name | None | Name of this destination. | -| MemoryLimit | MB | Memory limit, in bytes, used by undelivered messages before paging to temporary storage. | -| MemoryPercentUsage | None | The percentage of the memory limit used | -| ProducerCount | None | Number of producers attached to this destination | -| ConsumerCount | None | Number of consumers subscribed to this destination. | -| EnqueueCount | None | Number of messages that have been sent to the destination. | -| DequeueCount | None | Number of messages that has been acknowledged (and removed) from the destination. | -| ForwardCount | None | Number of messages that have been forwarded (to a networked broker) from the destination. | -| InFlightCount | None | Number of messages that have been dispatched to, but not acknowledged by, consumers. | -| DispatchCount | None | Number of messages that has been delivered to consumers, including those not acknowledged | -| ExpiredCount | None | Number of messages that have been expired. | -| StoreMessageSize | B | The memory size of all messages in this destination's store. | -| AverageEnqueueTime | ms | Average time a message was held on this destination. 
| -| MaxEnqueueTime | ms | The longest time a message was held on this destination | -| MinEnqueueTime | ms | The shortest time a message was held on this destination | -| TotalBlockedTime | ms | Total time (ms) messages have been blocked by flow control | -| AverageMessageSize | B | Average message size on this destination | -| MaxMessageSize | B | Max message size on this destination | -| MinMessageSize | B | Min message size on this destination | - -#### metrics: memory_pool - -| Metric Name | Unit | Description | -|-------------|------|--------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### metrics: class_loading - -| Metric Name | Unit | Description | -|-----------------------|------|--------------------------| -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - -#### metrics: thread - -| Metric Name | Unit | Description | -|-------------------------|------|----------------------------| -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | diff --git a/home/versioned_docs/version-v1.4.x/help/airflow.md b/home/versioned_docs/version-v1.4.x/help/airflow.md deleted file mode 100644 index a7f77f7f5b6..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/airflow.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: airflow -title: 监控:Apache Airflow监控 -sidebar_label: Apache Airflow -keywords: [开源监控系统, 开源数据库监控, Apache Airflow监控] ---- - -> 对Apache Airflow通用性能指标进行采集监控。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|-----------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) 
| -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8080 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| HTTPS | 是否启用HTTPS | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:airflow_health - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------|------|------------------| -| metadatabase | 无 | metadatabase健康情况 | -| scheduler | 无 | scheduler健康情况 | -| triggerer | 无 | triggerer健康情况 | - -#### 指标集合:airflow_version - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------|------|---------------| -| value | 无 | Airflow版本 | -| git_version | 无 | Airflow git版本 | diff --git a/home/versioned_docs/version-v1.4.x/help/alert_console.md b/home/versioned_docs/version-v1.4.x/help/alert_console.md deleted file mode 100644 index 45ab7d791d3..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_console.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: alert_console -title: Custom console address in alarm template -sidebar_label: Console address in alarm template ---- - -> After the threshold is triggered, send the alarm information. When you notify through DingDing / enterprise Wechat / FeiShu robot or email, the alarm content has a detailed link to log in to the console. - -### Custom settings - -In our startup configuration file application.yml, find the following configuration - -```yml -alerter: - console-url: #Here is our custom console address -``` - -The default value is the official console address of HertzBeat. 
diff --git a/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md b/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md deleted file mode 100644 index 36e332d9b21..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_dingtalk.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: alert_dingtalk -title: Alert DingDing robot notification -sidebar_label: Alert DingDing robot notification -keywords: [open source monitoring tool, open source alerter, open source DingDing robot notification] ---- - -> After the threshold is triggered send alarm information and notify the recipient by DingDing robot. - -### Operation steps - -1. **【DingDing desktop client】-> 【Group settings】-> 【Intelligent group assistant】-> 【Add new robot-select custom】-> 【Set robot name and avatar】-> 【Note⚠️Set custom keywords: HertzBeat】 ->【Copy its webhook address after adding successfully】** - -> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. - -![email](/img/docs/help/alert-notice-8.png) - -2. **【Save access_token value of the WebHook address of the robot】** - -> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` -> Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` - -3. **【Alarm notification】->【Add new recipient】 ->【Select DingDing robot notification method】->【Set DingDing robot ACCESS_TOKEN】-> 【Confirm】** - -![email](/img/docs/help/alert-notice-9.png) - -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** - -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) - -### DingDing robot common issues - -1. DingDing group did not receive the robot alarm notification. - -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether DingDing robot is configured with security custom keywords :HertzBeat. -> Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. - -Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_discord.md b/home/versioned_docs/version-v1.4.x/help/alert_discord.md deleted file mode 100644 index 68296148f22..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_discord.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: alert_discord -title: Alert Discord Bot Notifications -sidebar_label: Alert Discord bot notification -keywords: [open source monitoring tool, open source alerter, open source Discord bot notification] ---- - -> Send an alarm message after the threshold is triggered, and notify the recipient through the Discord robot. - -## Steps - -### Create an application in Discord, create a robot under the application, and get the robot Token - -1. Visit [https://discord.com/developers/applications](https://discord.com/developers/applications) to create an application - -![bot](/img/docs/help/discord-bot-1.png) - -2. Create a robot under the application and get the robot Token - -![bot](/img/docs/help/discord-bot-2.png) - -![bot](/img/docs/help/discord-bot-3.png) - -3. 
Authorize the bot to the chat server - -> Authorize the robot under the OAuth2 menu, select `bot` for `SCOPES`, `BOT PERMISSIONS` select `Send Messages` - -![bot](/img/docs/help/discord-bot-4.png) - -> Obtain the URL generated at the bottom, and the browser accesses this URL to officially authorize the robot, that is, to set which chat server the robot will join. - -4. Check if your chat server has joined robot members - -![bot](/img/docs/help/discord-bot-5.png) - -### Enable developer mode and get Channel ID - -1. Personal Settings -> Advanced Settings -> Enable Developer Mode - -![bot](/img/docs/help/discord-bot-6.png) - -2. Get channel Channel ID - -> Right-click the chat channel you want to send the robot message to, click the COPY ID button to get the Channel ID - -![bot](/img/docs/help/discord-bot-7.png) - -### Add an alarm notification person in HertzBeat, the notification method is Discord Bot - -1. **[Alarm notification] -> [Add recipient] -> [Select Discord robot notification method] -> [Set robot Token and ChannelId] -> [OK]** - -![email](/img/docs/help/discord-bot-8.png) - -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** - -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. - -![email](/img/docs/help/alert-notice-policy.png) - -### Discord Bot Notification FAQ - -1. 
Discord doesn't receive bot alert notifications - -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured -> Please check whether the bot is properly authorized by the Discord chat server - -Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_email.md b/home/versioned_docs/version-v1.4.x/help/alert_email.md deleted file mode 100644 index c507a970bae..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_email.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: alert_email -title: Alert email notification -sidebar_label: Alert email notification -keywords: [open source monitoring tool, open source alerter, open source email notification] ---- - -> After the threshold is triggered send alarm information and notify the recipient by email. - -### Operation steps - -1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** - -![email](/img/docs/help/alert-notice-1.png) - -2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** - ![email](/img/docs/help/alert-notice-2.png) - -![email](/img/docs/help/alert-notice-3.png) - -3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** - -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) - -### Email notification common issues - -1. Hertzbeat deployed on its own intranet cannot receive email notifications - -> Hertzbeat needs to configure its own mail server, not tancloud. 
Please confirm whether you have configured its own mail server in application.yml - -2. Cloud environment tancloud cannot receive email notification - -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the mailbox is configured correctly and whether the alarm strategy association is configured. -> Please check whether the warning email is blocked in the trash can of the mailbox. - -Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md b/home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md deleted file mode 100644 index 1d5d41a15bc..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_enterprise_wechat_app.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: alert_enterprise_wechat_app -title: Alert Enterprise Wechat App notification -sidebar_label: Alert Enterprise Wechat App notification -keywords: [open source monitoring tool, open source alerter, open source Enterprise Wechat App notification] ---- - -> After the threshold is triggered send alarm information and notify the recipient by enterprise WeChat App. - -### Operation steps - -1. **【Enterprise Wechat backstage】-> 【App Management】-> 【Create an app】-> 【Set App message】->【Copy AgentId and Secret adding successfully】** - -![email](/img/docs/help/alert-wechat-1.jpg) - -2. **【Alarm notification】->【Add new recipient】 ->【Select Enterprise WeChat App notification method】->【Set Enterprise WeChat ID,Enterprise App ID and Enterprise App Secret 】-> 【Confirm】** - -![email](/img/docs/help/alert-wechat-2.jpg) - -3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** - -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-wechat-3.jpg) - -### Enterprise WeChat App common issues - -1. Enterprise WeChat App did not receive the alarm notification. - -> Please check if the user has application permissions. -> Please check if the enterprise application callback address settings are normal. -> Please check if the server IP is on the enterprise application whitelist. - -Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_feishu.md b/home/versioned_docs/version-v1.4.x/help/alert_feishu.md deleted file mode 100644 index 38f7c72cf03..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_feishu.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: alert_feishu -title: Alert FeiShu robot notification -sidebar_label: Alert FeiShu robot notification -keywords: [open source monitoring tool, open source alerter, open source feishu bot notification] ---- - -> After the threshold is triggered send alarm information and notify the recipient by FeiShu robot. - -### Operation steps - -1. **【FeiShu client】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** - -2. **【Save the key value of the WebHook address of the robot】** - -> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` - -3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** - -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** - -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) - -### FeiShu robot notification common issues - -1. FeiShu group did not receive the robot alarm notification. - -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. - -Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_slack.md b/home/versioned_docs/version-v1.4.x/help/alert_slack.md deleted file mode 100644 index 26bde4ed2e5..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_slack.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: alert_slack -title: Alert Slack Webhook Notifications -sidebar_label: Alert Slack Webhook Notification -keywords: [open source monitoring tool, open source alerter, open source slack webhook notification] ---- - -> Send an alarm message after the threshold is triggered, and notify the recipient through the Slack Webhook. - -## Steps - -### Open Webhook in Slack, get Webhook URL - -Refer to the official website document [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) - -### Add an alarm notifier to HertzBeat, and the notification method is Slack Webhook - -1. **【Alarm Notification】->【Add Recipient】->【Select Slack Webhook Notification Method】->【Set Webhook URL】-> 【OK】** - -![email](/img/docs/help/slack-bot-1.png) - -2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** - -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. - -![email](/img/docs/help/alert-notice-policy.png) - -### Slack Notification FAQ - -1. Slack did not receive the robot warning notification - -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured - -Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_smn.md b/home/versioned_docs/version-v1.4.x/help/alert_smn.md deleted file mode 100644 index 53774315561..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_smn.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: alert_smn -title: Alert Huawei Cloud SMN Notifications -sidebar_label: Alert Huawei Cloud SMN Notifications -keywords: [ open source monitoring tool, open source alerter, open source Huawei Cloud SMN notification ] ---- - -> Send an alarm message after the threshold is triggered, and notify the recipient through the Huawei Cloud SMN. - -### 操作步骤 - -1. **According to [Huawei Cloud SMN Official Document](https://support.huaweicloud.com/qs-smn/smn_json.html) activate the SMN service and configure SMN** - -![alert-notice-10](/img/docs/help/alert-notice-10.png) - -2. **Save topic URN for SMN** - -![alert-notice-11](/img/docs/help/alert-notice-11.png) - -3. **According to [Huawei Cloud Signature Document](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html) obtain AK, SK, and project ID** - -![alert-notice-12](/img/docs/help/alert-notice-12.png) - -![alert-notice-13](/img/docs/help/alert-notice-13.png) - -4. **【Alarm Notification】->【Add Recipient】->【Select Slack Webhook Notification Method】->【Set Huawei Cloud SMN AK, SK and other configurations】-> 【OK】** - -![alert-notice-14](/img/docs/help/alert-notice-14.png) - -5. 
**Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** - -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. - -![email](/img/docs/help/alert-notice-4.png) - -### Huawei Cloud SMN Notification FAQ - -1. Huawei Cloud SMN did not receive the robot warning notification - -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured - -Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_telegram.md b/home/versioned_docs/version-v1.4.x/help/alert_telegram.md deleted file mode 100644 index 1689788f0f4..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_telegram.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: alert_telegram -title: Alert Telegram Bot Notification -sidebar_label: Alert Telegram bot notification -keywords: [open source monitoring tool, open source alerter, open source Telegram bot notification] ---- - -> Send an alarm message after the threshold is triggered, and notify the recipient through the Telegram robot. - -## Steps - -### Create a bot in Telegram, get Bot Token and UserId - -1. Use [@BotFather](https://t.me/BotFather) to create your own bot and get an access token `Token` - -![telegram-bot](/img/docs/help/telegram-bot-1.png) - -2. 
Get the `User ID` of the recipient - -**Use the recipient account you want to notify to send a message to the newly created Bot account**, -Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token from the previous step to replace the `**, and respond to the first in the `Json` data A `result.message.from.id` value is the recipient's `User ID` - -```json -{ - "ok": true, - "result": [ - { - "update_id": 632299191, - "message": { - "from":{ - "id": "User ID" - }, - "chat":{ - }, - "date": 1673858065, - "text": "111" - } - } - ] -} -``` - -3. Record and save the `Token` and `User Id` we got - -### Add an alarm notification person to HertzBeat, the notification method is Telegram Bot - -1. **【Alarm Notification】->【Add Recipient】->【Select Telegram Robot Notification Method】->【Set Robot Token and UserId】-> 【OK】** - -![email](/img/docs/help/telegram-bot-2.png) - -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** - -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. - -![email](/img/docs/help/alert-notice-policy.png) - -### Telegram Bot Notification FAQ - -1. Telegram did not receive the robot warning notification - -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured -> UserId should be the UserId of the recipient of the message - -Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.4.x/help/alert_threshold.md b/home/versioned_docs/version-v1.4.x/help/alert_threshold.md deleted file mode 100644 index f4b934fd27c..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_threshold.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: alert_threshold -title: Threshold alarm configuration -sidebar_label: Threshold alarm configuration ---- - -> Configure the alarm threshold (warning alarm, critical alarm, emergency alarm) for the monitoring Metrics, and the system calculates and triggers the alarm according to the threshold configuration and the collected Metric data. - -### Operation steps - -1. **【Alarm configuration】->【Add new threshold】-> 【Confirm after configuration】** - -![threshold](/img/docs/help/alert-threshold-1.png) - -As shown above: - -**Metric object**:Select the monitoring Metric object for which we need to configure the threshold. Eg:website monitoring type -> summary Metric set -> responseTime-response time Metric -**Threshold trigger expression**:Calculate and judge whether to trigger the threshold according to this expression. See the page prompts for expression environment variables and operators. Eg:set the response time greater than 50 to trigger an alarm, and the expression is `responseTime > 50`. For detailed help on threshold expression, see [Threshold expression help](alert_threshold_expr) -**Alarm level**:The alarm level that triggers the threshold, from low to high: warning, critical, emergency. -**Trigger times**:How many times will the threshold be triggered before the alarm is really triggered. -**Notification template**:Notification information Template sent after alarm triggering, See page prompts for template environment variables, eg:`${app}.${metrics}.${metric} Metric's value is ${responseTime}, greater than 50 triggers an alarm` -**Global default**: Set whether this threshold is valid for such global Metrics, and the default is No. 
After adding a new threshold, you need to associate the threshold with the monitoring object, so that the threshold will take effect for this monitoring. -**Enable alarm**:This alarm threshold configuration is enabled or disabled. - -2. **Threshold association monitoring⚠️ 【Alarm configuration】-> 【Threshold just set】-> 【Configure associated monitoring】-> 【Confirm after configuration】** - -> **Note⚠️ After adding a new threshold, you need to associate the threshold with the monitoring object(That is, to set this threshold for which monitoring is effective), so that the threshold will take effect for this monitoring.**。 - -![threshold](/img/docs/help/alert-threshold-2.png) - -![threshold](/img/docs/help/alert-threshold-3.png) - -**After the threshold alarm is configured, the alarm information that has been successfully triggered can be seen in 【alarm center】.** -**If you need to notify the relevant personnel of the alarm information by email, Wechat, DingDing and Feishu, it can be configured in 【alarm notification】.** - -Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md b/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md deleted file mode 100644 index 45d80f82764..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_threshold_expr.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: alert_threshold_expr -title: Threshold trigger expression -sidebar_label: Threshold trigger expression ---- - -> When we configure the threshold alarm, we need to configure the threshold trigger expression. The system calculates whether to trigger the alarm according to the expression and the monitoring index value. Here is a detailed introduction to the use of the expression. - -#### Operators supported by expressions - -``` -equals(str1,str2) -== -< -<= -> ->= -!= -( ) -+ -- -&& -|| -``` - -Rich operators allow us to define expressions freely. 
-Note⚠️ For the equality of string, please use `equals(str1,str2)`, while for the equality judgment of number, please use == or != - -#### Supported environment variables - -> Environment variables, i.e. supported variables such as Metric values, are used in the expression. When the threshold value is calculated and judged, the variables will be replaced with actual values for calculation. - -Non fixed environment variables:These variables will change dynamically according to the monitoring Metric object we choose. For example, if we choose **response time Metric of website monitoring**, the environment variables will have `responseTime - This is the response time variable` -If we want to set **when the response time of website monitoring is greater than 400** to trigger an alarm,the expression is `responseTime>400` - -Fixed environment variables(Rarely used):`instance : Row instance value` -This variable is mainly used to calculate multiple instances. For example, we collected `usage`(`usage is non fixed environment variables`) of disk C and disk D, but we only want to set the alarm when **the usage of C disk is greater than 80**. Then the expression is `equals(instance,"c")&&usage>80` - -#### Expression setting case - -1. Website monitoring -> Trigger alarm when the response time is greater than or equal to 400ms - `responseTime>=400` -2. API monitoring -> Trigger alarm when the response time is greater than 3000ms - `responseTime>3000` -3. Entire site monitoring -> Trigger alarm when URL(instance) path is `https://baidu.com/book/3` and the response time is greater than 200ms - `equals(instance,"https://baidu.com/book/3")&&responseTime>200` -4. MYSQL monitoring -> status Metric group -> Trigger alarm when hreads_running(number of running threads) Metric is greater than 7 - `threads_running>7` - -Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.4.x/help/alert_webhook.md b/home/versioned_docs/version-v1.4.x/help/alert_webhook.md deleted file mode 100644 index d1741d71481..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_webhook.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -id: alert_webhook -title: Alert WebHook callback notification -sidebar_label: Alert webHook notification -keywords: [open source monitoring tool, open source alerter, open source webhook notification] ---- - -> After the threshold is triggered send alarm information and call the Webhook interface through post request to notify the recipient. - -### Operation steps - -1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** - -![email](/img/docs/help/alert-notice-5.png) - -2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** - -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) - -### WebHook callback POST body BODY content - -Content format:JSON - -```json -{ - "alarmId": 76456, - "target": "${target}", - "thresholdId": 33455, - "priority": 0, - "content": "udp_port monitoring availability alert, code is FAIL", - "status": 0, - "times": 1, - "triggerTime": "2022-02-25T13:32:13", - "tags": { - "app": "windows", - "monitorId": "180427708350720", - "metrics": "availability", - "code": "UN_CONNECTABLE", - "thresholdId": "112", - "monitorName": "WINDOWS_192.168.124.12" - } -} -``` - -| | | -|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| alarmId | integer($int64) title: Alarm record entity primary key index ID 告警记录实体主键索引ID | -| target | string title: Alert target object: monitor availability-available metrics-app.metrics.field 告警目标对象: 监控可用性-available 指标-app.metrics.field | -| thresholdId | integer($int64) title: Alarm definition ID associated with the alarm 告警关联的告警定义ID | -| priority | string($byte) title: Alarm level 0: high-emergency-critical alarm-red 1: medium-critical-critical alarm-orange 2: low-warning-warning alarm-yellow 告警级别 0:高-emergency-紧急告警-红色 1:中-critical-严重告警-橙色 2:低-warning-警告告警-黄色 | -| content | string title: The actual content of the alarm notification 告警通知实际内容 | -| status | string($byte) title: Alarm status: 0-normal alarm (to be processed) 1-threshold triggered but not reached the number of alarms 2-recovered alarm 3-processed 告警状态: 0-正常告警(待处理) 1-阈值触发但未达到告警次数 2-恢复告警 3-已处理 | -| times | integer($int32) title: Alarm threshold trigger times 告警阈值触发次数 | -| triggerTime | integer($int64) title: Alarm trigger time (timestamp in milliseconds) 首次告警触发时间(毫秒时间戳) | 
-| tags | example: {key1:value1} | - -### Webhook notification common issues - -1. WebHook callback did not take effect - -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the configured webhook callback address is correct. - -Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/alert_wework.md b/home/versioned_docs/version-v1.4.x/help/alert_wework.md deleted file mode 100644 index ce344200301..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/alert_wework.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: alert_wework -title: Alert enterprise Wechat notification -sidebar_label: Alert enterprise Wechat notification -keywords: [open source monitoring tool, open source alerter, open source WeWork notification] ---- - -> After the threshold is triggered send alarm information and notify the recipient by enterprise Wechat robot. - -### Operation steps - -1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** - -![email](/img/docs/help/alert-notice-6.jpg) - -2. **【Save the key value of the WebHook address of the robot】** - -> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` - -3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** - -![email](/img/docs/help/alert-notice-7.png) - -4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** - -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** - -![email](/img/docs/help/alert-notice-4.png) - -### Enterprise Wechat robot common issues - -1. The enterprise wechat group did not receive the robot alarm notification. - -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. - -Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.4.x/help/api.md b/home/versioned_docs/version-v1.4.x/help/api.md deleted file mode 100644 index 958fb532639..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/api.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: api -title: Monitoring HTTP API -sidebar_label: HTTP API -keywords: [open source monitoring tool, monitoring http api] ---- - -> Call HTTP API interface, check whether the interface is available, and monitor its response time and other Metrics. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | -| Request mode | Set the request mode of interface call:GET, POST, PUT, DELETE | -| Enable HTTPS | Whether to access the website through HTTPS. 
Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Username | User name used for interface Basic authentication or Digest authentication | -| Password | Password used for interface Basic authentication or Digest authentication | -| Content-Type | Set the resource type when carrying the BODY request body data request | -| Request BODY | Set the carry BODY request body data, which is valid when PUT or POST request method is used | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:summary - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|-------------------------| -| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.4.x/help/centos.md b/home/versioned_docs/version-v1.4.x/help/centos.md deleted file mode 100644 index 858a1d2bb94..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/centos.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: centos -title: CentOS operating system monitoring -sidebar_label: CentOS operating system -keywords: [open source monitoring tool, open source os monitoring tool, monitoring CentOS operating system metrics] ---- - -> Collect and monitor the general performance Metrics of CentOS operating system. 
- -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|--------------------------| -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | - -#### Metric set:cpu - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|------------------------------------------------| -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - -#### Metric set:memory - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------------------| 
-| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | - -#### Metric set:disk - -| Metric name | Metric unit | Metric help description | -|---------------|-------------|----------------------------------------| -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | - -#### Metric set:interface - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|------------------------------| -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | - -#### Metric set:disk_free - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % | usage | -| mounted | none | Mount point directory | diff --git a/home/versioned_docs/version-v1.4.x/help/dm.md b/home/versioned_docs/version-v1.4.x/help/dm.md deleted file mode 100644 index f8e031bfe20..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/dm.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: dm -title: Monitoring DM database -sidebar_label: DM Database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring DM database metrics] ---- - -> Collect and monitor the general performance metrics of the DM database. DM8+ is supported. 
- -### Configuration parameters - -| Parameter name | Parameter help description | -|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 5236. | -| Query Timeout | Set the timeout when the SQL query does not respond to data, in ms milliseconds, the default is 3000 milliseconds. | -| database name | database instance name, optional. | -| username | database connection username, optional | -| password | database connection password, optional | -| URL | Database connection URL, optional | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | - -### Collect metrics - -#### Metric collection: basic - -| Metric Name | Metric Unit | Metric Help Description | -|--------------|-------------|-------------------------------| -| PORT_NUM | None | Database exposed service port | -| CTL_PATH | None | Control File Path | -| MAX_SESSIONS | None | Maximum database connections | - -#### Metric collection: status - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|----------------------------------| -| status$ | None | Open/Close status of DM database | - -#### Metric collection: thread - -| Metric Name | Metric Unit | Metric Help Description | 
-|-------------|-------------|-------------------------------------------------------------------------| -| dm_sql_thd | None | Thread for writing dmsql dmserver | -| dm_io_thd | None | IO threads, controlled by IO_THR_GROUPS parameter, default is 2 threads | -| dm_quit_thd | None | Thread used to perform a graceful shutdown of the database | diff --git a/home/versioned_docs/version-v1.4.x/help/docker.md b/home/versioned_docs/version-v1.4.x/help/docker.md deleted file mode 100644 index 63fe3b03a19..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/docker.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: docker -title: Monitor:Docker Monitor -sidebar_label: Docker Monitor -keywords: [open source monitoring tool, open source docker monitoring tool, monitoring docker metrics] ---- - -> Collect and monitor general performance Metrics of Docker containers. - -## Pre-monitoring operations - -If you want to monitor the container information in `Docker`, you need to open the port according to the following steps, so that the collection request can obtain the corresponding information. - -**1. Edit the docker.server file:** - -````shell -vi /usr/lib/systemd/system/docker.service -```` - -Find the **[Service]** node, modify the ExecStart property, and add `-H tcp://0.0.0.0:2375` - -````shell -ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock -H tcp://0.0.0.0:2375 -```` - -This is equivalent to the **2375** port that is open to the outside world. Of course, it can be modified to other ports according to your own situation. - -**2. Reload the Docker configuration to take effect:** - -```shell -systemctl daemon-reload -systemctl restart docker -``` - -**Note: Remember to open the `2375` port number in the server console.** - -**3. If the above method does not work:** - -Open the `2375` port number inside the server. 
- -```shell -firewall-cmd --zone=public --add-port=2375/tcp --permanent -firewall-cmd --reload -``` - -### Configuration parameters - -| Parameter name | Parameter help description | -|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitor Host | Monitored peer IPV4, IPV6 or domain name. Note ⚠️ without protocol headers (eg: https://, http://). | -| Monitor Name | Identifies the name of this monitor. The name needs to be unique. | -| Port | The port provided by the database externally, the default is 2375. | -| Query Timeout | Set the timeout when getting the Docker server API interface, in ms, the default is 3000 ms. | -| Container Name | Generally monitors all running container information. | -| username | connection username, optional | -| password | connection password, optional | -| URL | Database connection URL, optional, if configured, the parameters such as database name, username and password in the URL will override the parameters configured above | -| Collection Interval | Monitor periodical collection data interval, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and then continue to add and modify operations if the detection is successful | -| Description Remarks | More remarks that identify and describe this monitoring, users can remark information here | - -### Collect metrics - -#### Metric collection: system - -| Metric Name | Metric Unit | Metric Help Description | -|--------------------|-------------|----------------------------------------------------| -| Name | None | Server Name | -| version | none | docker version number | -| os | none | server version eg: linux x86_64 | -| root_dir | none | docker folder directory eg: /var/lib/docker | -| containers | None | Total number of 
containers (running + not running) | -| containers_running | None | Number of running containers | -| containers_paused | none | number of containers in pause | -| images | None | The total number of container images. | -| ncpu | none | ncpu | -| mem_total | MB | Total size of memory used | -| system_time | none | system time | - -#### Metric collection: containers - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|----------------------------------------------| -| id | None | The ID of the container in Docker | -| name | None | The container name in the Docker container | -| image | None | Image used by the Docker container | -| command | None | Default startup command in Docker | -| state | None | The running state of the container in Docker | -| status | None | Update time in Docker container | - -#### Metrics collection: stats - -| Metric Name | Metric Unit | Metric Help Description | -|------------------|-------------|------------------------------------------------------------| -| name | None | The name in the Docker container | -| available_memory | MB | The amount of memory that the Docker container can utilize | -| used_memory | MB | The amount of memory already used by the Docker container | -| memory_usage | None | Memory usage of the Docker container | -| cpu_delta | None | The number of CPUs already used by the Docker container | -| number_cpus | None | The number of CPUs that the Docker container can use | -| cpu_usage | None | Docker container CPU usage | diff --git a/home/versioned_docs/version-v1.4.x/help/doris_be.md b/home/versioned_docs/version-v1.4.x/help/doris_be.md deleted file mode 100644 index 3e6fd37de03..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/doris_be.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -id: doris_be -title: 监控:DORIS数据库BE监控 -sidebar_label: DORIS数据库BE -keywords: [开源监控系统, 开源数据库监控, DORIS数据库BE监控] ---- - -> 对DORIS数据库FE的通用性能指标进行采集监控。支持DORIS2.0.0。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | 
-|--------|-----------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8040 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| 数据库名称 | 数据库实例名称,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:doris_be_load_channel_count - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------------------| -| value | 无 | 当前打开的 load channel 个数 | - -#### 指标集合:doris_be_memtable_flush_total - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------| -| value | 无 | memtable写入磁盘的个数累计值 | - -#### 指标集合:doris_be_plan_fragment_count - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------------------| -| value | 无 | 当前已接收的 fragment instance 的数量 | - -#### 指标集合:doris_be_process_thread_num - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------------------------| -| value | 无 | BE 进程线程数。通过 `/proc/pid/task` 采集 | - -#### 指标集合:doris_be_query_scan_rows - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------------------------------------------------------------------| -| value | 无 | 读取行数的累计值。这里只统计读取 Olap 表的数据量。并且是 RawRowsRead(部分数据行可能被索引跳过,并没有真正读取,但仍会记录到这个值中) | - -#### 指标集合:doris_be_result_buffer_block_count - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------------| -| value | 无 | 当前查询结果缓存中的 query 个数 | - -#### 指标集合:doris_be_send_batch_thread_pool_queue_size - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------------| -| value | 无 | 导入时用于发送数据包的线程池的排队个数 | - -#### 指标集合:doris_be_tablet_base_max_compaction_score - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------------------------| -| value | 无 | 当前最大的 Base Compaction Score | - -#### 指标集合:doris_be_timeout_canceled_fragment_count - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|----------------------------------| -| value | 无 | 因超时而被取消的 fragment instance 数量累计值 | - -#### 指标集合:doris_be_load_rows - -| 
指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------------| -| value | 无 | 通过 tablet sink 发送的行数累计 | - -#### 指标集合:doris_be_all_rowsets_num - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------------| -| value | 无 | 当前所有 rowset 的个数 | - -#### 指标集合:doris_be_all_segments_num - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------| -| value | 无 | 当前所有 segment 的个数 | - -#### 指标集合:doris_be_heavy_work_max_threads - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-------------------| -| value | 无 | brpc heavy线程池线程个数 | - -#### 指标集合:doris_be_light_work_max_threads - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-------------------| -| value | 无 | brpc light线程池线程个数 | - -#### 指标集合:doris_be_heavy_work_pool_queue_size - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------------------------| -| value | 无 | brpc heavy线程池队列最大长度,超过则阻塞提交work | - -#### 指标集合:doris_be_light_work_pool_queue_size - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------------------------| -| value | 无 | brpc light线程池队列最大长度,超过则阻塞提交work | - -#### 指标集合:doris_be_heavy_work_active_threads - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------| -| value | 无 | brpc heavy线程池活跃线程数 | - -#### 指标集合:doris_be_light_work_active_threads - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------| -| value | 无 | brpc light线程池活跃线程数 | - -#### 指标集合:doris_be_compaction_bytes_total - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------|------|------------------------------| -| base | 字节 | Base Compaction 的数据量累计 | -| cumulative | 字节 | Cumulative Compaction 的数据量累计 | - -#### 指标集合:doris_be_disks_avail_capacity - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------------------------------| -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的剩余空间 | - -#### 指标集合:doris_be_disks_total_capacity - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------------------------------| -| path | 无 | 指定数据目录 | -| value | 字节 | `{path="/path1/"}` 表示 `/path1` 目录所在磁盘的全部空间 | - -#### 
指标集合:doris_be_local_bytes_read_total - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|----------------------------| -| value | 字节 | 由 `LocalFileReader` 读取的字节数 | - -#### 指标集合:doris_be_local_bytes_written_total - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|----------------------------| -| value | 字节 | 由 `LocalFileWriter` 写入的字节数 | - -#### 指标集合:doris_be_memory_allocated_bytes - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------------------------------| -| value | 字节 | BE 进程物理内存大小,取自 `/proc/self/status/VmRSS` | diff --git a/home/versioned_docs/version-v1.4.x/help/doris_fe.md b/home/versioned_docs/version-v1.4.x/help/doris_fe.md deleted file mode 100644 index 23432ad2cbd..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/doris_fe.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -id: doris_fe -title: 监控:DORIS数据库FE监控 -sidebar_label: DORIS数据库FE -keywords: [开源监控系统, 开源数据库监控, DORIS数据库FE监控] ---- - -> 对DORIS数据库FE的通用性能指标进行采集监控。支持DORIS2.0.0。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|-----------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://) | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性 | -| 端口 | 数据库对外提供的端口,默认为8030 | -| 查询超时时间 | 设置连接未响应的超时时间,单位ms毫秒,默认3000毫秒 | -| 数据库名称 | 数据库实例名称,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:doris_fe_connection_total - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|-----------------| -| value | 无 | 当前FE的MySQL端口连接数 | - -#### 指标集合:doris_fe_edit_log_clean - -不应失败,如失败,需人工介入 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|----------------| -| success | 无 | 清理历史元数据日志成功的次数 | -| failed | 无 | 清理历史元数据日志失败的次数 | - -#### 指标集合:doris_fe_edit_log - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------|------|--------------| -| write | 无 | 元数据日志写入次数的计数 | -| read | 无 | 元数据日志读取次数的计数 | -| current | 无 | 元数据日志当前数量 | -| accumulated_bytes | 字节 | 元数据日志写入量的累计值 | -| current_bytes | 字节 | 元数据日志当前值 | - -#### 
指标集合:doris_fe_image_clean - -不应失败,如失败,需人工介入 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|------------------| -| success | 无 | 清理历史元数据镜像文件成功的次数 | -| failed | 无 | 清理历史元数据镜像文件失败的次数 | - -#### 指标集合:doris_fe_image_write - -不应失败,如失败,需人工介入 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------|------|----------------| -| success | 无 | 生成元数据镜像文件成功的次数 | -| failed | 无 | 生成元数据镜像文件失败的次数 | - -#### 指标集合:doris_fe_query_err - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|----------| -| value | 无 | 错误查询的累积值 | - -#### 指标集合:doris_fe_max_journal_id - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------------------------------------------------------------------------------------------------------| -| value | 无 | 当前FE节点最大元数据日志ID。如果是Master FE,则是当前写入的最大ID,如果是非Master FE,则代表当前回放的元数据日志最大ID。用于观察多个FE之间的 id 是否差距过大。过大则表示元数据同步出现问题 | - -#### 指标集合:doris_fe_max_tablet_compaction_score - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------------------------------------------------------------------------| -| value | 无 | 所有BE节点中最大的 compaction score 值。该值可以观测当前集群最大的 compaction score,以判断是否过高。如过高则可能出现查询或写入延迟 | - -#### 指标集合:doris_fe_qps - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------------------| -| value | 无 | 当前FE每秒查询数量(仅统计查询请求) | - -#### 指标集合:doris_fe_query_err_rate - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|---------| -| value | 无 | 每秒错误查询数 | - -#### 指标集合:doris_fe_report_queue_size - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------------------------------------------------------| -| value | 无 | BE的各种定期汇报任务在FE端的队列长度,该值反映了汇报任务在 Master FE 节点上的阻塞程度,数值越大,表示FE处理能力不足 | - -#### 指标集合:doris_fe_rps - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|--------------------------| -| value | 无 | 当前FE每秒请求数量(包含查询以及其他各类语句) | - -#### 指标集合:doris_fe_scheduled_tablet_num - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------|------|------------------------------------------------------------------------------------------| -| value | 无 | Master FE节点正在调度的 tablet 数量。包括正在修复的副本和正在均衡的副本,该数值可以反映当前集群,正在迁移的 tablet 
数量。如果长时间有值,说明集群不稳定 | - -#### 指标集合:doris_fe_txn_status - -可以观测各个状态下导入事务的数量,来判断是否有堆积 - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------|------|---------| -| unknown | 无 | 未知 | -| prepare | 无 | 准备中 | -| committed | 无 | 已提交 | -| visible | 无 | 可见 | -| aborted | 无 | 已中止/已撤销 | diff --git a/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md b/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md deleted file mode 100644 index 332767b2a39..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/dynamic_tp.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: dynamic_tp -title: Monitoring DynamicTp ThreadPool -sidebar_label: DynamicTp Monitor -keywords: [open source monitoring tool, open source dynamicTp monitoring tool, monitoring DynamicTp metrics] ---- - -> Collect and monitor the thread pool performance Metrics exposed by DynamicTp actuator. - -### PreRequisites - -1. Integration Using `DynamicTp` - -`DynamicTp` is a lightweight dynamic thread pool based on the configuration center of the Jvm language. It has built-in monitoring and alarm functions, which can be realized through SPI custom extensions. - -For integrated use, please refer to the document [Quick Start](https://dynamictp.cn/guide/use/quick-start.html) - -2. 
Open SpringBoot Actuator Endpoint to expose `DynamicTp` Metric interface - -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` - -Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: - -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4GB", - "totalMemory": "444MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81GB" - } -] -``` - -3. Add DynamicTp monitoring under HertzBeat middleware monitoring - -### Configuration parameters - -| Parameter name | Parameter help description | -| ------------ |------------------------------------ ------------------| -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| Port | The port provided by the application service, the default is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, note ⚠️Enable HTTPS, the default corresponding port needs to be changed to 443 | -| Base Path | Exposed interface path prefix, default /actuator | -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | - -### Collect metrics - -#### Metric collection: thread_pool - -| Metric Name | Metric Unit | Metric Help Description | -|--------------------------|-------------|---------------------------------------------| -| pool_name | None | Thread pool name | -| core_pool_size | None | Number of core threads | -| maximum_pool_size | None | Maximum number of threads | -| queue_type | None | Task queue type | -| queue_capacity | MB | task queue capacity | -| queue_size | None | The current occupied size of the task queue | -| fair | None | Queue mode, SynchronousQueue will be used | -| queue_remaining_capacity | MB | task queue remaining size | -| active_count | None | Number of active threads | -| task_count | None | Total number of tasks | -| completed_task_count | None | Number of completed tasks | -| largest_pool_size | None | The largest number of threads in history | -| pool_size | none | current number of threads | -| wait_task_count | None | Number of tasks waiting to be executed | -| reject_count | None | Number of rejected tasks | -| reject_handler_name | None | Reject policy type | -| dynamic | None | Dynamic thread pool or not | -| run_timeout_count | None | Number of running timeout tasks | -| queue_timeout_count | None | Number of tasks waiting for timeout | diff --git 
a/home/versioned_docs/version-v1.4.x/help/fullsite.md b/home/versioned_docs/version-v1.4.x/help/fullsite.md deleted file mode 100644 index bad94c4b751..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/fullsite.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: fullsite -title: Monitoring Full site -sidebar_label: Full site Monitor -keywords: [open source monitoring tool, open source website monitoring tool, monitoring sitemap metrics] ---- - -> Available or not to monitor all pages of the website. -> A website often has multiple pages provided by different services. We monitor the full site by collecting the SiteMap exposed by the website. -> Note⚠️ This monitoring requires your website to support SiteMap. We support SiteMap in XML and TXT formats. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| SiteMap | Relative path of website SiteMap address, eg:/sitemap.xml | -| Enable HTTPS | Whether to access the website through HTTPS. Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:summary - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|------------------------------------------------------| -| url | none | URL path of web page | -| statusCode | none | Response HTTP status code for requesting the website | -| responseTime | ms | Website response time | -| errorMsg | none | Error message feedback after requesting the website | diff --git a/home/versioned_docs/version-v1.4.x/help/guide.md b/home/versioned_docs/version-v1.4.x/help/guide.md deleted file mode 100644 index 9d2e9dcbf6b..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/guide.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -id: guide -title: Help Center -sidebar_label: Help Center ---- - -> Hertzbeat - An open source, real-time monitoring tool with custom-monitor and agentLess. -> Help documents and auxiliary information during use - -## 🔬 Monitoring services - -> Regularly collect and monitor the performance Metrics exposed by end-to-end services, provide visual interfaces, and process data for alarm and other service scheduling. -> Planned monitoring type:application service, database, operating system, cloud native, open source middleware. - -### Application service monitoring - - 👉 [Website monitoring](website)
- 👉 [HTTP API](api)
- 👉 [PING Connectivity](ping)
- 👉 [Port availability](port)
- 👉 [Full site monitoring](fullsite)
- 👉 [SSL Cert monitoring](ssl_cert)
- 👉 [JVM monitoring](jvm)
- 👉 [SpringBoot2.0](springboot2)
- -### Database monitoring - - 👉 [MYSQL database monitoring](mysql)
- 👉 [MariaDB database monitoring](mariadb)
- 👉 [PostgreSQL database monitoring](postgresql)
- 👉 [SqlServer database monitoring](sqlserver)
- 👉 [Oracle database monitoring](oracle)
- 👉 [DM database monitoring](dm)
- 👉 [OpenGauss database monitoring](opengauss)
- 👉 [IoTDB database monitoring](iotdb)
- -### Operating system monitoring - - 👉 [Linux operating system monitoring](linux)
- 👉 [Windows operating system monitoring](windows)
- 👉 [Ubuntu operating system monitoring](ubuntu)
- 👉 [Centos operating system monitoring](centos)
- -### Middleware monitoring - - 👉 [Zookeeper](zookeeper)
- 👉 [Kafka](kafka)
- 👉 [Tomcat](tomcat)
- 👉 [ShenYu](shenyu)
- 👉 [DynamicTp](dynamic_tp)
- 👉 [RabbitMQ](rabbitmq)
- 👉 [ActiveMQ](activemq)
- 👉 [Jetty](jetty)
- -### CloudNative monitoring - - 👉 [Docker](docker)
- 👉 [Kubernetes](kubernetes)
- -*** - -## 💡 Alarm service - -> More liberal threshold alarm configuration (calculation expression), supports email, SMS, WebHook, DingDing, WeChat and FeiShu for alarm notification. -> The positioning of alarm service is to trigger the threshold accurately and timely, and the alarm notification can be reached in time. - -### Alarm center - -> The triggered alarm information center provides query and filtering of alarm deletion, alarm processing, mark unprocessed, alarm level status, etc. - -### Alarm configuration - -> The Metric threshold configuration provides the Metric threshold configuration in the form of expression, which can set the alarm level, trigger times, alarm notification template and whether it is enabled, correlation monitoring and other functions. - -More details see 👉 [threshold alarm](alert_threshold)
-   👉 [Threshold expression](alert_threshold_expr) - -### Alarm notification - -> After triggering the alarm information, in addition to being displayed in the alarm center list, it can also be notified to the designated recipient in a specified way (e-mail, wechat and FeiShu etc.) -> Alarm notification provides different types of notification methods, such as email recipient, enterprise wechat robot notification, DingDing robot notification, and FeiShu robot notification. -> After setting the receiver, you need to set the associated alarm notification strategy to configure which alarm information is sent to which receiver. - - 👉 [Configure Email Notification](alert_email)
- 👉 [Configure Discord Notification](alert_webhook)
- 👉 [Configure Slack Notification](alert_webhook)
- 👉 [Configure Telegram Notification](alert_webhook)
- 👉 [Configure WebHook Notification](alert_webhook)
- 👉 [Configure enterprise WeChat Robot Notification](alert_wework)
- 👉 [Configure DingDing Robot Notification](alert_dingtalk)
- 👉 [Configure FeiShu Robot Notification](alert_feishu)
diff --git a/home/versioned_docs/version-v1.4.x/help/hadoop.md b/home/versioned_docs/version-v1.4.x/help/hadoop.md deleted file mode 100644 index e12a44807ea..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/hadoop.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: hadoop -title: Monitoring Hadoop -sidebar_label: Hadoop -keywords: [Open Source Monitoring System, Open Source Java Monitoring, Hadoop JVM Monitoring] ---- - -> Collect and monitor general performance metrics for the JVM virtual machine in Hadoop. - -**Protocol used: JMX** - -### Pre-monitoring steps - -> You need to enable JMX service in the Hadoop application before monitoring. HertzBeat uses the JMX protocol to collect performance metrics from Hadoop's JVM. - -### Steps to enable JMX protocol in the Hadoop application - -Add JVM parameters when the application starts. ⚠️Note that you can customize the exposed port and external IP. - -- 1. Enter the hadoop-env.sh configuration file and enter the following command in the terminal: - -```shell -vi $HADOOP_HOME/etc/hadoop/hadoop-env.sh -``` - -- 2. Add the following parameters, where `port` is the number of the custom-exposed port - -```shell -export HADOOP_OPTS= "$HADOOP_OPTS --Djava.rmi.server.hostname=<external-ip> --Dcom.sun.management.jmxremote.port=9999 --Dcom.sun.management.jmxremote.ssl=false --Dcom.sun.management.jmxremote.authenticate=false " -``` - -- 3. Save and exit, and then execute "start-all.sh" in the "$HADOOP_HOME/sbin" directory to restart the service. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring.
The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:memory_pool - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:code_cache (Only Support JDK8) - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:class_loading - -| Metric name | Metric unit | Metric help description | -|-----------------------|-------------|--------------------------| -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - -#### Metrics Set:thread - -| Metric name | Metric unit | Metric help description | -|-------------------------|-------------|----------------------------| -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | 
diff --git a/home/versioned_docs/version-v1.4.x/help/hive.md b/home/versioned_docs/version-v1.4.x/help/hive.md deleted file mode 100644 index 1293fbd3802..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/hive.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: hive -title: Monitoring Apache Hive -sidebar_label: Apache Hive -keywords: [open source monitoring tool, open source apache hive monitoring tool, monitoring apache hive metrics] ---- - -> Collect and monitor the general performance metrics exposed by Apache Hive. - -## Pre-monitoring operations - -If you want to monitor information in `Apache Hive` with this monitoring type, you need to open your `Hive Server2` in remoting mode. - -**1. Enable metastore:** - -```shell -hive --service metastore & -``` - -**2. Enable hive server2:** - -```shell -hive --service hiveserver2 & -``` - -### Configure parameters - -| Parameter name | Parameter help description | -|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 10002. | -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation will continue only if the probe is successful | -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | - -### Collect metrics - -#### metric Collection: basic - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|-----------------------------------------------------------| -| vm_name | None | The name of the virtual machine (VM) running HiveServer2. | -| vm_vendor | None | The vendor or provider of the virtual machine. | -| vm_version | None | The version of the virtual machine. | -| up_time | None | The duration for which HiveServer2 has been running. | - -#### metric Collection: environment - -| Metric Name | metric unit | Metrics help describe | -|----------------------|-------------|-------------------------------------------------------------------| -| https_proxyPort | None | The port number used for HTTPS proxy communication. | -| os_name | None | The name of the operating system on which HiveServer2 is running. | -| os_version | None | The version of the operating system. | -| os_arch | None | The architecture of the operating system. | -| java_runtime_name | None | The name of the Java runtime environment used by HiveServer2. | -| java_runtime_version | None | The version of the Java runtime environment.
| - -#### metric Collection: thread - -| Metric Name | metric unit | Metrics help describe | -|----------------------|-------------|----------------------------------------------------------------------| -| thread_count | None | The current number of threads being used by HiveServer2. | -| total_started_thread | None | The total count of threads started by HiveServer2 since its launch. | -| peak_thread_count | None | The highest number of threads used by HiveServer2 at any given time. | -| daemon_thread_count | None | The number of daemon threads currently active in HiveServer2. | - -#### metric Collection: code_cache - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|-------------------------------------------------------------------------| -| committed | MB | The amount of memory currently allocated for the memory pool. | -| init | MB | The initial amount of memory requested for the memory pool. | -| max | MB | The maximum amount of memory that can be allocated for the memory pool. | -| used | MB | The amount of memory currently being used by the memory pool. | diff --git a/home/versioned_docs/version-v1.4.x/help/iotdb.md b/home/versioned_docs/version-v1.4.x/help/iotdb.md deleted file mode 100644 index 011b9cbec12..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/iotdb.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -id: iotdb -title: Monitoring Apache IoTDB Database -sidebar_label: IoTDB Database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring IotDB database metrics] ---- - -> Monitor the running status of the Apache IoTDB Internet of Things time series database (JVM-related), memory task clusters and other related Metrics. - -## Operation before monitoring - -You need to enable the `metrics` function in IoTDB, which will provide interface data in the form of prometheus metrics. 
- -To enable the `metrics` function, refer to [Official Documentation](https://iotdb.apache.org/UserGuide/V0.13.x/Maintenance-Tools/Metric-Tool.html) - -The main steps are as follows: - -1. The metric collection is disabled by default, you need to modify the parameters in `conf/iotdb-metric.yml` first, then restart the server - -``` -# Whether to start the monitoring module, the default is false -enableMetric: true - -# Whether to enable operation delay statistics -enablePerformanceStat: false - -# Data provision method, externally provide metrics data through jmx and prometheus protocol, optional parameters: [JMX, PROMETHEUS, IOTDB], IOTDB is closed by default. -metricReporterList: - - JMX - - PROMETHEUS - -# The metric architecture used at the bottom layer, optional parameters: [MICROMETER, DROPWIZARD] -monitorType: MICROMETER - -# Initialize the level of the metric, optional parameters: [CORE, IMPORTANT, NORMAL, ALL] -metricLevel: IMPORTANT - -# Predefined metrics set, optional parameters: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] -predefinedMetrics: - - JVM - - FILE -``` - -2. Restart IoTDB, open a browser or use curl to access http://servier_ip:9091/metrics, and you can see the metric data. - -3. Add the corresponding IoTDB monitoring in HertzBeat. - -### Configuration parameters - -| Parameter name | Parameter help description | -|--------|----------------------------------------- --------------| -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| Port | The port provided by the IoTDB Metric interface, which is 9091 by default. 
| -| Timeout | HTTP request query timeout | -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | - -### Collect metrics - -#### Metric collection: cluster_node_status - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|---------------------------------| -| name | None | Node name IP | -| status | None | Node status, 1=online 2=offline | - -#### Metric collection: jvm_memory_committed_bytes - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|------------------------------------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | The memory size currently requested by the JVM | - -#### Metric collection: jvm_memory_used_bytes - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|-------------------------------| -| area | none | heap memory or nonheap memory | -| id | none | memory block | -| value | MB | JVM used memory size | - -#### Metric collection: jvm_threads_states_threads - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|---------------------------------------------------------| -| state | none | thread state | -| count | None | The number of threads corresponding to the thread state | - -#### Index collection: quantity business data - -| Metric Name | Metric Unit | Metric Help Description | -|--|------|----------------| -| name | None | Business name timeSeries/storageGroup/device/deviceUsingTemplate | -| type | none | type 
total/normal/template/template | -| value | None | The current timeSeries/storageGroup/device/The number of devices that have activated the template | - -#### Metric collection: cache_hit cache - -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------------------ ----------------------| -| name | None | Cache name chunk/timeSeriesMeta/bloomFilter | -| value | % | chunk/timeSeriesMeta cache hit rate, bloomFilter interception rate | - -#### Metric collection: queue task queue - -| Metric Name | Metric Unit | Metric Help Description | -| ----------- |------|------------------------------ ---------------------| -| name | None | Queue name compaction_inner/compaction_cross/flush | -| status | none | status running/waiting | -| value | None | Number of tasks at current time | - -#### Metric collection: thrift_connections - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|----------------------------------| -| name | None | name | -| connection | none | thrift current connection number | diff --git a/home/versioned_docs/version-v1.4.x/help/issue.md b/home/versioned_docs/version-v1.4.x/help/issue.md deleted file mode 100644 index 384387b45d6..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/issue.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: issue -title: Common issues -sidebar_label: Common issues ---- - -### Monitoring common issues - -1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** - -> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http - -2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** - -> Please check whether it is blocked by the firewall. 
For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) - -3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. - The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 - -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See - -4. If the history chart on the monitoring page is not displayed,popup [please configure dependency service on TDengine time series database] - -> As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service - TDengine database. -> Installation and initialization this database refers to [TDengine Installation and Initialization](../start/tdengine-init). - -### Docker Deployment common issues - -1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** - The problems lies in Docker container failed to visit and connect localhost port. Beacuse the docker default network mode is Bridge mode which can't access loacl machine through localhost. - -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` - -2. 
**According to the process deploy,visit no interface** - Please refer to the following points to troubleshoot issuess: - -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> ->> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. - -3. **Log an error TDengine connection or insert SQL failed** - -> one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. - -### Package Deployment common issues - -1. **According to the process deploy,visit no interface** - Please refer to the following points to troubleshoot issuess: - -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. -> three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. - -2. **Log an error TDengine connection or insert SQL failed** - -> one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 
diff --git a/home/versioned_docs/version-v1.4.x/help/jetty.md b/home/versioned_docs/version-v1.4.x/help/jetty.md deleted file mode 100644 index ccec65b5559..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/jetty.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -id: jetty -title: Monitoring Jetty Web Server -sidebar_label: Jetty Web Server -keywords: [open source monitoring tool, open source jetty web server monitoring tool, monitoring jetty metrics] ---- - -> Collect and monitor general performance metrics of Jetty application server - -**Usage protocol: JMX** - -### Pre-monitoring Operations - -> You need to enable the `JMX` service in the JVM application. HertzBeat uses the JMX protocol to collect metrics for the JVM. - -#### Jetty application server opens JMX protocol steps - -[Refer to official documentation](https://www.eclipse.org/jetty/documentation/jetty-10/operations-guide/index.html#og-jmx-remote) - -1. Start the JMX JMX-REMOTE module in Jetty - -```shell -java -jar $JETTY_HOME/start.jar --add-module=jmx -java -jar $JETTY_HOME/start.jar --add-module=jmx-remote -``` - -Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file - -2. Edit the `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file to modify the JMX IP port and other parameters. - -**`localhost` needs to be modified to expose the IP** - -```text -## The host/address to bind the RMI server to. -# jetty.jmxremote.rmiserverhost=localhost - -## The port the RMI server listens to (0 means a random port is chosen). -# jetty.jmxremote.rmiserverport=1099 - -## The host/address to bind the RMI registry to. -# jetty.jmxremote.rmiregistryhost=localhost - -## The port the RMI registry listens to. -# jetty.jmxremote.rmiregistryport=1099 - -## The host name exported in the RMI stub. --Djava.rmi.server.hostname=localhost -``` - -3. Restart Jetty Server. 
- -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:memory_pool - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:class_loading - -| Metric name | Metric unit | Metric help description | -|-----------------------|-------------|--------------------------| -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - -#### Metrics Set:thread - -| Metric name | Metric unit | Metric help description | -|-------------------------|-------------|----------------------------| -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread 
Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | diff --git a/home/versioned_docs/version-v1.4.x/help/jvm.md b/home/versioned_docs/version-v1.4.x/help/jvm.md deleted file mode 100644 index 477d9fbece1..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/jvm.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: jvm -title: Monitoring JVM -sidebar_label: JVM Monitor -keywords: [open source monitoring tool, open source java jvm monitoring tool, monitoring jvm metrics] ---- - -> Collect and monitor the general performance Metrics of JVM. - -**Protocol Use:JMX** - -### JVM App Enable JMX Protocol - -1. Add JVM `VM options` When Start Server ⚠️ customIP - -Refer: - -```shell --Djava.rmi.server.hostname=customIP --Dcom.sun.management.jmxremote.port=9999 --Dcom.sun.management.jmxremote.ssl=false --Dcom.sun.management.jmxremote.authenticate=false -``` - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:memory_pool - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:code_cache (Only Support JDK8) - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:class_loading - -| Metric name | Metric unit | Metric help description | -|-----------------------|-------------|--------------------------| -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - -#### Metrics Set:thread - -| Metric name | Metric unit | Metric help description | -|-------------------------|-------------|----------------------------| -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | diff --git a/home/versioned_docs/version-v1.4.x/help/kafka.md b/home/versioned_docs/version-v1.4.x/help/kafka.md deleted file mode 100644 index 48d06b2037b..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/kafka.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: kafka -title: Monitor:Apache Kafka -sidebar_label: Apache Kafka -keywords: [open source monitoring tool, open source apache kafka monitoring tool, 
monitoring apache kafka metrics] ---- - -> Collect and monitor the general performance Metrics of Apache Kafka. - -**Protocol Use:JMX** - -### Kafka Enable JMX Protocol - -1. Install Kafka - -2. Modify `kafka-server-start.sh` - -Append content in kafka-server-start.sh, Attention Replace Port And IP. - -```shell -export JMX_PORT=9999; -export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=<server-ip> -Dcom.sun.management.jmxremote.rmi.port=9999 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"; - -# Already Has -exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" -``` - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring.
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:server_info - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| Version | | Kafka Version | -| StartTimeMs | ms | Start Time | -| CommitId | | Version Commit ID | - -#### Metrics Set:memory_pool - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:active_controller_count - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|--------------------------------| -| Value | | server active controller count | - -#### Metrics Set:broker_partition_count - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| Value | | broker partition count | - -#### Metrics Set:broker_leader_count - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| Value | | broker leader count | - -#### Metrics Set:broker_handler_avg_percent - -| Metric name | Metric unit | Metric help description | -|-------------------|-------------|-------------------------| -| EventType | | event type | -| RateUnit | | rate unit | -| Count | | percent count | -| OneMinuteRate | % | One Minute Rate | -| FiveMinuteRate | % | Five Minute Rate | -| MeanRate | % | Mean Rate | -| FifteenMinuteRate | % | Fifteen Minute Rate | diff --git a/home/versioned_docs/version-v1.4.x/help/kubernetes.md b/home/versioned_docs/version-v1.4.x/help/kubernetes.md deleted file mode 100644 index 3cb2336e768..00000000000 --- 
a/home/versioned_docs/version-v1.4.x/help/kubernetes.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: kubernetes -title: Monitoring Kubernetes -sidebar_label: Kubernetes Monitor -keywords: [open source monitoring tool, open source kubernetes monitoring tool, monitoring kubernetes metrics] ---- - -> Collect and monitor the general performance metrics of Kubernetes. - -## Pre-monitoring operations - -If you want to monitor the information in 'Kubernetes', you need to obtain an authorization token that can access the API Server, so that the collection request can obtain the corresponding information. - -Refer to the steps to obtain the token - -#### Method one - -1. Create a service account and bind the default cluster-admin administrator cluster role - -```kubectl create serviceaccount dashboard-admin -n kube-system``` - -2. User Authorization - -```shell -kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin -kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' -kubectl describe secret {secret} -n kube-system -``` - -#### Method two - -```shell -kubectl create serviceaccount cluster-admin -kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-admin --serviceaccount=default:cluster-admin -kubectl create token --duration=1000h cluster-admin -``` - -### Configure parameters - -| Parameter name | Parameter help description | -|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitor Host | The monitored peer IPV4, IPV6 or domain name. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. 
| -| APiServer port | K8s APiServer port, default 6443 | -| token | Authorize the Access Token | -| URL | The database connection URL is optional, if configured, the database name, user name and password parameters in the URL will override the parameter | configured above | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | - -### Collect metrics - -#### metric collection: nodes - -| Metric Name | metric unit | Metrics help describe | -|--------------------|-------------|-----------------------|-----------| -| node_name | None | Node name | -| is_ready | None | Node Status | -| capacity_cpu | None | CPU capacity | -| allocatable_cpu | None | CPU | allotted | -| capacity_memory | None | Memory capacity | -| allocatable_memory | None | Memory | allocated | -| creation_time | None | Node creation time | - -#### metric Collection: namespaces - -| Metric Name | metric unit | Metrics help describe | -|---------------|-------------|-----------------------| -| namespace | None | namespace name | -| status | None | Status | -| creation_time | None | Created | - -#### metric collection: pods - -| Metric Name | metric unit | Metrics help describe | -|---------------|-------------|-------------------------------|--------------------------| -| pod | None | Pod name | -| namespace | None | The namespace | to which the pod belongs | -| status | None | Pod status | -| restart | None | Number of restarts | -| host_ip | None | The IP address of the host is | -| pod_ip | None | pod ip | -| creation_time | None | Pod creation time | -| 
start_time | None | Pod startup time | - -#### metric Collection: services - -| Metric Name | metric unit | Metrics help describe | -|---------------|-------------|-----------------------------------------------------------|------------------------------| -| service | None | Service Name | -| namespace | None | The namespace | to which the service belongs | -| type | None | Service Type ClusterIP NodePort LoadBalancer ExternalName | -| cluster_ip | None | cluster ip | -| selector | None | tag selector matches | -| creation_time | None | Created | diff --git a/home/versioned_docs/version-v1.4.x/help/linux.md b/home/versioned_docs/version-v1.4.x/help/linux.md deleted file mode 100644 index f5c77a72ca6..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/linux.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: linux -title: Monitoring:Linux operating system monitoring -sidebar_label: Linux operating system -keywords: [open source monitoring tool, open source linux monitoring tool, monitoring linux metrics] ---- - -> Collect and monitor the general performance Metrics of Linux operating system. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. 
The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|--------------------------| -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | - -#### Metric set:cpu - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|------------------------------------------------| -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - -#### Metric set:memory - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------------------| -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | - -#### Metric set:disk - -| Metric name | Metric unit | Metric help description | -|---------------|-------------|----------------------------------------| -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| 
block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | - -#### Metric set:interface - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|------------------------------| -| interface_name | none | Network card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | - -#### Metric set:disk_free - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % | usage | -| mounted | none | Mount point directory | diff --git a/home/versioned_docs/version-v1.4.x/help/mariadb.md b/home/versioned_docs/version-v1.4.x/help/mariadb.md deleted file mode 100644 index 8373b61cec3..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/mariadb.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: mariadb -title: Monitoring:MariaDB database monitoring -sidebar_label: MariaDB database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring mariadb database metrics] ---- - -> Collect and monitor the general performance Metrics of MariaDB database. Support MariaDB5+. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 3306 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|-----------------|-------------|------------------------------------| -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | - -#### Metric set:status - -| Metric name | Metric unit | Metric help description | -|-------------------|-------------|------------------------------------| -| threads_created | none | MariaDB created total connections | -| threads_connected | none | MariaDB connected connections | -| threads_cached | none | MariaDB current cached connections | -| threads_running | none | MariaDB current active connections | - -#### Metric set:innodb - -| Metric name | Metric unit | Metric help description | -|---------------------|-------------|------------------------------------------------------| -| innodb_data_reads | none | innodb average number of reads from files per 
second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/versioned_docs/version-v1.4.x/help/memcached.md b/home/versioned_docs/version-v1.4.x/help/memcached.md deleted file mode 100644 index f3c1ddfab55..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/memcached.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: memcached -title: Monitoring Memcached -sidebar_label: Memcached Monitor -keywords: [ open source monitoring tool, open source Memcached monitoring tool, monitoring memcached metrics ] ---- - -> Collect and monitor the general performance Metrics of Memcached. - -**Protocol Use:Memcached** - -```text -The default YML configuration for the memcache version is in compliance with 1.4.15. -You need to use the stats command to view the parameters that your memcache can monitor -``` - -### - -**1、Obtain usable parameter indicators through commands such as stats、stats setting、stats settings. - -```shell -# telnet ip port -[root@server ~]# telnet localhost 11211 -Trying ::1... -Connected to localhost. -Escape character is '^]'. -stats -STAT pid 15168 -STAT uptime 11691 -STAT time 1702569246 -STAT version 1.4.15 -... -``` - -**There is help_doc: ** - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | -| Port | Port provided by Memcached | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:server_info - -| Metric name | Metric unit | Metric help description | -|------------------|-------------|---------------------------------------------------| -| pid | | Memcache server process ID | -| uptime | s | The number of seconds the server has been running | -| version | | Memcache version | -| curr_connections | | Current number of connections | -| auth_errors | | Number of authentication failures | -| threads | | Current number of threads | -| item_size | byte | The size of the item | -| item_count | | Number of items | -| curr_items | | The total number of data currently stored | -| total_items | | The total number of data stored since startup | -| bytes | byte | The current number of bytes occupied by storage | -| cmd_get | | Get command request count | -| cmd_set | | Set command request count | -| cmd_flush | | Flush command request count | -| get_misses | | Get command misses | -| delete_misses | | Delete command misses | diff --git a/home/versioned_docs/version-v1.4.x/help/mysql.md b/home/versioned_docs/version-v1.4.x/help/mysql.md deleted file mode 100644 index 86922782e27..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/mysql.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: mysql -title: Monitoring:MySQL database monitoring -sidebar_label: MySQL database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring mysql database metrics] 
---- - -> Collect and monitor the general performance Metrics of MySQL database. Support MYSQL5+. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 3306 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|-----------------|-------------|------------------------------------| -| version | none | Database version | -| port | none | Database exposure service port | -| datadir | none | Database storage data disk address | -| max_connections | none | Database maximum connections | - -#### Metric set:status - -| Metric name | Metric unit | Metric help description | -|-------------------|-------------|----------------------------------| -| threads_created | none | MySql created total connections | -| threads_connected | none | MySql connected connections | -| threads_cached | none | MySql current cached connections | -| threads_running | none | MySql current active connections | - -#### Metric set:innodb - -| Metric name | Metric unit | Metric help description | -|---------------------|-------------|------------------------------------------------------| -| innodb_data_reads | none | innodb average number of reads from files per second | -| innodb_data_writes | none | innodb average number of writes from file per second | -| innodb_data_read | KB | innodb average amount of data read per second | -| innodb_data_written | KB | innodb average amount of data written per second | diff --git a/home/versioned_docs/version-v1.4.x/help/nebulagraph.md b/home/versioned_docs/version-v1.4.x/help/nebulagraph.md deleted file mode 100644 index 60ac139f827..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/nebulagraph.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -id: nebulaGraph -title: Monitoring NebulaGraph -sidebar_label: NebulaGraph Database -keywords: [ open source monitoring tool, open source NebulaGraph monitoring tool, monitoring NebulaGraph metrics ] ---- - 
-> Collect and monitor the general performance Metrics of nebulaGraph. - -**Protocol Use:nebulaGraph** - -```text -The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. -nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. -``` - -### - -**1、Obtain available parameters through the stats and rocksdb stats interfaces.** - -1.1、 If you only need to get nebulaGraph_stats, you need to ensure that you have access to stats, or you'll get errors. - -The default port is 19669 and the access address is - -1.2、If you need to obtain additional parameters for rocksdb stats, you need to ensure that you have access to rocksdb -stats, otherwise an error will be reported. - -Once you connect to NebulaGraph for the first time, you must first register your Storage service in order to properly -query your data. - -**There is help_doc: ** - -**** - -The default port is 19779 and the access address is: - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| graphPort | Port of the Graph service provided by Nebula Graph | -| timePeriod | The value can be 5 seconds, 60 seconds, 600 seconds, or 3600 seconds, indicating the last 5 seconds, last 1 minute, last 10 minutes, and last 1 hour, respectively. 
| -| storagePort | Port of the storage service provided by Nebula Graph | -| Timeout | Allow collection response time | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:nebulaGraph_stats - -Too many indicators, related links are as follows -**** - -| Metric name | Metric unit | Metric help description | -|---------------------------------------|-------------|--------------------------------------------------------------| -| num_queries_hit_memory_watermark_rate | | The rate of statements that reached the memory watermark. | -| num_queries_hit_memory_watermark_sum | | The sum of statements that reached the memory watermark. | -| num_reclaimed_expired_sessions_sum | | Number of expired sessions actively reclaimed by the server. | -| ... | | ... | - -#### Metrics Set:rocksdb_stats - -Too many indicators, related links are as follows -**** - -| Metric name | Metric unit | Metric help description | -|----------------------------|-------------|-------------------------------------------------------------| -| rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. | -| rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | -| ... | | ... 
| diff --git a/home/versioned_docs/version-v1.4.x/help/nginx.md b/home/versioned_docs/version-v1.4.x/help/nginx.md deleted file mode 100644 index a5662be985f..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/nginx.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -id: nginx -title: Monitoring Nginx -sidebar_label: Nginx Monitor -keywords: [open source monitoring tool, open source java monitoring tool, monitoring nginx metrics] ---- - -> Collect and monitor the general performance Metrics of Nginx. - -**Protocol Use:Nginx** - -### Need Enable `ngx_http_stub_status_module` And `ngx_http_reqstat_module` Module - -If you want to monitor information in 'Nginx' with this monitoring type, you need to modify your nginx configure file for enable the module monitor. - -### Enable `ngx_http_stub_status_module` - -1. Check if `ngx_http_stub_status_module` has been added - -```shell -nginx -V -``` - -View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. - -2. Compile and install Nginx, add `ngx_http_stub_status_module` module - -Download Nginx and unzip it, execute the following command in the directory - -```shell - -./configure --prefix=/usr/local/nginx --with-http_stub_status_module - -make && make install -``` - -3. Modify Nginx configure file - -Modify the `nginx.conf` file and add the monitoring module exposed endpoint, as follows: - -```shell -# modify nginx.conf -server { - listen 80; # port - server_name localhost; - location /nginx-status { - stub_status on; - access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } -} -``` - -4. Reload Nginx - -```shell - -nginx -s reload -``` - -5. Access `http://localhost/nginx-status` in the browser to view the Nginx monitoring status information. 
- -### Enable `ngx_http_reqstat_module` - -```shell -# install `ngx_http_reqstat_module` -wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip - -unzip ngx_req_status.zip - -patch -p1 < ../ngx_req_status-master/write_filter.patch - -./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master - -make -j2 - -make install -``` - -2. Modify Nginx configure file - -update `nginx.conf` file, add status module exposed endpoint, as follows: - -```shell -# modify nginx.conf -http { - req_status_zone server_name $server_name 256k; - req_status_zone server_addr $server_addr 256k; - - req_status server_name server_addr; - - server { - location /req-status { - req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } - } -} -``` - -3. Reload Nginx - -```shell - -nginx -s reload -``` - -4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. - -**Refer Doc: ** - -**⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Nginx | -| Timeout | Allow collection response time | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:nginx_status - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-----------------------------------------| -| accepts | | Accepted connections | -| handled | | Successfully processed connections | -| active | | Currently active connections | -| dropped | | Discarded connections | -| requests | | Client requests | -| reading | | Connections performing read operations | -| writing | | Connections performing write operations | -| waiting | | Waiting connections | - -#### Metrics Set:req_status - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|--------------------------------| -| zone_name | | Group category | -| key | | Group name | -| max_active | | Maximum concurrent connections | -| max_bw | kb | Maximum bandwidth | -| traffic | kb | Total traffic | -| requests | | Total requests | -| active | | Current concurrent connections | -| bandwidth | kb | Current bandwidth | diff --git a/home/versioned_docs/version-v1.4.x/help/ntp.md b/home/versioned_docs/version-v1.4.x/help/ntp.md deleted file mode 100644 index fc7f7925ca6..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/ntp.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: ntp -title: Monitoring NTP -sidebar_label: NTP Monitor -keywords: [ open source monitoring tool, open source NTP monitoring tool, monitoring NTP metrics ] ---- - -> Collect and monitor the general performance Metrics of NTP. 
- -**Protocol Use:NTP** - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:summary - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|------------------------------------------------------------------------------------------| -| responseTime | ms | The time it takes for the NTP server to respond to a request). | -| time | ms | The current time reported by the NTP server). | -| date | | The date corresponding to the current time reported by the NTP server). | -| offset | ms | The time difference between the NTP server's clock and the client's clock). | -| delay | ms | The time it takes for a request to reach the NTP server and for the response to return). | -| version | | The version number of the NTP protocol used by the server). | -| mode | | The operating mode of the NTP server, such as client, server, or broadcast). | -| stratum | | The stratumevel of the NTP server, indicating its distance from a reference clock). 
| -| referenceId | | An identifier that indicates the reference clock or time source used by the NTP server). | -| precision | | The precision of the NTP server's clock, indicating its accuracy). | diff --git a/home/versioned_docs/version-v1.4.x/help/opengauss.md b/home/versioned_docs/version-v1.4.x/help/opengauss.md deleted file mode 100644 index 3490bb8b003..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/opengauss.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: opengauss -title: OpenGauss Database Monitor -sidebar_label: OpenGauss Database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring opengauss database metrics] ---- - -> Collect and monitor the general performance Metrics of OpenGauss database. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|-----------------|-------------|-------------------------------------------| -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | - -#### Metric set:state - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in 
the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| stats_reset | none | The last time these statistics were reset | - -#### Metric set:activity - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|--------------------------------------| -| running | connections | Number of current client connections | diff --git a/home/versioned_docs/version-v1.4.x/help/oracle.md b/home/versioned_docs/version-v1.4.x/help/oracle.md deleted file mode 100644 index 978e6736620..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/oracle.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: oracle -title: Monitoring:ORACLE database monitoring -sidebar_label: ORACLE database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring oracle database metrics] ---- - -> Collect and monitor the general performance Metrics of ORACLE database. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. 
The default is 1521 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|------------------|-------------|-------------------------| -| database_version | none | Database version | -| database_type | none | Database type | -| hostname | none | Host name | -| instance_name | none | Database instance name | -| startup_time | none | Database start time | -| status | none | Database status | - -#### Metric set:tablespace - -| Metric name | Metric unit | Metric help description | -|-----------------|-------------|-------------------------| -| file_id | none | File ID | -| file_name | none | File name | -| tablespace_name | none | Table space name | -| status | none | Status | -| bytes | MB | Size | -| blocks | none | Number of blocks | - -#### Metric set:user_connect - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|---------------------------| -| username | none | Username | -| counts | number | Current connection counts | - -#### Metric set:performance - -| Metric 
name | Metric unit | Metric help description | -|-------------|-------------|-----------------------------| -| qps | QPS | I/O Requests per second | -| tps | TPS | User transaction per second | -| mbps | MBPS | I/O Megabytes per second | diff --git a/home/versioned_docs/version-v1.4.x/help/ping.md b/home/versioned_docs/version-v1.4.x/help/ping.md deleted file mode 100644 index bed89d53dcf..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/ping.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: ping -title: Monitoring:PING connectivity -sidebar_label: PING connectivity -keywords: [open source monitoring tool, open source network monitoring tool, monitoring ping metrics] ---- - -> Ping the opposite end HOST address and judge its connectivity. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Ping timeout | Set the timeout when Ping does not respond to data, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:summary - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|-------------------------| -| responseTime | ms | Website response time | - -### Common Problem - -1. Ping connectivity monitoring exception when installing hertzbeat for package deployment. - The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 - -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See diff --git a/home/versioned_docs/version-v1.4.x/help/pop3.md b/home/versioned_docs/version-v1.4.x/help/pop3.md deleted file mode 100644 index c73884a0afe..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/pop3.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -id: pop3 -title: Monitoring POP3 -sidebar_label: POP3 Monitor -keywords: [open source monitoring tool, open source java monitoring tool, monitoring POP3 metrics] ---- - -> Collect and monitor the general performance Metrics of POP3. - -**Protocol Use:POP3** - -### Enable POP3 Service - -If you want to monitor information in 'POP3' with this monitoring type, you just need to open `POP3` service in your mail server. - -**1、Open `POP3` Service:** - -```text -以qq邮箱为例【其它邮箱类似】: - 1. 点击`设置`选项 - 2. 选择`账号` - 3. 找到开启SMTP/POP3/IMAP选项,并开启 - 4. 得到POP3服务器域名,端口号,以及授权码【开启SMTP/POP3/IMAP服务后,qq邮箱提供】 - 5. 
通过POP3服务器域名,端口号,qq邮箱账号以及授权码连接POP3服务器,采集监控指标 -``` - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by POP3 | -| SSL | POP3 If enabled SSL | -| Timeout | Allow collection response time | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:email_status - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|-----------------------------------------| -| email_count | | Number of emails | -| mailbox_size | kb | The total size of emails in the mailbox | diff --git a/home/versioned_docs/version-v1.4.x/help/port.md b/home/versioned_docs/version-v1.4.x/help/port.md deleted file mode 100644 index 8d58ac1f5c5..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/port.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: port -title: Monitoring:port availability -sidebar_label: Port availability -keywords: [open source monitoring tool, open source port monitoring tool, monitoring port metrics] ---- - -> Judge whether the exposed port of the opposite end service is available, then judge whether the opposite end service is available, and collect 
Metrics such as response time for monitoring. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Connection timeout | Waiting timeout for port connection, unit:ms, default: 3000ms | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:summary - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|-------------------------| -| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.4.x/help/postgresql.md b/home/versioned_docs/version-v1.4.x/help/postgresql.md deleted file mode 100644 index 5191f7d325d..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/postgresql.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: postgresql -title: Monitoring:PostgreSQL database monitoring -sidebar_label: PostgreSQL database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring postgresql database metrics] ---- - -> Collect and monitor the general performance Metrics of PostgreSQL database. 
Support PostgreSQL 10+. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 5432 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|-----------------|-------------|-------------------------------------------| -| server_version | none | Version number of the database server | -| port | none | Database server exposure service port | -| server_encoding | none | Character set encoding of database server | -| data_directory | none | Database storage data disk address | -| max_connections | connections | Database maximum connections | - -#### Metric set:state - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| name | none | Database name, or share-object is a shared object | -| conflicts | times | The number of queries canceled in the database due to a conflict with recovery | -| deadlocks | number | Number of deadlocks detected in the database | -| blks_read | times | The number of disk blocks read in the database | -| blks_hit | times | Times the disk block has been found to be in the buffer, so there is no need to read it once (This only includes hits in the PostgreSQL buffer, not in the operating system file system buffer) | -| blk_read_time | ms | Time spent by the backend reading data file blocks in the database | -| blk_write_time | ms | Time spent by the backend writing data file blocks in the database | -| stats_reset | none | The last time these statistics were reset | - -#### Metric set:activity - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|--------------------------------------| -| running | 
connections | Number of current client connections | diff --git a/home/versioned_docs/version-v1.4.x/help/rabbitmq.md b/home/versioned_docs/version-v1.4.x/help/rabbitmq.md deleted file mode 100644 index e49d572ee72..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/rabbitmq.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -id: rabbitmq -title: Monitoring RabbitMQ -sidebar_label: RabbitMQ Monitor -keywords: [open source monitoring tool, open source rabbitmq monitoring tool, monitoring rabbitmq metrics] ---- - -> Monitoring the running status of RabbitMQ message middleware, nodes, topics and other related metrics. - -### Pre-monitoring Operations - -> HertzBeat uses RabbitMQ Management's Rest Api to collect RabbitMQ metric data. -> Therefore, you need to enable the Management plug-in in your RabbitMQ environment - -1. Open the Management plugin, or use the self-opening version - -```shell -rabbitmq-plugins enable rabbitmq_management -``` - -2. Access with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. - -3. Just add the corresponding RabbitMQ monitoring in HertzBeat, the parameters use the IP port of Management, and the default account password. - -### Configuration parameters - -| Parameter name | Parameter help description | -|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| Port | The HTTP port provided by RabbitMQ Management, the default is 15672. 
| -| Username | Username used for interface Basic authentication | -| Password | The password used for interface Basic authentication | -| Timeout | HTTP request query timeout | -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | - -### Collect Metrics - -#### metrics: overview - -| Metric Name | Metric Unit | Metric Description | -|--------------------|-------------|------------------------------------| -| product_version | None | Product Version | -| product_name | None | Product name | -| rabbitmq_version | none | rabbitmq version | -| management_version | None | rabbitmq management plugin version | -| erlang_version | None | erlang version | -| cluster_name | None | Cluster name | -| rates_mode | None | rates mode | - -#### metrics: object_totals - -| Metric Name | Metric Unit | Metric Description | -|-------------|-------------|-----------------------------| -| channels | none | total number of channels | -| connections | none | total number of connections | -| consumers | none | total number of consumers | -| exchanges | none | total number of exchanges | -| queues | none | total number of queues | - -#### metrics: nodes - -| Metric Name | Metric Unit | Metric Description | -|--------------------|-------------|-----------------------------------------------------------| -| name | None | The node name | -| type | None | The node type | -| running | None | Running state | -| os_pid | None | Pid in OS | -| mem_limit | MB | Memory usage high watermark | -| mem_used | MB | Total amount of memory used | -| fd_total | None | File descriptors available | 
-| fd_used | None | File descriptors used | -| sockets_total | None | Sockets available | -| sockets_used | None | Sockets used | -| proc_total | None | Erlang process limit | -| proc_used | None | Erlang processes used | -| disk_free_limit | GB | Free disk space low watermark | -| disk_free | GB | Free disk space | -| gc_num | None | GC runs | -| gc_bytes_reclaimed | MB | Bytes reclaimed by GC | -| context_switches | None | Context_switches num | -| io_read_count | None | Total number of read operations | -| io_read_bytes | KB | Total data size read into disk | -| io_read_avg_time | ms | Average read operation time in milliseconds | -| io_write_count | None | Total disk write operations | -| io_write_bytes | KB | Total amount of data written to disk | -| io_write_avg_time | ms | Average time of each disk write operation in milliseconds | -| io_seek_count | None | total seek operation | -| io_seek_avg_time | ms | average seek operation time, in milliseconds | -| io_sync_count | None | total amount of fsync operations | -| io_sync_avg_time | ms | Average time of fsync operation in milliseconds | -| connection_created | None | connection created num | -| connection_closed | None | connection closed num | -| channel_created | None | channel created num | -| channel_closed | None | channel closed num | -| queue_declared | None | queue declared num | -| queue_created | None | queue created num | -| queue_deleted | None | queue deleted num | -| connection_closed | None | connection closed num | - -#### metrics: queues - -| Metric Name | Metric Unit | Metric Description | -|------------------------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------| -| name | None | The name of the queue with non-ASCII characters escaped as in C. | -| node | None | The queue on the node name | -| state | None | The state of the queue. 
Normally "running", but may be "{syncing, message_count}" if the queue is synchronising. | -| type | None | Queue type, one of: quorum, stream, classic. | -| vhost | None | vhost path | -| auto_delete | None | Whether the queue will be deleted automatically when no longer used | -| policy | None | Effective policy name for the queue. | -| consumers | None | Number of consumers. | -| memory | B | Bytes of memory allocated by the runtime for the queue, including stack, heap and internal structures. | -| messages_ready | None | Number of messages ready to be delivered to clients | -| messages_unacknowledged | None | Number of messages delivered to clients but not yet acknowledged | -| messages | None | Sum of ready and unacknowledged messages (queue depth) | -| messages_ready_ram | None | Number of messages from messages_ready which are resident in ram | -| messages_persistent | None | Total number of persistent messages in the queue (will always be 0 for transient queues) | -| message_bytes | B | Sum of the size of all message bodies in the queue. 
This does not include the message properties (including headers) or any overhead | -| message_bytes_ready | B | Like message_bytes but counting only those messages ready to be delivered to clients | -| message_bytes_unacknowledged | B | Like message_bytes but counting only those messages delivered to clients but not yet acknowledged | -| message_bytes_ram | B | Like message_bytes but counting only those messages which are currently held in RAM | -| message_bytes_persistent | B | Like message_bytes but counting only those messages which are persistent | diff --git a/home/versioned_docs/version-v1.4.x/help/redis.md b/home/versioned_docs/version-v1.4.x/help/redis.md deleted file mode 100644 index 0a0c9f77a65..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/redis.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -id: redis -title: 监控:REDIS数据库监控 -sidebar_label: REDIS数据库 -keywords: [开源监控系统, 开源数据库监控, Redis数据库监控] ---- - -> 对REDIS数据库的通用性能指标进行采集监控。支持REDIS1.0+。 - -### 配置参数 - -| 参数名称 | 参数帮助描述 | -|--------|------------------------------------------------------| -| 监控Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | -| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | -| 端口 | redis对外提供的端口,默认为6379,sentinel节点默认26379 | -| 超时时间 | 设置redis info 查询未响应数据时的超时时间,单位ms毫秒,默认3000毫秒。 | -| 数据库名称 | 数据库实例名称,可选。 | -| 用户名 | 数据库连接用户名,可选 | -| 密码 | 数据库连接密码,可选 | -| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | -| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | -| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | - -### 采集指标 - -#### 指标集合:server - -| 指标名称 | 指标单位 | 指标帮助描述 | -|--------------------------|------|-----------------------------------------------| -| redis_version | 无 | Redis 服务器版本 | -| redis_git_sha1 | 无 | Git SHA1 | -| redis_git_dirty | 无 | Git dirty flag | -| redis_build_id | 无 | redis 构建的id | -| redis_mode | 无 | redis模式(包括standalone, sentinel, cluster) | -| os | 无 | Redis 服务器的宿主操作系统 | -| arch_bits | 无 | 架构(32 或 64 位) | -| multiplexing_api | 无 | Redis使用的事件循环机制 | -| atomicvar_api | 无 | Redis使用的原子 API | -| gcc_version | 无 | 
用于编译Redis服务器的GCC编译器版本 | -| process_id | 无 | 服务器进程的PID | -| process_supervised | 无 | 受监管系统(包括:upstart、systemd、unknown、no) | -| run_id | 无 | 标识Redis服务器的随机值(由Sentinel和Cluster使用) | -| tcp_port | 无 | TCP/IP侦听端口 | -| server_time_usec | 无 | 微秒级精度的基于时间的系统时间 | -| uptime_in_seconds | 无 | 自Redis服务器启动后的秒数 | -| uptime_in_days | 无 | 自Redis服务器启动后的天数 | -| hz | 无 | 服务器的当前频率设置,redis相关定时任务的执行频率(如清除过期key,关闭超时客户端) | -| configured_hz | 无 | 服务器配置的频率设置 | -| lru_clock | 无 | 时钟每分钟递增,用于LRU管理 | -| executable | 无 | 服务器可执行文件的路径 | -| config_file | 无 | 配置文件的路径 | -| io_threads_active | 无 | 指示I/O线程是否处于活动状态的标志 | -| shutdown_in_milliseconds | 无 | 复制副本在完成关闭序列之前赶上复制的最长剩余时间。此字段仅在停机期间出现。 | - -#### 指标集合:clients - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------------|------|--------------------------------------------------------------------------------| -| connected_clients | 无 | 客户端连接数(不包括来自副本的连接) | -| cluster_connections | 无 | 群集总线使用的套接字数量的近似值 | -| maxclients | 无 | maxclients配置指令的值。这是connected_clients、connected_slave和cluster_connections之和的上限。 | -| client_recent_max_input_buffer | byte | 当前客户端连接中最大的输入缓冲区 | -| client_recent_max_output_buffer | byte | 当前客户端连接中最大的输出缓冲区 | -| blocked_clients | 无 | 阻塞呼叫挂起的客户端数(BLPOP、BRPOP、BRPOPLPUSH、BLMOVE、BZPOPMIN、BZPOPMAX) | -| tracking_clients | 无 | 正在跟踪的客户端数(CLIENT TRACKING) | -| clients_in_timeout_table | 无 | 客户端超时表中的客户端数 | - -#### 指标集合:memory - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------|----------|-----------------------------------------------------------------------------------------------| -| used_memory | byte | Redis使用其分配器(标准libc、jemalloc或tcmalloc等替代分配器)分配的总字节数 | -| used_memory_human | GB/MB/KB | 上一个值的人类可读表示 | -| used_memory_rss | byte | 操作系统看到的Redis分配的字节数(也称为驻留集大小)。这是top(1)和ps(1)等工具报告的数字 | -| used_memory_rss_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak | byte | Redis消耗的峰值内存(字节) | -| used_memory_peak_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_peak_perc | 无 | used_memory_peak 与used_memory百分比 | -| used_memory_overhead | byte | 
服务器分配用于管理其内部数据结构的所有开销的字节总和 | -| used_memory_startup | byte | Redis在启动时消耗的初始内存量(字节) | -| used_memory_dataset | byte | 数据集的字节大小(used_memory - used_memory_overhead) | -| used_memory_dataset_perc | 无 | 已用内存数据集占净内存使用量的百分比(used_memory_dataset / (used_memory - used_memory_startup)) | -| allocator_allocated | byte | 从分配器分配的总字节数,包括内部碎片。通常与使用的内存相同 | -| allocator_active | byte | 分配器活动页中的总字节数,包括外部碎片 | -| allocator_resident | byte | 分配器中驻留的总字节数(RSS),包括可释放到操作系统的页面(通过MEMORY PURGE或仅等待) | -| total_system_memory | byte | Redis主机的内存总量 | -| total_system_memory_human | GB/MB/KB | 上一个值的人类可读值 | -| used_memory_lua | byte | Lua引擎使用的字节数 | -| used_memory_lua_human | KB | 上一个值的人类可读值 | -| used_memory_scripts | byte | 缓存Lua脚本使用的字节数 | -| used_memory_scripts_human | GB/MB/KB | 上一值的人类可读值 | -| number_of_cached_scripts | 无 | 缓存的lua脚本数量 | -| maxmemory | byte | maxmemory配置指令的值 | -| maxmemory_human | GB/MB/KB | 上一个值的人类可读值 | -| maxmemory_policy | 无 | 当达到maxmemory时的淘汰策略 | -| allocator_frag_ratio | 无 | allocator_active 和 allocator_allocated之间的比率这是真实(外部)碎片度量(不是mem_fragmentation_ratio) | -| allocator_frag_bytes | byte | allocator_active 和 allocator_allocated 之间的差值。 | -| allocator_rss_ratio | | 从操作系统角度看, 内存分配器碎片比例 | -| allocator_rss_bytes | byte | allocator_resident 和 allocator_active之间的差值 | -| rss_overhead_ratio | 无 | used_memory_rss和allocator_resident之间的比率,这包括与分配器或堆无关的RSS开销 | -| rss_overhead_bytes | byte | used_memory_rss和allocator_resident之间的增量 | -| mem_fragmentation_ratio | 无 | used_memory_rss和used_memory之间的比率,注意,这不仅包括碎片,还包括其他进程开销(请参阅allocator_* metrics),以及代码、共享库、堆栈等开销。 | -| mem_fragmentation_bytes | byte | used_memory_rss和used_memory之间的增量。注意,当总碎片字节较低(几兆字节)时,高比率(例如1.5及以上)不是问题的表现 | -| mem_not_counted_for_evict | byte | 不应驱逐的内存大小,以字节为单位。这基本上是瞬时复制和AOF缓冲区。 | -| mem_replication_backlog | byte | 复制backlog的内存大小, 以字节为单位 | -| mem_clients_slaves | 无 | 副本客户端使用的内存-从Redis 7.0开始,副本缓冲区与复制积压工作共享内存,因此当副本不触发内存使用增加时,此字段可以显示0。 | -| mem_clients_normal | 无 | 普通客户端使用的内存 | -| mem_aof_buffer | 无 | 用于AOF和AOF重写缓冲区的临时大小 | -| 
mem_allocator | 无 | 内存分配器,在编译时选择。 | -| active_defrag_running | 无 | 启用activedefrag时,这表示碎片整理当前是否处于活动状态,以及它打算使用的CPU百分比。 | -| lazyfree_pending_objects | 无 | 等待释放的对象数(使用ASYNC选项调用UNLINK或FLUSHDB和FLUSHOLL) | -| lazyfreed_objects | 无 | 已延迟释放的对象数。 | - -#### 指标集合:persistence - -| 指标名称 | 指标单位 | 指标帮助描述 | -|------------------------------|--------|-----------------------------------------------------------------------------------------------------| -| loading | 无 | 服务器是否正在进行持久化 0 - 否 1 -是 | -| current_cow_size | byte | 运行子fork时写入时复制内存的大小(以字节为单位) | -| current_cow_size_age | second | current_cow_size值的年龄(以秒为单位) | -| current_fork_perc | 无 | 当前fork进程的百分比,对于AOF和RDB的fork,它是current_save_keys_processed占current_save_keys_total的百分比 | -| current_save_keys_processed | 无 | 当前保存操作处理的key的数量 | -| current_save_keys_total | 无 | 当前保存操作开始时的key的数量 | -| rdb_changes_since_last_save | 无 | 离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化 | -| rdb_bgsave_in_progress | 无 | 服务器是否正在创建rdb文件 0 - 否 1 - 是 | -| rdb_last_save_time | second | 最近一次创建rdb文件的时间戳,单位秒 | -| rdb_last_bgsave_status | 无 | 最近一次rdb持久化是否成功 ok 成功 | -| rdb_last_bgsave_time_sec | second | 最近一次成功生成rdb文件耗时秒数 | -| rdb_current_bgsave_time_sec | 无 | 如果服务器正在创建rdb文件,那么这个字段记录的就是当前的创建操作已经耗费的秒数 | -| rdb_last_cow_size | 无 | RDB过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| aof_enabled | 无 | 是否开启了AOF 0 - 否 1 - 是 | -| aof_rewrite_in_progress | 无 | 标识aof的rewrite操作是否在进行中 0 - 否 1- 是 | -| aof_rewrite_scheduled | 无 | rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite | -| aof_last_rewrite_time_sec | 无 | 最近一次aof rewrite耗费的时长 | -| aof_current_rewrite_time_sec | second | 如果rewrite操作正在进行,则记录所使用的时间,单位秒 | -| aof_last_bgrewrite_status | 无 | 上次 bgrewrite aof 操作的状态 ok 成功 | -| aof_last_write_status | 无 | 上次aof写入状态 | -| aof_last_cow_size | 无 | AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等) | -| module_fork_in_progress | 无 | 指示fork模块正在进行的标志 | -| module_fork_last_cow_size | 无 | 上一次fork操作期间写入时复制内存的字节大小 | - -#### 指标集合:stats - -| 指标名称 | 指标单位 | 指标帮助描述 
| -|--------------------------------|------|----------------------------------------------------| -| total_connections_received | 无 | 服务器接受的连接总数 | -| total_commands_processed | 无 | 服务器处理的命令总数 | -| instantaneous_ops_per_sec | 无 | 每秒处理的命令数 | -| total_net_input_bytes | byte | 从网络读取的字节总数 | -| total_net_output_bytes | byte | 写入网络的总字节数 | -| instantaneous_input_kbps | KB/S | 网络每秒的读取速率(KB/秒) | -| instantaneous_output_kbps | KB/S | 网络每秒的写入速率(KB/秒) | -| rejected_connections | 无 | 由于maxclients限制而拒绝的连接数 | -| sync_full | 无 | 具有副本的完整重新同步数 | -| sync_partial_ok | 无 | 接受的部分重新同步请求数 | -| sync_partial_err | 无 | 被拒绝的部分重新同步请求数 | -| expired_keys | 无 | 过期的key总数 | -| expired_stale_perc | 无 | 可能过期key的百分比 | -| expired_time_cap_reached_count | 无 | 活动过期周期提前停止的次数 | -| expire_cycle_cpu_milliseconds | 无 | 活动到期周期所花费的累计时间 | -| evicted_keys | 无 | 由于最大内存限制而收回key的数量 | -| keyspace_hits | 无 | 在主dict 中成功查找key的次数 | -| keyspace_misses | 无 | 在主dict 中未查到key的次数 | -| pubsub_channels | 无 | 客户端使用 pub/sub 频道的总和 | -| pubsub_patterns | 无 | 客户端使用 pub/sub 模式的全局数量 | -| latest_fork_usec | 无 | 最后一次fork操作的持续时间(以微秒为单位) | -| total_forks | 无 | 自服务器启动以来的fork操作总数 | -| migrate_cached_sockets | 无 | 为MIGRATE目的打开的socket数量 | -| slave_expires_tracked_keys | 无 | trace key 到期的数量(仅适用于可写副本) | -| active_defrag_hits | 无 | 主动碎片整理命中次数 | -| active_defrag_misses | 无 | 主动碎片整理未命中次数 | -| active_defrag_key_hits | 无 | 主动碎片整理key命中次数 | -| active_defrag_key_misses | 无 | 主动碎片整理key未命中次数 | -| tracking_total_keys | 无 | key 查询的总数 | -| tracking_total_items | 无 | item查询的总数 | -| tracking_total_prefixes | 无 | 前缀查询的总数 | -| unexpected_error_replies | 无 | 意外错误回复数,即AOF加载或复制中的错误类型 | -| total_error_replies | 无 | 发出的错误回复总数,即被拒绝的命令(命令执行之前的错误)和失败的命令(在命令执行过程中的错误)的总和 | -| dump_payload_sanitizations | 无 | 参考sanitize-dump-payload配置 | -| total_reads_processed | 无 | 正在读取的请求数 | -| total_writes_processed | 无 | 正在写入的请求数 | -| io_threaded_reads_processed | 无 | 正在读取的线程数 | -| io_threaded_writes_processed | 无 | 正在写入的线程数 | - -#### 指标集合:replication - -| 指标名称 | 指标单位 | 指标帮助描述 | 
-|--------------------------------|------|-------------------------------------------------------------------------------------| -| role | 无 | 节点角色 master 主节点 slave 从节点 | -| connected_slaves | 无 | 连接的从节点数 | -| master_failover_state | 无 | 正在进行的故障切换的状态(如果有) | -| master_replid | 无 | 实例启动的随机字符串 | -| master_replid2 | 无 | 故障切换后用于PSYNC的辅助复制ID | -| master_repl_offset | 无 | 主从同步偏移量 | -| second_repl_offset | 无 | 接受从服务ID的最大偏移量 | -| repl_backlog_active | 无 | 表示从服务挤压处于活动状态 | -| repl_backlog_size | byte | 从服务积压缓冲区的总大小(字节) | -| repl_backlog_first_byte_offset | 无 | 复制缓冲区里偏移量的大小 | -| repl_backlog_histlen | 无 | 此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小 | - -#### 指标集合:cpu - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------------------|------|------------------------| -| used_cpu_sys | 无 | Redis主进程在内核态所占用CPU时钟总和 | -| used_cpu_user | 无 | Redis主进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_children | 无 | Redis子进程在内核态所占用CPU时钟总和 | -| used_cpu_user_children | 无 | Redis子进程在用户态所占用CPU时钟总和 | -| used_cpu_sys_main_thread | 无 | Redis服务器主线程消耗的内核CPU | -| used_cpu_user_main_thread | 无 | Redis服务器主线程消耗的用户CPU | - -#### 指标集合:errorstats - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-------------------|------|-----------| -| errorstat_ERR | 无 | 错误累计出现的次数 | -| errorstat_MISCONF | 无 | | - -#### 指标集合:cluster - -| 指标名称 | 指标单位 | 指标帮助描述 | -|-----------------|------|--------------------| -| cluster_enabled | 无 | 集群是否开启 0 - 否 1 - 是 | - -#### 指标集合:commandstats - -| 指标名称 | 指标单位 | 指标帮助描述 | -|---------------|------|---------------------------------------------------------------------------------------------------------------------------| -| cmdstat_set | 无 | set命令的统计信息,calls: 累计调用该命令的次数;usec: 调用该命令的累计耗时,单位微秒;usec_per_call: 调用该命令的平均耗时;rejected_call: 拒绝执行的次数;failed_calls: 调用失败的次数 | -| cmdstat_get | 无 | get命令的统计信息 | -| cmdstat_setnx | 无 | setnx命令的统计信息 | -| cmdstat_hset | 无 | hset命令的统计信息 | -| cmdstat_hget | 无 | hget命令的统计信息 | -| cmdstat_lpush | 无 | lpush命令的统计信息 | -| cmdstat_rpush | 无 | rpush命令的统计信息 | -| 
cmdstat_lpop | 无 | lpop命令的统计信息 | -| cmdstat_rpop | 无 | rpop命令的统计信息 | -| cmdstat_llen | 无 | llen命令的统计信息 | diff --git a/home/versioned_docs/version-v1.4.x/help/shenyu.md b/home/versioned_docs/version-v1.4.x/help/shenyu.md deleted file mode 100644 index aa4a43a8d5c..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/shenyu.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -id: shenyu -title: Monitoring:Apache ShenYu API Gateway -sidebar_label: Apache ShenYu -keywords: [open source monitoring tool, open source apache shenyu monitoring tool, monitoring apache shenyu metrics] ---- - -> monitor ShenYu running status(JVM-related), include request response and other related metrics. - -## Pre-monitoring operations - -Enable `metrics` plugin in ShenYu, expose it's prometheus metrics endpoint。 - -Refer [ShenYu Document](https://shenyu.apache.org/docs/plugin-center/observability/metrics-plugin) - -Two Steps Mainly: - -1. add metrics plugin dependency in gateway's pom.xml. - -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` - -2. modify this config in shenyu gateway yaml. - -```yaml -shenyu: - metrics: - enabled: false #false is close, true is open - name : prometheus - host: 127.0.0.1 - port: 8090 - jmxConfig: - props: - jvm_enabled: true #enable jvm monitoring -``` - -Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond to prometheus format data. - -### Configuration parameters - -| Parameter name | Parameter help description | -|--------|----------------------------------------- --------------| -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| Port | The port provided by the gateway Metric interface, the default is 8090. 
| -| Timeout | HTTP request response timeout | -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | - -### Collect metrics - -#### Index collection: shenyu_request_total - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|------------------------------------------| -| value | None | Collect all requests from ShenYu gateway | - -#### Metric collection: shenyu_request_throw_created - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|-------------------------------------------------------------| -| value | None | Collect the number of abnormal requests from ShenYu Gateway | - -#### Metric collection: process_cpu_seconds_total - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|-------------------------------------------| -| value | none | total user and system CPU elapsed seconds | - -#### Metric collection: process_open_fds - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|---------------------------------| -| value | none | number of open file descriptors | - -#### Metric collection: process_max_fds - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|-----------------------------------------| -| value | none | maximum number of open file descriptors | - -#### Metric collection: jvm_info - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|-------------------------| -| runtime | none | JVM version information | -| vendor | none | JVM 
version information | -| version | None | JVM version information | - -#### Metric collection: jvm_memory_bytes_used - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|------------------------------------------| -| area | None | JVM memory area | -| value | MB | used size of the given JVM memory region | - -#### Metric collection: jvm_memory_pool_bytes_used - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|----------------------------------------| -| pool | None | JVM memory pool | -| value | MB | used size of the given JVM memory pool | - -#### Metric collection: jvm_memory_pool_bytes_committed - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|-------------------------------------------------| -| pool | None | JVM memory pool | -| value | MB | The committed size of the given JVM memory pool | - -#### Metric collection: jvm_memory_pool_bytes_max - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|-------------------------------------------------------| -| pool | None | JVM memory pool | -| value | MB | The maximum size of the memory pool for the given JVM | - -#### Metric collection: jvm_threads_state - -| Metric Name | Metric Unit | Metric Help Description | -|-------------|-------------|---------------------------------------------------------| -| state | none | thread state | -| value | None | The number of threads corresponding to the thread state | diff --git a/home/versioned_docs/version-v1.4.x/help/smtp.md b/home/versioned_docs/version-v1.4.x/help/smtp.md deleted file mode 100644 index 4be044bc090..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/smtp.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: smtp -title: Monitoring SMTP -sidebar_label: SMTP Monitor -keywords: [ open source monitoring tool, open source SMTP monitoring tool, monitoring SMTP metrics ] ---- - -> Collect and monitor the general 
performance Metrics of SMTP. - -```text -Determine whether the server is available through the hello command in SMTP -``` - -> see - -**Protocol Use:SMTP** - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by SMTP | -| Email | Your email name, parameters for the hello command | -| Timeout | Allow collection response time | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:summary - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|----------------------------------------------------------------| -| responseTime | ms | The time it takes for the SMTP server to respond to a request. | -| response | | Response Status. | -| smtpBanner | | Banner of SMTP server. | -| heloInfo | | Response information returned by helo. 
| diff --git a/home/versioned_docs/version-v1.4.x/help/spark.md b/home/versioned_docs/version-v1.4.x/help/spark.md deleted file mode 100644 index 8bc045fc9a1..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/spark.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: spark -title: Monitoring Spark -sidebar_label: Spark Monitor -keywords: [open source monitoring tool, open source java spark monitoring tool, monitoring spark metrics] ---- - -> Collect and monitor the general performance Metrics of Spark. - -**Protocol Use:JMX** - -### Spark App Enable JMX Protocol - -1. Add Spark `VM options` When Start Server ⚠️ customIP - -Refer: - -**监控配置spark的监控主要分为Master、Worker、driver、executor监控。Master和Worker的监控在spark集群运行时即可监控,Driver和Excutor的监控需要针对某一个app来进行监控。** -**如果都要监控,需要根据以下步骤来配置** - -## 第一步 - -**修改$SPARK_HOME/conf/spark-env.sh,添加以下语句:** - -```shell -# JMX Port to use -SPARK_DAEMON_JAVA_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false" - -# export SPARK_DAEMON_JAVA_OPTS="$SPARK_DAEMON_JAVA_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT " -export SPARK_DAEMON_JAVA_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8712 " -``` - -语句中有$JMX_PORT,这个的值可以自定义,也可以获取一个随机数作为端口号。 -如果端口自定义为一个具体的值,而 spark 的 Master 和其中之一的 Worker 在同一台机器上,会出现端口冲突的情况。 - -## 第二步 - -**vim $SPARK_HOME/conf/metrics.properties 添加如下内容** - -```shell -*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink -master.source.jvm.class=org.apache.spark.metrics.source.JvmSource -worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource -driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource -executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource -``` - -## 第三步 - -**vim $SPARK_HOME/conf/spark-defaults.conf,添加以下项为driver和executor设置监控端口,在有程序运行的情况下,此端口会被打开。** - -```shell -spark.metrics.conf 
/opt/bigdata/spark/conf/metrics.properties -spark.driver.extraJavaOptions -XX:+PrintGCDetails -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.mana -gement.jmxremote.port=8712 - -spark.executor.extraJavaOptions -XX:+PrintGCDetails -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.mana -gement.jmxremote.port=8711 -``` - -在spark的Master和Worker正常运行以及spark-submit提交了一个程序的情况下,可以从linux中查询出端口号码。 - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:memory_pool - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:code_cache (Only Support JDK8) - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:class_loading - -| Metric name | Metric unit | Metric help description | -|-----------------------|-------------|--------------------------| -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - -#### Metrics Set:thread - -| Metric name | Metric unit | Metric help description | -|-------------------------|-------------|----------------------------| -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | diff --git a/home/versioned_docs/version-v1.4.x/help/spring_gateway.md b/home/versioned_docs/version-v1.4.x/help/spring_gateway.md deleted file mode 100644 index 7f27b7fe8ef..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/spring_gateway.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: spring_gateway -Title: Monitoring Spring Gateway -sidebar_label: Spring Gateway -keywords: [open source monitoring tool, open 
source spring gateway monitoring tool, monitoring spring gateway metrics] ---- - -> Collect and monitor the general performance metrics exposed by the SpringBoot actuator. - -## Pre-monitoring operations - -If you want to monitor information in 'Spring Gateway' with this monitoring type, you need to integrate your SpringBoot application and enable the SpringBoot Actuator. - -**1、Add POM .XML dependencies:** - -```xml - - org.springframework.boot - spring-boot-starter-actuator - -``` - -**2. Modify the YML configuration exposure metric interface:** - -```yaml -management: - endpoint: - gateway: - enabled: true # default value - endpoints: - web: - exposure: - include: '*' - enabled-by-default: on -``` - -### Configure parameters - -| Parameter name | Parameter Help describes the | -|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | - -### Collect metrics - -#### metric Collection: Health - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------| -| status | None | Service health: UP, Down | - -#### metric Collection: enviroment - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|-----------------------------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | - -#### metric Collection: threads - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|----------------------------------|-------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads | - -#### metric Collection: memory_used - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | - -#### metric Collection: route_info - -| Metric Name | metric unit | Metrics help describe | 
-|-------------|-------------|---------------------------------------| -| route_id | None | Route id | -| predicate | None | This is a routing matching rule | -| uri | None | This is a service resource identifier | -| order | None | The priority of this route | diff --git a/home/versioned_docs/version-v1.4.x/help/springboot2.md b/home/versioned_docs/version-v1.4.x/help/springboot2.md deleted file mode 100644 index 08029dc23b5..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/springboot2.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: springboot2 -Title: Monitoring SpringBoot 2.0 -sidebar_label: SpringBoot 2.0 -keywords: [open source monitoring tool, open source springboot2 monitoring tool, monitoring springboot2 metrics] ---- - -> Collect and monitor the general performance metrics exposed by the SpringBoot 2.0 actuator. - -## Pre-monitoring operations - -If you want to monitor information in 'SpringBoot' with this monitoring type, you need to integrate your SpringBoot application and enable the SpringBoot Actuator. - -**1、Add POM .XML dependencies:** - -```xml - - org.springframework.boot - spring-boot-starter-actuator - -``` - -**2. Modify the YML configuration exposure metric interface:** - -```yaml -management: - endpoints: - web: - exposure: - include: '*' - enabled-by-default: on -``` - -*Note: If your project also introduces authentication related dependencies, such as springboot security, the interfaces exposed by SpringBoot Actor may be intercepted. In this case, you need to manually release these interfaces. 
Taking springboot security as an example, you should add the following code to the Security Configuration class:* - -```java -public class SecurityConfig extends WebSecurityConfigurerAdapter{ - @Override - protected void configure(HttpSecurity httpSecurity) throws Exception{ - httpSecurity - // Configure the interfaces to be released ----------------------------------- - .antMatchers("/actuator/**").permitAll() - .antMatchers("/metrics/**").permitAll() - .antMatchers("/trace").permitAll() - .antMatchers("/heapdump").permitAll() - // 。。。 - // For other interfaces, please refer to: https://blog.csdn.net/JHIII/article/details/126601858 ----------------------------------- - } -} -``` - -### Configure parameters - -| Parameter name | Parameter Help describes the | -|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| -| Monitor Host | THE MONITORED PEER IPV4, IPV6 OR DOMAIN NAME. Note ⚠️ that there are no protocol headers (eg: https://, http://). | -| Monitoring Name | A name that identifies this monitoring that needs to be unique. | -| Port | The default port provided by the database is 8080. 
| -| Enable HTTPS | Whether to access the website through HTTPS, please note that ⚠️ when HTTPS is enabled, the default port needs to be changed to 443 | -| The acquisition interval is | Monitor the periodic data acquisition interval, in seconds, and the minimum interval that can be set is 30 seconds | -| Whether to probe the | Whether to check the availability of the monitoring before adding a monitoring is successful, and the new modification operation | will continue only if the probe is successful | -| Description Comment | For more information identifying and describing the remarks for this monitoring, users can remark the information here | - -### Collect metrics - -#### metric Collection: Health - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------| -| status | None | Service health: UP, Down | - -#### metric Collection: enviroment - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|-----------------------------------------------| -| profile | None | The application runs profile: prod, dev, test | -| port | None | Apply the exposed port | -| os | None | Run the operating system | -| os_arch | None | Run the operating system architecture | -| jdk_vendor | None | jdk vendor | -| jvm_version | None | jvm version | - -#### metric Collection: threads - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|----------------------------------|-------------------| -| state | None | Thread status | -| number | None | This thread state corresponds to | number of threads | - -#### metric Collection: memory_used - -| Metric Name | metric unit | Metrics help describe | -|-------------|-------------|--------------------------------------| -| space | None | Memory space name | -| mem_used | MB | This space occupies a memory size of | diff --git a/home/versioned_docs/version-v1.4.x/help/sqlserver.md 
b/home/versioned_docs/version-v1.4.x/help/sqlserver.md deleted file mode 100644 index 06e19252ede..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/sqlserver.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: sqlserver -title: Monitoring:SqlServer database monitoring -sidebar_label: SqlServer database -keywords: [open source monitoring tool, open source database monitoring tool, monitoring sqlserver database metrics] ---- - -> Collect and monitor the general performance Metrics of SqlServer database. Support SqlServer 2017+. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by the database. The default is 1433 | -| Query timeout | Set the timeout time when SQL query does not respond to data, unit: ms, default: 3000ms | -| Database name | Database instance name, optional | -| Username | Database connection user name, optional | -| Password | Database connection password, optional | -| URL | Database connection URL,optional,If configured, the database name, user name, password and other parameters in the URL will overwrite the above configured parameters | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|-----------------------------------------------------------------------------| -| machine_name | none | Windows computer name running the server instance | -| server_name | none | Server and instance information SQL Server associated with Windows instance | -| version | none | Version of the instance,SQL Server,format is "major.minor.build.revision" | -| edition | none | The product SQL server version of the installed instance | -| start_time | none | Database start time | - -#### Metric set:performance_counters - -| Metric name | Metric unit | Metric help description | -|------------------------|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| database_pages | none | Database pages, Number of pages obtained (buffer pool) | -| target_pages | none | Target pages, The desired number of pages that the buffer pool must have | -| page_life_expectancy | s | Page life expectancy. The time that data pages stay in the buffer pool. This time is generally greater than 300 | -| buffer_cache_hit_ratio | % | Buffer cache hit ratio, Database buffer pool cache hit rate. The probability that the requested data is found in the buffer pool is generally greater than 80%, otherwise the buffer pool capacity may be too small | -| checkpoint_pages_sec | none | Checkpoint pages/sec, The number of dirty pages written to the disk by the checkpoint per second. 
If the data is too high, it indicates that there is a lack of memory capacity | -| page_reads_sec | none | Page reads/sec, Number of pages read per second in the cache pool | -| page_writes_sec | none | Page writes/sec, Number of pages written per second in the cache pool | - -#### Metric set:connection - -| Metric name | Metric unit | Metric help description | -|-----------------|-------------|------------------------------| -| user_connection | none | Number of connected sessions | - -### Common Problem - -1. SSL connection problem fixed - -jdk version: jdk11 -Description of the problem: SQL Server 2019 uses the SA user connection to report an error -Error message: - -```text -The driver could not establish a secure connection to SQL Server by using Secure Sockets Layer (SSL) encryption. Error: "PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target". ClientConnectionId:xxxxxxxxxxxxxxxxxxxx -``` - -Screenshot of the problem: -![issue](https://user-images.githubusercontent.com/38679717/206621658-c0741d48-673d-45ff-9a3b-47d113064c12.png) - -solution: -Use advanced settings when adding `SqlServer` monitoring, customize JDBC URL, add parameter configuration after the spliced jdbc url, ```;encrypt=true;trustServerCertificate=true;```This parameter true means unconditionally trust the server returned any root certificate. 
- -Example: ```jdbc:sqlserver://127.0.0.1:1433;DatabaseName=demo;encrypt=true;trustServerCertificate=true;``` - -Reference document: [microsoft pkix-path-building-failed-unable-to-find-valid-certification]( failed-unable-to-find-valid-certification/ba-p/2591304) diff --git a/home/versioned_docs/version-v1.4.x/help/ssl_cert.md b/home/versioned_docs/version-v1.4.x/help/ssl_cert.md deleted file mode 100644 index 253485f8b1a..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/ssl_cert.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: ssl_cert -title: Monitoring SSL Certificate -sidebar_label: SSL Monitor -keywords: [open source monitoring tool, open source ssl cert monitoring tool, monitoring website ssl metrics] ---- - -> Monitor the website's SSL certificate expiration time, response time and other Metrics - -### Configuration parameters - -| Parameter name | Parameter help description | -|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | The peer IPV4, IPV6 or domain name to be monitored. Note ⚠️Without protocol header (eg: https://, http://). | -| Monitoring name | The name that identifies this monitoring, and the name needs to be unique. | -| Port | The port provided by the website, https generally defaults to 443. | -| Relative path | The suffix path of the website address except the IP port, for example, `www.tancloud.io/console` The relative path of the website is `/console`. 
| -| Acquisition Interval | Interval time for monitoring periodic data collection, in seconds, the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring, and the operation of adding and modifying will continue after the detection is successful | -| Description Remarks | More remark information to identify and describe this monitoring, users can remark information here | - -### Collect metrics - -#### Metric collection: certificate - -| Metric Name | Metric Unit | Metric Help Description | -|-----------------|-----------------|--------------------------| -| subject | none | certificate name | -| expired | no | expired or not | -| start_time | None | Validity start time | -| start_timestamp | ms millisecond | Validity start timestamp | -| end_time | None | Expiration time | -| end_timestamp | ms milliseconds | expiration timestamp | diff --git a/home/versioned_docs/version-v1.4.x/help/tomcat.md b/home/versioned_docs/version-v1.4.x/help/tomcat.md deleted file mode 100644 index 9f103dfe5be..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/tomcat.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: tomcat -title: Monitor:Apache Tomcat -sidebar_label: Apache Tomcat -keywords: [open source monitoring tool, open source tomcat monitoring tool, monitoring tomcat metrics] ---- - -> Collect and monitor the general performance Metrics of Apache Tomcat. - -**Protocol Use:JMX** - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. 
The name needs to be unique | -| Port | Port provided by JMX | -| Username | JMX connection user name, optional | -| Password | JMX connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metrics - -#### Metrics Set:memory_pool - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| name | | metrics name | -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:code_cache - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| committed | kb | total size | -| init | kb | init size | -| max | kb | max size | -| used | kb | used size | - -#### Metrics Set:class_loading - -| Metric name | Metric unit | Metric help description | -|-----------------------|-------------|--------------------------| -| LoadedClassCount | | Loaded Class Count | -| TotalLoadedClassCount | | Total Loaded Class Count | -| UnloadedClassCount | | Unloaded Class Count | - -#### Metrics Set:thread - -| Metric name | Metric unit | Metric help description | -|-------------------------|-------------|----------------------------| -| TotalStartedThreadCount | | Total Started Thread Count | -| ThreadCount | | Thread Count | -| PeakThreadCount | | Peak Thread Count | -| DaemonThreadCount | | Daemon Thread Count | -| CurrentThreadUserTime | ms | Current Thread User Time | -| CurrentThreadCpuTime | ms | Current Thread Cpu Time | - -### Tomcat Enable 
JMX Protocol - -1. After building tomcat, enter the bin directory under tomcat and modify the catalina.sh file - -2. vim catalina.sh Attention⚠️ Replace Hostname And Port - -```aidl -CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote -Djava.rmi.server.hostname=10.1.1.52 -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" -``` diff --git a/home/versioned_docs/version-v1.4.x/help/ubuntu.md b/home/versioned_docs/version-v1.4.x/help/ubuntu.md deleted file mode 100644 index 9de28efe095..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/ubuntu.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: ubuntu -title: Monitoring:Ubuntu operating system monitoring -sidebar_label: Ubuntu operating system -keywords: [open source monitoring tool, open source linux ubuntu monitoring tool, monitoring ubuntu metrics] ---- - -> Collect and monitor the general performance Metrics of Ubuntu operating system. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Linux SSH. The default is 22 | -| Username | SSH connection user name, optional | -| Password | SSH connection password, optional | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:basic - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|--------------------------| -| hostname | none | Host name | -| version | none | Operating system version | -| uptime | none | System running time | - -#### Metric set:cpu - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|------------------------------------------------| -| info | none | CPU model | -| cores | cores | Number of CPU cores | -| interrupt | number | Number of CPU interrupts | -| load | none | Average load of CPU in the last 1/5/15 minutes | -| context_switch | number | Number of current context switches | -| usage | % | CPU usage | - -#### Metric set:memory - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------------------| -| total | Mb | Total memory capacity | -| used | Mb | User program memory | -| free | Mb | Free memory capacity | -| buff_cache | Mb | Memory occupied by cache | -| available | Mb | Remaining available memory capacity | -| usage | % | Memory usage | - -#### Metric set:disk - -| Metric name | Metric unit | Metric help description | -|---------------|-------------|----------------------------------------| -| disk_num | blocks | Total number of disks | -| partition_num | partitions | Total number of partitions | -| block_write | blocks | Total number of blocks written to disk | -| block_read | blocks | Number of blocks read from disk | -| write_rate | iops | Rate of writing disk blocks per second | - -#### Metric set:interface - -| Metric name | Metric unit | Metric help description | -|----------------|-------------|------------------------------| -| interface_name | none | Network 
card name | -| receive_bytes | byte | Inbound data traffic(bytes) | -| transmit_bytes | byte | Outbound data traffic(bytes) | - -#### Metric set:disk_free - -| Metric name | Metric unit | Metric help description | -|-------------|-------------|-------------------------| -| filesystem | none | File system name | -| used | Mb | Used disk size | -| available | Mb | Available disk size | -| usage | % | usage | -| mounted | none | Mount point directory | diff --git a/home/versioned_docs/version-v1.4.x/help/website.md b/home/versioned_docs/version-v1.4.x/help/website.md deleted file mode 100644 index 1041755f156..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/website.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: website -title: Monitoring Website -sidebar_label: Website Monitor -keywords: [open source monitoring tool, open source website monitoring tool, monitoring website metrics] ---- - -> Monitor whether the website is available, response time and other Metrics. - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Ports provided by website, http generally defaults to 80 and https generally defaults to 443 | -| Relative path | Suffix path of website address except IP port. For example, the relative path of `www.tancloud.io/console` website is `/console` | -| Enable HTTPS | Whether to access the website through HTTPS. 
Note⚠️When HTTPS is enabled, the default corresponding port needs to be changed to 443 | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:summary - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|-------------------------| -| responseTime | ms | Website response time | diff --git a/home/versioned_docs/version-v1.4.x/help/windows.md b/home/versioned_docs/version-v1.4.x/help/windows.md deleted file mode 100644 index 99d305cbce5..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/windows.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: windows -title: Monitoring:Windows operating system monitoring -sidebar_label: Windows operating system -keywords: [open source monitoring tool, open source windows monitoring tool, monitoring windows metrics] ---- - -> Collect and monitor the general performance Metrics of Windows operating system through SNMP protocol. -> Note⚠️ You need to start SNMP service for Windows server. 
- -References: -[What is SNMP protocol 1](https://www.cnblogs.com/xdp-gacl/p/3978825.html) -[What is SNMP protocol 2](https://www.auvik.com/franklyit/blog/network-basics-what-is-snmp/) -[Win configure SNMP in English](https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-snmp-service) -[Win configure SNMP in Chinese](https://docs.microsoft.com/zh-cn/troubleshoot/windows-server/networking/configure-snmp-service) - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Windows SNMP service. The default is 161 | -| SNMP version | SNMP protocol version V1 V2c V3 | -| SNMP community Word | SNMP agreement community name(Community Name). It is used to realize the authentication of SNMP network administrator when accessing SNMP management agent. Similar to password, the default value is public | -| Timeout | Protocol connection timeout | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:system - -| Metric name | Metric unit | Metric help description | -|--------------|-------------|------------------------------| -| name | none | Host name | -| descr | none | Operating system description | -| uptime | none | System running time | -| numUsers | number | Current number of users | -| services | number | Current number of services | -| processes | number | Current number of processes | -| responseTime | ms | Collection response time | diff --git a/home/versioned_docs/version-v1.4.x/help/zookeeper.md b/home/versioned_docs/version-v1.4.x/help/zookeeper.md deleted file mode 100644 index 362edf8cff9..00000000000 --- a/home/versioned_docs/version-v1.4.x/help/zookeeper.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -id: zookeeper -title: Monitoring Zookeeper -sidebar_label: Zookeeper Monitor -keywords: [open source monitoring tool, open source zookeeper monitoring tool, monitoring zookeeper metrics] ---- - -> Collect and monitor the general performance Metrics of Zookeeper. - -### PreRequisites - -#### Zookeeper four word command - -> The current implementation scheme uses the four word command provided by zookeeper to collect Metrics. -> Users need to add the four word command of zookeeper to the white list by themselves. - -Steps - -> 1.Find our zookeeper configuration file, which is usually zoo.cfg. 
-> -> 2.Add the following commands to the configuration file - -```shell -# Add the required command to the white list -4lw.commands.whitelist=stat, ruok, conf, isro - -# Add all commands to the white list -4lw.commands.whitelist=* -``` - -> 3.Restart service - -```shell -zkServer.sh restart -``` - -#### netcat protocol - -The current implementation scheme requires us to deploy the Linux server of zookeeper -Command environment for installing netcat - -> netcat installation steps -> -> ```shell -> yum install -y nc -> ``` - -If the terminal displays the following information, the installation is successful - -```shell -Complete! -``` - -### Configuration parameter - -| Parameter name | Parameter help description | -|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | -| Monitoring name | Identify the name of this monitoring. The name needs to be unique | -| Port | Port provided by Zookeeper. The default is 2181 | -| Query timeout | Set the timeout of Zookeeper connection, unit: ms, default: 3000ms | -| Username | User name of the Linux connection where Zookeeper is located | -| Password | Password of the Linux connection where Zookeeper is located | -| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | -| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | -| Description remarks | For more information about identifying and describing this monitoring, users can note information here | - -### Collection Metric - -#### Metric set:conf - -| Metric name | Metric unit | Metric help description | -|-------------------|-------------|--------------------------------------------------------------------------------------------------------------------| -| clientPort | none | Port | -| dataDir | none | Data snapshot file directory. By default, 100000 operations generate a snapshot | -| dataDirSize | kb | Data snapshot file size | -| dataLogDir | none | Transaction log file directory, production environment on a separate disk | -| dataLogSize | kb | Transaction log file size | -| tickTime | ms | Time interval between servers or between clients and servers to maintain heartbeat | -| minSessionTimeout | ms | Minimum session timeout. Heartbeat timex2. The specified time is less than this time, which is used by default | -| maxSessionTimeout | ms | Maximum session timeout. Heartbeat timex20. 
The specified time is greater than this time, which is used by default | -| serverId | none | Server id | - -#### Metric set:stats - -| Metric name | Metric unit | Metric help description | -|-------------------------------|-------------|------------------------------------| -| zk_version | none | Server version | -| zk_server_state | none | Server role | -| zk_num_alive_connections | number | Number of connections | -| zk_avg_latency | ms | Average latency | -| zk_outstanding_requests | number | Number of outstanding requests | -| zk_znode_count | number | Number of znode | -| zk_packets_sent | number | Number of packets sent | -| zk_packets_received | number | Number of packets received | -| zk_watch_count | number | Number of watch | -| zk_max_file_descriptor_count | number | Maximum number of file descriptors | -| zk_approximate_data_size | kb | data size | -| zk_open_file_descriptor_count | number | Number of open file descriptors | -| zk_max_latency | ms | Max latency | -| zk_ephemerals_count | number | Number of ephemeral nodes | -| zk_min_latency | ms | Min latency | diff --git a/home/versioned_docs/version-v1.4.x/introduce.md b/home/versioned_docs/version-v1.4.x/introduce.md deleted file mode 100644 index f968e1063c9..00000000000 --- a/home/versioned_docs/version-v1.4.x/introduce.md +++ /dev/null @@ -1,312 +0,0 @@ ---- -id: introduce -title: HertzBeat -sidebar_label: Introduce -slug: / ---- - -> A real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. 
- -[![Discord](https://img.shields.io/badge/Chat-Discord-7289DA?logo=discord)](https://discord.gg/Fb6M73htGr) -[![Reddit](https://img.shields.io/badge/Reddit-Community-7289DA?logo=reddit)](https://www.reddit.com/r/hertzbeat/) -[![Twitter](https://img.shields.io/twitter/follow/hertzbeat1024?logo=twitter)](https://twitter.com/hertzbeat1024) -[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8139/badge)](https://www.bestpractices.dev/projects/8139) -[![Docker Pulls](https://img.shields.io/docker/pulls/apache/hertzbeat?style=%20for-the-badge&logo=docker&label=DockerHub%20Download)](https://hub.docker.com/r/apache/hertzbeat) -[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/hertzbeat)](https://artifacthub.io/packages/search?repo=hertzbeat) -[![QQ](https://img.shields.io/badge/QQ-630061200-orange)](https://qm.qq.com/q/FltGGGIX2m) -[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCri75zfWX0GHqJFPENEbLow?logo=youtube&label=YouTube%20Channel)](https://www.youtube.com/channel/UCri75zfWX0GHqJFPENEbLow) - -**Home: [hertzbeat.apache.org](https://hertzbeat.apache.org) Global | [hertzbeat.com](https://hertzbeat.com) China** - -**Cloud: [console.tancloud.cn](https://console.tancloud.cn)** - -## 🎡 Introduction - -[HertzBeat](https://github.com/apache/hertzbeat) is an easy-to-use, open source, real-time monitoring system with agentless, high performance cluster, prometheus-compatible, offers powerful custom monitoring and status page building capabilities. - -### Features - -* Combines **monitoring, alarm, and notification** features into one platform, and supports monitoring for web service, program, database, cache, os, webserver, middleware, bigdata, cloud-native, network, custom and more. -* Easy to use and agentless, web-based and with one-click monitoring and alerting, zero learning curve. 
-* Makes protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, allowing you to collect any metrics by simply configuring the template `YML` file online. Imagine being able to quickly adapt to a new monitoring type like K8s or Docker simply by configuring online with HertzBeat. -* Compatible with the `Prometheus` ecosystem and more, can monitoring what `Prometheus` can monitoring with few clicks on webui. -* High performance, supports horizontal expansion of multi-collector clusters, multi-isolated network monitoring and cloud-edge collaboration. -* Provides flexible alarm threshold rules and timely notifications delivered via `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. -* Provides powerful status page building capabilities, easily communicate the real-time status of your service to users. - -> HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. - ---- - -### Powerful Monitoring Templates - -> Before we discuss the customizable monitoring capabilities of HertzBeat, which we mentioned at the beginning, let's introduce the different monitoring templates of HertzBeat. And it is because of this monitoring template design that the advanced features come later. - -HertzBeat itself did not create a data collection protocol for the monitoring client to adapt to. Instead, HertzBeat makes full use of the existing ecosystem, `SNMP protocol` to collect information from network switches and routers, `JMX specification` to collect information from Java applications, `JDBC specification` to collect information from datasets, `SSH` to directly connect to scripts to get the display information, `HTTP+ (JsonPath | prometheus, etc.)` to parse the information from API interfaces, `IPMI protocol to collect server information, and so on. 
-HertzBeat uses these existing standard protocols or specifications, makes them abstractly configurable, and finally makes them all available in the form of YML format monitoring templates that can be written to create templates that use these protocols to collect any desired metrics data. -![hertzbeat](/img/blog/multi-protocol.png) - -Do you believe that users can just write a monitoring template on the UI page, click save and immediately adapt a new monitoring type like `K8s` or `Docker`? - -![hertzbeat](/img/home/9.png) - -### Built-in Monitoring Types - -**There are a lot of built-in monitoring templates for users to add directly on the page, one monitoring type corresponds to one YML monitoring template**. - -* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml), [SpringBoot3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml), - [Udp 
Port](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-udp.yml), [Dns](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dns.yml), - [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), - [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), - [Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), - [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), - [DM](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml), 
[OpenGauss](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml), - [ClickHouse](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml), - [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) - [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), - [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), [NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), - [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), - [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky 
Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), - [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), - [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), - [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), - [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml), [ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml), - [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX 
MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), - [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), - [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), - [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), - [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) -* And More Your Custom Template. -* Notified Support `Discord` `Slack` `Telegram` `Email` `Dingtalk` `WeChat` `FeiShu` `Webhook` `SMS` `ServerChan`. - -### Powerful Customization - -> From the previous introduction of **Monitoring Templates**, it is clear that `HertzBeat` has powerful customization features. -> Each monitor type is considered as a monitor template, no matter it is built-in or user-defined. You can easily add, modify and delete indicators by modifying the monitoring template. 
-> The templates contain a series of functions such as protocol configuration, environment variables, metrics conversion, metrics calculation, units conversion, metrics collection, etc., which help users to collect the metrics they want. - -![hertzbeat](/img/docs/custom-arch.png) - -### No Agent Required - -> For users who have used various systems, the most troublesome thing is the installation, deployment, debugging and upgrading of various `agents`. -> You need to install one `agent` per host, and several corresponding `agents` to monitor different application middleware, and the number of monitoring can easily reach thousands, so writing a batch script may ease the burden. -> The problem of whether the version of `agent` is compatible with the main application, debugging the communication between `agent` and the main application, upgrading the `agent` synchronization and so on and so forth, are all big headaches. - -The principle of `HertzBeat` is to use different protocols to connect directly to the end system, and use the `PULL` form to pull the collected data, without the need for the user to deploy and install `Agent` | `Exporter` on the host of the end, etc. For example, monitoring the `linux operating system`. - -* For example, if you want to monitor `linux OS`, you can just input the IP port account password or key on `HertzBeat` side. -* For example, to monitor `linux OS`, just enter your ip/port account password or key in `HertzBeat`. - -**Password and other sensitive information is encrypted on all links**. - -### High Performance Clustering - -> When the number of monitors rises exponentially, the collection performance drops or the environment is unstable and prone to single point of failure of the collectors, then our collector clusters come into play. 
- -* HertzBeat supports the deployment of collector clusters and the horizontal expansion of multiple collector clusters to exponentially increase the number of monitorable tasks and collection performance. -* Monitoring tasks are self-scheduled in the collector cluster, single collector hangs without sensing the failure to migrate the collection tasks, and the newly added collector nodes are automatically scheduled to share the collection pressure. -* It is very easy to switch between stand-alone mode and cluster mode without additional component deployment. - -![hertzbeat](/img/docs/cluster-arch.png) - -### Cloud Edge Collaboration - -> Two locations, three centers, multi-cloud environments, multi-isolated networks, you may have heard of these scenarios. When there is a need for a unified monitoring system to monitor the IT resources of different isolated networks, this is where our Cloud Edge Collaboration comes in. - -In an isolated network where multiple networks are not connected, we need to deploy a monitoring system in each network in the previous solution, which leads to data non-interoperability and inconvenient management, deployment and maintenance. -`HertzBeat` provides the ability of cloud edge collaboration, can be deployed in multiple isolated networks edge collector, collector in the isolated network within the monitoring task collection, collection of data reported by the main service unified scheduling management display. - -![hertzbeat](/img/docs/cluster-arch.png) - -### Easy to Use - -* Set **Monitoring+Alarm+Notification** All in one, no need to deploy multiple component services separately. -* Full UI interface operation, no matter adding new monitor, modifying monitor template, or alarm threshold notification, all can be done in WEB interface, no need to modify files or scripts or reboot. -* No Agent is needed, we only need to fill in the required IP, port, account, password and other parameters in the WEB interface. 
-* Customization friendly, only need a monitoring template YML, automatically generate monitoring management page, data chart page, threshold configuration for corresponding monitoring types. -* Threshold alarm notification friendly, based on the expression threshold configuration, a variety of alarm notification channels, support alarm silence, time label alarm level filtering and so on. - -### Completely Open Source - -* An open source collaboration product using the `Apache2` protocol, maintained by a free and open source community. -* No monitoring number `License`, monitoring type and other pseudo-open source restrictions . -* Built on `Java+SpringBoot+TypeScript+Angular` mainstream technology stack , convenient secondary development . -* Open source is not the same as free, dev based on HertzBeat must retain the logo, name, page footnotes, copyright, etc. - -**HertzBeat has been included in the [CNCF Observability And Analysis - Monitoring Landscape](https://landscape.cncf.io/card-mode?category=monitoring&grouping=category)** - -![cncf](/img/home/cncf-landscape-left-logo.svg) - ---- - -**HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** - ------ - -## Quickly Start - -Just run a single command in a Docker environment: `docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` -Browser access `http://localhost:1157` default account password `admin/hertzbeat` - -### Landing Page - -* HertzBeat's user management is unified by the configuration file `sureness.yml`, which allows users to add, delete, and modify user information, user role permissions, and so on. 
Default password admin/hertzbeat - -![hertzbeat](/img/home/0.png) - -### Overview Page - -* The global overview page shows the distribution of current monitoring categories, users can visualize the current monitoring types and quantities and click to jump to the corresponding monitoring types for maintenance and management. -* Show the status of currently registered collector clusters, including collector on-line status, monitoring tasks, startup time, IP address, name and so on. -* Show the list of recent alarm messages, alarm level distribution and alarm processing rate. - -![hertzbeat](/img/home/1.png) - -### Monitoring Center - -* The monitoring portal supports the management of monitoring of application services, database, operating system, middleware, network, customization, etc. It displays the currently added monitors in the form of a list. -* It displays the currently added monitors in the form of a list and supports adding, modifying, deleting, canceling, importing, exporting and batch management of monitors. -* Support tag grouping, query filtering, view monitoring details portal. 
- -Built-in support for monitoring types include: - -* [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml), [SpringBoot3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml), - [Udp Port](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-udp.yml), [Dns](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dns.yml), - [Pop3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-pop3.yml), [Ntp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ntp.yml), - [Api Code](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api_code.yml), [Smtp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-smtp.yml), - 
[Nginx](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nginx.yml) -* [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), - [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), - [DM](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml), [OpenGauss](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml), - [ClickHouse](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml), - [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) - [Doris BE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_be.yml), [Doris 
FE](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-doris_fe.yml), - [Memcached](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-memcached.yml), [NebulaGraph](https://github.com/apache/hertzbeat/blob/master/manager/src/main/resources/define/app-nebulaGraph.yml) -* [Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), - [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), - [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), - [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), - [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -* [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), 
[Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), - [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), - [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml), [ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml), - [Spring Gateway](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spring_gateway.yml), [EMQX MQTT](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-emqx.yml), - [AirFlow](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-airflow.yml), [Hive](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hive.yml), - [Spark](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-spark.yml), [Hadoop](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hadoop.yml) -* [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), 
[Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -* [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), - [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), - [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) - -![hertzbeat](/img/home/2.png) - -### Add and Modify Surveillance - -* You can add or modify monitoring instances of a specific monitoring type, configure the IP, port and other parameters of the monitoring on the other end, set the collection period, collection task scheduling method, support detecting availability in advance, etc. The monitoring instances on the page are defined by the corresponding monitoring templates. -* The monitoring parameters configured on the page are defined by the monitoring template of the corresponding monitoring type, and users can modify the configuration parameters on the page by modifying the monitoring template. -* Support associated tags to manage monitoring grouping, alarm matching, and so on. - -![hertzbeat](/img/home/10.png) - -### Monitor Details - -* The monitoring data detail page shows the basic parameter information of the current monitoring, and the monitoring indicator data information. -* Monitor Real-time Data Report displays the real-time values of all the currently monitored indicators in the form of a list of small cards, and users can configure alarm threshold rules based on the real-time values for reference. 
-* Monitor Historical Data Report displays the historical values of the currently monitored metrics in the form of trend charts, supports querying hourly, daily and monthly historical data, and supports configuring the page refresh time. -* ⚠️ Note that the monitoring history charts need to be configured with an external timing database in order to get the full functionality, timing database support: IOTDB, TDengine, InfluxDB, GreptimeDB - -![hertzbeat](/img/home/3.png) - -![hertzbeat](/img/home/4.png) - -### Alarm Center - -* The management display page of triggered alarm messages enables users to visualize the current alarm situation. -* Support alarm processing, alarm marking unprocessed, alarm deletion, clearing and other batch operations. - -![hertzbeat](/img/home/7.png) - -### Threshold Rules - -* Threshold rules can be configured for monitoring the availability status, and alerts can be issued when the value of a particular metric exceeds the expected range. -* There are three levels of alerts: notification alerts, critical alerts, and emergency alerts. -* Threshold rules support visual page configuration or expression rule configuration for more flexibility. -* It supports configuring the number of triggers, alarm levels, notification templates, associated with a specific monitor and so on. - -![hertzbeat](/img/home/6.png) - -![hertzbeat](/img/home/11.png) - -### Alarm Convergence - -* When the alarm is triggered by the threshold rule, it will enter into the alarm convergence, the alarm convergence will be based on the rules of the specific time period of the duplicate alarm message de-emphasis convergence, to avoid a large number of repetitive alarms lead to the receiver alarm numbness. -* Alarm convergence rules support duplicate alarm effective time period, label matching and alarm level matching filter. 
- -![hertzbeat](/img/home/12.png) - -![hertzbeat](/img/home/13.png) - -### Alarm Silence - -* When the alarm is triggered by the threshold rule, it will enter into the alarm silence, the alarm silence will be based on the rules of a specific one-time time period or periodic time period of the alarm message blocking silence, this time period does not send alarm messages. -* This application scenario, such as users in the system maintenance, do not need to send known alarms. Users will only receive alarm messages on weekdays, and users need to avoid disturbances at night. -* Alarm silence rules support one-time time period or periodic time period, support label matching and alarm level matching. - -![hertzbeat](/img/home/14.png) - -![hertzbeat](/img/home/15.png) - -### Message Notification - -* Message notification is a function to notify alarm messages to specified recipients through different media channels, so that the alarm messages can reach them in time. -* It includes recipient information management and notification policy management. Recipient management maintains the information of recipients and their notification methods, while notification policy management maintains the policy rules of which recipients will be notified of the alert messages. -* Notification methods support `Email` `Discord` `Slack` `Telegram` `Pinning` `WeChat` `Flybook` `SMS` `Webhook` and so on. -* The notification policy supports tag matching and alert level matching, which makes it convenient to assign alerts with different tags and alert levels to different receivers and handlers. -* Support notification templates, users can customize the content format of the templates to meet their own personalized notification display needs. 
- -![hertzbeat](/img/home/16.png) - -![hertzbeat](/img/home/17.png) - -![hertzbeat](/img/home/8.png) - -### Monitoring Templates - -* HertzBeat makes `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` and other protocols configurable so that you can customize the metrics you want to collect using these protocols by simply configuring the monitoring template `YML` in your browser. Would you believe that you can instantly adapt a new monitoring type such as `K8s` or `Docker` just by configuring it? -* All our built-in monitoring types (mysql, website, jvm, k8s) are also mapped to corresponding monitoring templates, so you can add and modify monitoring templates to customize your monitoring functions. - -![hertzbeat](/img/home/9.png) - ------ - -**There's so much more to discover. Have Fun!** - ------ - -**Home: ** -**Github: ** -**Gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/others/contact.md b/home/versioned_docs/version-v1.4.x/others/contact.md deleted file mode 100644 index 9411e50d072..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/contact.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: contact -title: Join discussion -sidebar_label: Discussion ---- - -> If you need any help or want to exchange suggestions during the use process, you can discuss and exchange through ISSUE or Github Discussion. 
- -[GITHUB ISSUES](https://github.com/apache/hertzbeat/issues) - -[Chat On Discord](https://discord.gg/Fb6M73htGr) - -[Follow Us Twitter](https://twitter.com/hertzbeat1024) - -##### Github Discussion - -Welcome to Discuss in [Github Discussion](https://github.com/apache/hertzbeat/discussions) diff --git a/home/versioned_docs/version-v1.4.x/others/contributing.md b/home/versioned_docs/version-v1.4.x/others/contributing.md deleted file mode 100644 index 7c0c80721f3..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/contributing.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -id: contributing -title: Contributing Guide -sidebar_label: Contributing Guide ---- - -> We are committed to maintaining a happy community that helps each other, welcome every contributor to join us! - -### Kinds of Contributions - -> In the HertzBeat community, there are many ways to contribute: - -- 💻**Code**: Can help the community complete some tasks, write new features or fix some bugs; - -- ⚠️**Test**: Can come to participate in the writing of test code, including unit testing, integration testing, e2e testing; - -- 📖**Docs**: Can write or Documentation improved to help users better understand and use HertzBeat; - -- 📝**Blog**: You can write articles about HertzBeat to help the community better promote; - -- 🤔**Discussion**: You can participate in the discussion of new features of HertzBeat and integrate your ideas with HertzBeat; - -- 💡**Preach**: Can help publicize or promote the HertzBeat community, speak in meetup or summit; - -- 💬**Suggestion**: You can also make some suggestions to the project or community to promote the healthy development of the community; - -More see [Contribution Types](https://allcontributors.org/docs/en/emoji-key) - -Even small corrections to typos are very welcome :) - -### Getting HertzBeat up and running - -> To get HertzBeat code running on your development tools, and able to debug with breakpoints. -> This is a front-end and back-end separation project. 
To start the local code, the back-end [manager](https://github.com/apache/hertzbeat/tree/master/manager) and the front-end [web-app](https://github.com/apache/hertzbeat/tree/master/web-app) must be started separately. - -- Backend start - -1. Requires `maven3+`, `java11` and `lombok` environments -2. (Optional) Modify the configuration file-`manager/src/main/resources/application.yml` -3. Start `springboot manager` service `manager/src/main/java/org/apache/hertzbeat/manager/Manager.java` - -- Front-web start - -1. Need `nodejs npm angular-cli` environment -2. Install yarn: `npm install -g yarn` -3. Execute under the front-end project directory web-app: `yarn install` -4. Install angular-cli globally: `npm install -g @angular/cli@14 --registry=https://registry.npm.taobao.org` -5. After the local backend is started, start the local frontend in the web-app directory: `ng serve --open` -6. Browser access to localhost:4200 to start, default account/password is *admin/hertzbeat* - -### Find tasks - -Find the issue you are interested in! On our GitHub repo issue list, we often publish some issues with the label good first issue or status: volunteer wanted. -These issues welcome the help of contributors. Among them, good first issues tend to have low thresholds and are suitable for novices. - -Of course, if you have a good idea, you can also propose it directly on GitHub Discussion or contact with community. - -### Submit Pull Request - -1. First you need to fork your target [hertzbeat repository](https://github.com/apache/hertzbeat). -2. Then download the code locally with git command: - -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` - -3. After the download is complete, please refer to the getting started guide or README file of the target repository to initialize the project. -4. Then, you can refer to the following command to submit the code: - -```shell -git checkout -b a-feature-branch #Recommended -``` - -5. 
Submit the code as a commit, the commit message format specification required: [module name or type name] feature or bugfix or doc: custom message. - -```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` - -6. Push to the remote repository - -```shell -git push origin a-feature-branch -``` - -7. Then you can initiate a new PR (Pull Request) on GitHub. - -Please note that the title of the PR needs to conform to our spec, and write the necessary description in the PR to facilitate code review by Committers and other contributors. - -### Wait for the code to be merged - -After submitting the PR, the Committers or the community's friends will review the code you submitted (Code Review), and will propose some modification suggestions or conduct some discussions. Please pay attention to your PR in time. - -If subsequent changes are required, there is no need to initiate a new PR. After submitting a commit on the original branch and pushing it to the remote repository, the PR will be automatically updated. - -In addition, our project has a relatively standardized and strict CI inspection process. After submitting a PR, CI will be triggered. Please pay attention to whether it passes the CI inspection. - -Finally, the Committers can merge the PR into the master branch. - -### After the code is merged - -After the code has been merged, you can delete the development branch on both the local and remote repositories: - -```shell -git branch -d a-dev-branch -git push origin --delete a-dev-branch -``` - -On the master/main branch, you can do the following to sync the upstream repository: - -```shell -git remote add upstream https://github.com/apache/hertzbeat.git #Bind the remote warehouse, if it has been executed, it does not need to be executed again -git checkout master -git pull upstream master -``` - -### How to become a Committer? - -With the above steps, you are a contributor to HertzBeat. 
Repeat the previous steps to stay active in the community; keep at it, and you can become a Committer! - -### Join Discussion - -[Github Discussion](https://github.com/apache/hertzbeat/discussions) - -Add WeChat account `ahertzbeat` to pull you into the WeChat group. - -QQ group number `630061200`, verify code: `tancloud` - -Public WeChat: `tancloudtech` - -## 🥐 Architecture - -- **[manager](https://github.com/apache/hertzbeat/tree/master/manager)** Provide monitoring management, system management basic services. - -> Provides monitoring management, monitoring configuration management, system user management, etc. -> -> - **[collector](https://github.com/apache/hertzbeat/tree/master/collector)** Provide metrics data collection services. -> Use common protocols to remotely collect and obtain peer-to-peer metrics data. -> - **[warehouse](https://github.com/apache/hertzbeat/tree/master/warehouse)** Provide monitoring data warehousing services. -> Metrics data management, data query, calculation and statistics. -> - **[alerter](https://github.com/apache/hertzbeat/tree/master/alerter)** Provide alert service. -> Alarm calculation trigger, monitoring status linkage, alarm configuration, and alarm notification. -> - **[web-app](https://github.com/apache/hertzbeat/tree/master/web-app)** Provide web ui. -> Angular Web UI. 
- -![hertzBeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat/home/static/img/docs/hertzbeat-arch.svg) diff --git a/home/versioned_docs/version-v1.4.x/others/design.md b/home/versioned_docs/version-v1.4.x/others/design.md deleted file mode 100644 index 64e248f5b2d..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/design.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: design -title: Design Document -sidebar_label: Design Document ---- - -### HertzBeat Architecture - -![architecture](https://cdn.jsdelivr.net/gh/apache/hertzbeat/home/static/img/docs/hertzbeat-arch.svg) - -### TanCloud Architecture - -TanCloud is a SAAS cluster version based on HertzBeat, which adopts a multi-cluster and multi-tenant architecture model. diff --git a/home/versioned_docs/version-v1.4.x/others/developer.md b/home/versioned_docs/version-v1.4.x/others/developer.md deleted file mode 100644 index bdaf8172b8b..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/developer.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -id: developer -title: Contributors -sidebar_label: Contributors ---- - -## ✨ HertzBeat Members - - - - - - - - - - - - - - - - - - - -
tomsun28
tomsun28

💻 📖 🎨
会编程的王学长
会编程的王学长

💻 📖 🎨
zcx
zcx

💻 🐛 🎨
进击的阿晨
进击的阿晨

💻 🎨 🐛
铁甲小宝
铁甲小宝

🐛 💻 📖
cuipiheqiuqiu
cuipiheqiuqiu

💻 ⚠️ 🎨
hudongdong129
hudongdong129

💻 ⚠️ 📖
zqr10159
Logic

📖 💻🎨
vinci
vinci

💻 📖 🎨
淞筱
淞筱

💻 📖 🎨
东风
东风

💻 🎨 📖
- -cert - -## ✨ HertzBeat Contributors - -Thanks to these wonderful people, welcome to join us: [Contributor Guide](contributing) - -cert - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
tomsun28
tomsun28

💻 📖 🎨
会编程的王学长
会编程的王学长

💻 📖 🎨
MaxKey
MaxKey

💻 🎨 🤔
观沧海
观沧海

💻 🎨 🐛
yuye
yuye

💻 📖
jx10086
jx10086

💻 🐛
winnerTimer
winnerTimer

💻 🐛
goo-kits
goo-kits

💻 🐛
brave4Time
brave4Time

💻 🐛
WalkerLee
WalkerLee

💻 🐛
jianghang
jianghang

💻 🐛
ChineseTony
ChineseTony

💻 🐛
wyt199905
wyt199905

💻
卫傅庆
卫傅庆

💻 🐛
zklmcookle
zklmcookle

💻
DevilX5
DevilX5

📖 💻
tea
tea

💻
yangshihui
yangshihui

💻 🐛
DreamGirl524
DreamGirl524

💻 📖
gzwlly
gzwlly

📖
cuipiheqiuqiu
cuipiheqiuqiu

💻 ⚠️ 🎨
lambert
lambert

💻
mroldx
mroldx

📖
woshiniusange
woshiniusange

📖
VampireAchao
VampireAchao

💻
zcx
zcx

💻 🐛 🎨
CharlieXCL
CharlieXCL

📖
Privauto
Privauto

💻 📖
emrys
emrys

📖
SxLiuYu
SxLiuYu

🐛
All Contributors
All Contributors

📖
铁甲小宝
铁甲小宝

💻 📖
click33
click33

📖
蒋小小
蒋小小

📖
Kevin Huang
Kevin Huang

📖
铁甲小宝
铁甲小宝

🐛 💻 📖
Captain Jack
Captain Jack

📖
haibo.duan
haibo.duan

⚠️ 💻
assassin
assassin

🐛 💻
Reverse wind
Reverse wind

⚠️ 💻
luxx
luxx

💻
Ikko Ashimine
Ikko Ashimine

📖
leizenan
leizenan

💻
BKing
BKing

📖
xingshuaiLi
xingshuaiLi

📖
wangke6666
wangke6666

📖
刺猬
刺猬

🐛 💻
Haste
Haste

💻
zhongshi.yi
zhongshi.yi

📖
Qi Zhang
Qi Zhang

📖
MrAndyMing
MrAndyMing

📖
idongliming
idongliming

💻
Zichao Lin
Zichao Lin

💻 📖
liudonghua
liudonghua

💻 🤔
Jerry
Jerry

💻 ⚠️ 🤔
yanhom
yanhom

📖
fsl
fsl

💻
xttttv
xttttv

📖
NavinKumarBarnwal
NavinKumarBarnwal

💻
Zakkary
Zakkary

📖
sunxinbo
sunxinbo

💻 ⚠️
ldzbook
ldzbook

📖 🐛
余与雨
余与雨

💻 ⚠️
MysticalDream
MysticalDream

💻 ⚠️
zhouyoulin12
zhouyoulin12

💻 ⚠️
jerjjj
jerjjj

💻
wjl110
wjl110

💻
Sean
Sean

📖
chenyiqin
chenyiqin

💻 ⚠️
hudongdong129
hudongdong129

💻 ⚠️ 📖
TherChenYang
TherChenYang

💻 ⚠️
HattoriHenzo
HattoriHenzo

💻 ⚠️
ycilry
ycilry

📖
aoshiguchen
aoshiguchen

📖 💻
蔡本祥
蔡本祥

💻
浮游
浮游

💻
Grass-Life
Grass-Life

💻
xiaohe428
xiaohe428

💻 📖
TableRow
TableRow

📖 💻
ByteIDance
ByteIDance

💻
Jangfe
Jangfe

💻
zqr10159
zqr10159

📖 💻
vinci
vinci

💻 📖 🎨
js110
js110

💻
CrazyLionLi
CrazyLionLi

📖
banmajio
banmajio

💻
topsuder
topsuder

💻
richar2022
richar2022

💻
fcb-xiaobo
fcb-xiaobo

💻
wenkyzhang
wenkyzhang

📖
ZangJuxy
ZangJuxy

📖
l646505418
l646505418

💻
Carpe-Wang
Carpe-Wang

💻
莫枢
莫枢

💻
huangcanda
huangcanda

💻
世纪末的架构师
世纪末的架构师

💻
ShuningWan
ShuningWan

📖
MrYZhou
MrYZhou

📖
suncqujsj
suncqujsj

📖
sunqinbo
sunqinbo

💻
haoww
haoww

📖
i-mayuan
i-mayuan

📖
fengruge
fengruge

📖
zhanghuan
zhanghuan

💻
shenymin
shenymin

💻
Dhruva Chandra
Dhruva Chandra

💻
miss_z
miss_z

📖
wyt990
wyt990

💻
licocon
licocon

💻
Mi Na
Mi Na

💻
Kylin-Guo
Kylin-Guo

📖
Mr灬Dong先生
Mr灬Dong先生

💻
Pratyay Banerjee
Pratyay Banerjee

📖 💻
yujianzhong520
yujianzhong520

💻
SPPan
SPPan

💻
ZhangJiashu
ZhangJiashu

💻
impress
impress

💻
凌晨一点半
凌晨一点半

📖
Eeshaan Sawant
Eeshaan Sawant

💻
nandofromthebando
nandofromthebando

💻
caiboking
caiboking

💻
baixing99
baixing99

💻
Yang Chuang
Yang Chuang

💻
wlin20
wlin20

💻
guojing1983
guojing1983

💻
moxi
moxi

📖
qq471754603
qq471754603

💻
渭雨
渭雨

💻
liuxuezhuo
liuxuezhuo

💻
lisongning
lisongning

💻
YutingNie
YutingNie

💻 📖 🎨
Mike Zhou
Mike Zhou

💻 📖 🎨
小笨蛋
小笨蛋

💻
littlezhongzer
littlezhongzer

💻
ChenXiangxxxxx
ChenXiangxxxxx

💻
Mr.zhou
Mr.zhou

💻
姚贤丰
姚贤丰

💻
lingluojun
lingluojun

💻
1ue
1ue

💻
qyaaaa
qyaaaa

💻
novohit
novohit

💻
zhuoshangyi
zhuoshangyi

💻
ruanliang
ruanliang

📖 💻
Eden4701
Eden4701

💻 📖 🎨
XiaTian688
XiaTian688

📖
liyinjiang
liyinjiang

💻
ZhangJiashu
ZhangJiashu

📖
moghn
moghn

📖
xiaoguolong
xiaoguolong

💻
Smliexx
Smliexx

💻
Naruse
Naruse

📖 💻
Bala Sukesh
Bala Sukesh

💻
Jinyao Ma
Jinyao Ma

💻
Rick
Rick

💻 ⚠️
东风
东风

💻 🎨 📖
sonam singh
sonam singh

💻
ZhangZixuan1994
ZhangZixuan1994

💻
SHIG
SHIG

💻
泰上老菌
泰上老菌

💻
ldysdu
ldysdu

💻
梁同学
梁同学

💻
avv
avv

💻
yqxxgh
yqxxgh

📖
CharlieShi46
CharlieShi46

💻
Nctllnty
Nctllnty

💻
Wang-Yonghao
Wang-Yonghao

📖
- - - - diff --git a/home/versioned_docs/version-v1.4.x/others/hertzbeat.md b/home/versioned_docs/version-v1.4.x/others/hertzbeat.md deleted file mode 100644 index d06ffeb7fde..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/hertzbeat.md +++ /dev/null @@ -1,276 +0,0 @@ ---- -id: hertzbeat -title: HertzBeat 开源实时监控系统 -sidebar_label: HertzBeat 实时监控 ---- - -> 易用友好的开源实时监控告警系统,无需Agent,高性能集群,强大自定义监控能力。 - -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/web-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/ping-connect.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/port-available.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/database-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/os-monitor.svg) -![hertzbeat](https://img.shields.io/badge/monitor-cloud%20native-brightgreen) -![hertzbeat](https://img.shields.io/badge/monitor-middleware-blueviolet) -![hertzbeat](https://img.shields.io/badge/monitor-network-red) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/alert.svg) - -## 🎡 介绍 - -[HertzBeat 赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,高性能集群,无需 Agent 的开源实时监控告警系统。 - -### 特点 - -- 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控阈值告警通知一步到位。 -- 易用友好,无需 `Agent`,全 `WEB` 页面操作,鼠标点一点就能监控告警,零上手学习成本。 -- 将 `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` 等协议规范可配置化,只需在浏览器配置监控模版 `YML` 就能使用这些协议去自定义采集想要的指标。您相信只需配置下就能立刻适配一款 `K8s` 或 `Docker` 等新的监控类型吗? 
-- 高性能,支持多采集器集群横向扩展,支持多隔离网络监控,云边协同。 -- 自由的告警阈值规则,`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式消息及时送达。 - -> `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 - ----- - -### 完全开源 - -- 使用`Apache2`协议,由自由开放的开源社区主导维护的开源协作产品。 -- 无监控数量`License`,监控类型等伪开源限制。 -- 基于`Java+SpringBoot+TypeScript+Angular`主流技术栈构建,方便的二次开发。 -- 但开源不等同于免费,基于HertzBeat二次开发需保留logo,名称,页面脚注,版权等。 - -### 强大的监控模版 - -> 开始我们就说 HertzBeat 的特点是自定义监控能力,无需 Agent。在讨论这两点之前,我们先介绍下 HertzBeat 的不一样的监控模版。而正是因为这样的监控模版设计,才会有了后面的高级特性。 - -HertzBeat 自身并没有去创造一种采集数据协议,让对端来适配它。而是充分使用了现有的生态,SNMP采集网络交换机路由器信息,JMX采集JAVA应用信息,JDBC规范采集数据集信息,SSH直连执行脚本获取回显信息,HTTP+(JsonPath | prometheus等)解析接口信息,IPMI采集服务器信息等等。 -HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可配置化,最后使其都可以通过编写YML格式监控模版的形式,来制定模版使用这些协议来采集任何想要的指标信息。 - -![hertzbeat](/img/blog/multi-protocol.png) - -你相信用户只需在UI页面编写一个监控模版,点击保存后,就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? - -![hertzbeat](/img/home/9.png) - -### 内置监控类型 - -**一款监控类型对应一个YML监控模版** - -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP 
Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml), [SpringBoot3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml) -- [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), - [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), - [DM](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml), [OpenGauss](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml), - [ClickHouse](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml), - [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) -- 
[Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), - [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), - [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), - [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml), [ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), 
[Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), - [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), - [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) -- 和更多自定义监控模版。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 - -### 强大自定义功能 - -> 由前面的**监控模版**介绍,大概清楚了 HertzBeat 拥有的强大自定义功能。 -> 我们将每个监控类型都视为一个监控模版,不管是官方内置的还是后期用户自定义新增的。用户都可以方便的通过修改监控模版来新增修改删除监控指标。 -> 模版里面包含各个协议的使用,指标别名转换,指标计算,单位转换等一系列功能,帮助用户能采集到自己想要的监控指标。 - -![hertzbeat](/img/docs/custom-arch.png) - -### 无需 Agent - -> 对于使用过各种系统的用户来说,可能最麻烦头大的不过就是各种 agent 的安装部署调试了。 -> 每台主机得装个 agent,为了监控不同应用中间件可能还得装几个对应的 agent,量上来了轻轻松松上千个,写个批量脚本可能会减轻点负担。 -> agent 的版本是否与主应用兼容, agent 与主应用的通讯调试, agent 的同步升级等等等等,这些全是头大的点。 - -HertzBeat 的原理就是使用不同的协议去直连对端系统,采集 PULL 的形式去拉取采集数据,无需用户在对端主机上部署安装 Agent | Exporter等。 -比如监控 linux, 在 HertzBeat 端输入IP端口账户密码或密钥即可。 -比如监控 mysql, 在 HertzBeat 端输入IP端口账户密码即可。 -**密码等敏感信息全链路加密** - -### 高性能集群 - -> 支持部署采集器集群,多采集器集群横向扩展,指数级提高可监控数量与采集性能。 -> 监控任务在采集器集群中自调度,单采集器挂掉无感知故障迁移采集任务,新加入采集器节点自动调度分担采集压力。 -> 单机模式与集群模式相互切换部署非常方便,无需额外组件部署。 - -![hertzbeat](/img/docs/cluster-arch.png) - -### 云边协同 - -> 支持部署边缘采集器集群,与主 HertzBeat 服务云边协同提升采集能力。 - -在多个网络不相通的隔离网络中,在以往的方案中我们需要在每个网络都部署一套监控系统,这导致数据不互通,管理部署维护都不方便。 -HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采集器,采集器在隔离网络内部进行监控任务采集,采集数据上报,由主 HertzBeat 服务统一调度管理展示。 - -![hertzbeat](/img/docs/cluster-arch.png) - -### 易用友好 - -> 集 **监控+告警+通知** All in one, 无需单独部署多个组件服务。 -> 
全UI界面操作,不管是新增监控,修改监控模版,还是告警阈值通知,都可在WEB界面操作完成,无需要修改文件或脚本或重启。 -> 无需 Agent, 监控对端我们只需在WEB界面填写所需IP端口账户密码等参数即可。 -> 自定义友好,只需一个监控模版YML,自动生成对应监控类型的监控管理页面,数据图表页面,阈值配置等。 -> 阈值告警通知友好,基于表达式阈值配置,多种告警通知渠道,支持告警静默,时段标签告警级别过滤等。 - ---- -**`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** - ------ - -## 即刻体验一波 - -Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat` -浏览器访问 `http://localhost:1157` 默认账户密码 `admin/hertzbeat` - -### 登陆页面 - -- HertzBeat 的用户管理统一由配置文件 `sureness.yml` 维护,用户可以通过修改此文件来新增删除修改用户信息,用户角色权限等。默认账户密码 admin/hertzbeat - -![hertzbeat](/img/home/0.png) - -### 概览页面 - -- 全局概览页面,分类展示了当前监控大类别数量分布,用户可直观查看当前的监控类型与数量并点击跳转至对应监控类型进行维护管理。 -- 展示当前注册的采集器集群状态,包括采集器的上线状态,监控任务,启动时间,IP地址,名称等。 -- 下发展示了最近告警信息列表,告警级别分布情况,告警处理率情况。 - -![hertzbeat](/img/home/1.png) - -### 监控中心 - -- 监控入口,支持对应用服务,数据库,操作系统,中间件,网络,自定义等监控的管理。 -- 以列表的形式展示当前已添加的监控,支持对监控的新增,修改,删除,取消监控,导入导出,批量管理等。 -- 支持标签分组,查询过滤,查看监控详情入口等。 - -内置支持的监控类型包括: - -- [Website](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml), [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml), [Ping Connect](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl Certificate](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot2](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP 
Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml), [SpringBoot3](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml) -- [Mysql](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml), - [Oracle](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml), - [DM](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml), [OpenGauss](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml), - [ClickHouse](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml), - [Redis Cluster](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml), [Redis Sentinel](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml) -- 
[Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml), - [EulerOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml), [Fedora CoreOS](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml), - [OpenSUSE](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml), [Rocky Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml), - [Red Hat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml), [FreeBSD](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml), - [AlmaLinux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml), [Debian Linux](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml) -- [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml), - 
[Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml), - [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml), [ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml) -- [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml) -- [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml), [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml), - [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml), [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml), - [H3cSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml) - -![hertzbeat](/img/home/2.png) - -### 新增修改监控 - -- 新增或修改指定监控类型的监控实例,配置对端监控的IP,端口等参数,设置采集周期,采集任务调度方式,支持提前探测可用性等。 -- 页面上配置的监控参数由对应监控类型的监控模版所定义,用户可以通过修改监控模版来修改页面配置参数。 -- 支持关联标签,用标签来管理监控分组,告警匹配等。 - -![hertzbeat](/img/home/10.png) - -### 监控详情 - -- 监控的数据详情页面,展示了当前监控的基本参数信息,监控指标数据信息。 -- 监控实时数据报告,以小卡片列表的形式展示了当前监控的所有指标实时值,用户可根据实时值参考配置告警阈值规则。 -- 
监控历史数据报告,以趋势图表的形式展示了当前监控数值类型的指标的历史值,支持查询小时,天,月的历史数据,支持配置页面刷新时间。
-- ⚠️注意监控历史图表需配置外置时序数据库才能获取完整功能,时序数据库支持: IOTDB, TDengine, InfluxDB, GreptimeDB
-
-![hertzbeat](/img/home/3.png)
-
-![hertzbeat](/img/home/4.png)
-
-### 告警中心
-
-- 已触发告警消息的管理展示页面,使用户能直观地了解当前告警情况。
-- 支持告警处理,告警标记未处理,告警删除清空等批量操作。
-
-![hertzbeat](/img/home/7.png)
-
-### 阈值规则
-
-- 对于监控的可用性状态设置阈值规则,特定指标的值超过我们预期范围时发出告警,这些都可以在阈值规则这里配置。
-- 告警级别分为三级:通知告警,严重告警,紧急告警。
-- 阈值规则支持可视化页面配置或表达式规则配置,灵活性更高。
-- 支持配置触发次数,告警级别,通知模版,关联指定监控等。
-
-![hertzbeat](/img/home/6.png)
-
-![hertzbeat](/img/home/11.png)
-
-### 告警收敛
-
-- 当通过阈值规则判断触发告警后,会进入到告警收敛,告警收敛会根据规则对特定时间段的重复告警消息去重收敛,以避免大量重复性告警导致接收人告警麻木。
-- 告警收敛规则支持重复告警生效时间段,标签匹配和告警级别匹配过滤。
-
-![hertzbeat](/img/home/12.png)
-
-![hertzbeat](/img/home/13.png)
-
-### 告警静默
-
-- 当通过阈值规则判断触发告警后,会进入到告警静默,告警静默会根据规则对特定一次性时间段或周期性时间段的告警消息屏蔽静默,此时间段不发送告警消息。
-- 此应用场景如用户在系统维护中,无需发已知告警。用户在工作日时间才会接收告警消息,用户在晚上需避免打扰等。
-- 告警静默规则支持一次性时间段或周期性时间段,支持标签匹配和告警级别匹配。
-
-![hertzbeat](/img/home/14.png)
-
-![hertzbeat](/img/home/15.png)
-
-### 消息通知
-
-- 消息通知功能是把告警消息通过不同媒体渠道通知给指定的接收人,告警消息及时触达。
-- 功能包含接收人信息管理和通知策略管理,接收人管理维护接收人信息及其通知方式信息,通知策略管理维护把哪些告警信息通知给哪些接收人的策略规则。
-- 通知方式支持 `邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式。
-- 通知策略支持标签匹配和告警级别匹配,方便地使不同标签的告警和告警级别分派给不同的接收处理人。
-
-![hertzbeat](/img/home/16.png)
-
-![hertzbeat](/img/home/17.png)
-
-![hertzbeat](/img/home/8.png)
-
-### 监控模版
-
-- HertzBeat 将 `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` 等协议规范可配置化,只需在浏览器配置监控模版 `YML` 就能使用这些协议去自定义采集想要的指标。您相信只需配置下就能立刻适配一款 `K8s` 或 `Docker` 等新的监控类型吗? 
-- 同理我们内置的所有监控类型(mysql,website,jvm,k8s)也一一映射为对应的监控模版,用户可以新增修改监控模版来自定义监控功能。 - -![hertzbeat](/img/home/9.png) - ---- - -**`HertzBeat`更多强大的功能欢迎使用探索。Have Fun!** - ------ - -**官网: ** -**Github: ** -**Gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/others/huaweicloud.md b/home/versioned_docs/version-v1.4.x/others/huaweicloud.md deleted file mode 100644 index bc2c4f50c96..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/huaweicloud.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: huaweicloud -title: HertzBeat & HuaweiCloud -sidebar_label: HertzBeat & HuaweiCloud ---- - -### HertzBeat 与 HuaweiCloud 的开源合作需求Issue - -> 欢迎大家对感兴趣的Issue领取贡献。 - -- [Task] support using Huawei Cloud OBS to store custom define yml file [#841](https://github.com/apache/hertzbeat/issues/841) -- [Task] support Huawei Cloud CCE metrics monitoring [#839](https://github.com/apache/hertzbeat/issues/839) -- [Task] support EulerOS metrics monitoring [#838](https://github.com/apache/hertzbeat/issues/838) -- [Task] support using Huawei Cloud SMN send alarm notification message [#837](https://github.com/apache/hertzbeat/issues/837) -- [Task] support using GaussDB For Influx store history metrics data [#836](https://github.com/apache/hertzbeat/issues/836) - -### 关于 HuaweiCloud 开源活动 - -HuaweiCloud 华为云将面向开源软件工具链与环境、开源应用构建和开源生态组件构建这三大重点场景,提供技术支持、奖金支持、活动支持,邀请更多的开发者,携手构建开源for HuaweiCloud。 - -开发者将开源软件工具、开源应用和开源组件与华为云对象存储OBS、数仓DWS、云容器CCE等云服务对接,同时基于Terraform模板,上架到华为云云商店,支持其他开发者一键部署使用开源组件 ,称为“开源xxx for HuaweiCloud”。 - -感兴趣的开发者可以查看:华为云开源项目仓库 了解更多。 diff --git a/home/versioned_docs/version-v1.4.x/others/images-deploy.md b/home/versioned_docs/version-v1.4.x/others/images-deploy.md deleted file mode 100644 index 3cdc25e6196..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/images-deploy.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -id: images-deploy -title: HertzBeat 华为云镜像部署 -sidebar_label: HertzBeat 华为云镜像部署快速指引 ---- - -> 易用友好的开源实时监控告警工具,无需Agent,强大自定义监控能力。 - 
-[![discord](https://img.shields.io/badge/chat-on%20discord-brightgreen)](https://discord.gg/Fb6M73htGr) -[![QQ](https://img.shields.io/badge/qq-630061200-orange)](https://qm.qq.com/q/FltGGGIX2m) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/web-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/ping-connect.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/port-available.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/database-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/os-monitor.svg) -![hertzbeat](https://img.shields.io/badge/monitor-cloud%20native-brightgreen) -![hertzbeat](https://img.shields.io/badge/monitor-middleware-blueviolet) -![hertzbeat](https://img.shields.io/badge/monitor-network-red) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/custom-monitor.svg) -![hertzbeat](https://cdn.jsdelivr.net/gh/apache/hertzbeat@gh-pages/img/badge/alert.svg) - -## 🎡 介绍 - -> [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是一个拥有强大自定义监控能力,无需 Agent 的开源实时监控告警工具。 -> 集 **监控+告警+通知** 为一体,支持对应用服务,应用程序,数据库,缓存,操作系统,大数据,中间件,Web服务器,云原生,网络,自定义等监控,阈值告警通知一步到位。 -> 更自由化的阈值规则(计算表达式),`邮件` `Discord` `Slack` `Telegram` `钉钉` `微信` `飞书` `短信` `Webhook` 等方式及时送达。 -> -> 我们将`Http, Jmx, Ssh, Snmp, Jdbc, Prometheus`等协议规范可配置化,您只需在浏览器配置`YML`就能使用这些协议去自定义采集任何您想要的指标。 -> 您相信只需配置下就能立刻适配一款`K8s`或`Docker`等新的监控类型吗? 
-> -> `HertzBeat`的强大自定义,多类型支持,易扩展,低耦合,希望能帮助开发者和中小团队快速搭建自有监控系统。 -> 当然我们也提供了对应的 **[SAAS版本监控云](https://console.tancloud.cn)**,中小团队和个人无需再为了监控自己的网站资源,而去部署学习一套繁琐的监控系统,**[登录即可免费开始](https://console.tancloud.cn)**。 - ----- - -![hertzbeat](/img/home/1.png) - -![hertzbeat](/img/home/9.png) - -## ⛄ Supported - -- [网站监控](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-website.yml), [端口可用性](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-port.yml), - [Http Api](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-api.yml), [Ping连通性](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ping.yml), - [Jvm](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-jvm.yml), [SiteMap全站](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-fullsite.yml), - [Ssl证书](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ssl_cert.yml), [SpringBoot](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-springboot2.yml), - [FTP服务器](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ftp.yml) -- [Mysql](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-mysql.yml), [PostgreSQL](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-postgresql.yml), - [MariaDB](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-mariadb.yml), [Redis](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-redis.yml), - [ElasticSearch](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-elasticsearch.yml), [SqlServer](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-sqlserver.yml), - 
[Oracle](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-oracle.yml), [MongoDB](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-mongodb.yml), - [达梦](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-dm.yml), [OpenGauss](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-opengauss.yml), - [ClickHouse](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-clickhouse.yml), [IoTDB](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-iotdb.yml) -- [Linux](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-linux.yml), [Ubuntu](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-ubuntu.yml), - [CentOS](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-centos.yml), [Windows](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-windows.yml) -- [Tomcat](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-tomcat.yml), [Nacos](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-nacos.yml), - [Zookeeper](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-zookeeper.yml), [RabbitMQ](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-rabbitmq.yml), - [Flink](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-flink.yml), [Kafka](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-kafka.yml), - [ShenYu](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-shenyu.yml), [DynamicTp](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-dynamic_tp.yml), - 
[Jetty](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-jetty.yml), [ActiveMQ](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-activemq.yml) -- [Kubernetes](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-kubernetes.yml), [Docker](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/define/app-docker.yml) -- 和更多的自定义监控。 -- 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 - -## 镜像部署 - -> HertzBeat支持在Linux Windows Mac系统安装运行,CPU支持X86/ARM64。 - -1. 开通服务器时选用 HertzBeat 镜像 -2. 启动服务器 -3. 配置HertzBeat的配置文件(可选) - - 修改位于 `/opt/hertzbeat/config/application.yml` 的配置文件(可选),您可以根据需求修改配置文件 - - 若需使用邮件发送告警,需替换`application.yml`里面的邮件服务器参数 - - **推荐**若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](../start/mysql-change)) - - **推荐**若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.td-engine`参数 具体步骤参见 [使用TDengine存储指标数据](../start/tdengine-init) - - **推荐**若需使用时序数据库IotDB来存储指标数据库,需替换`application.yml`里面的`warehouse.storeiot-db`参数 具体步骤参见 [使用IotDB存储指标数据](../start/iotdb-init) - -4. 配置用户配置文件(可选,自定义配置用户密码) - HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat - 若需要新增删除修改账户或密码,可以通过修改位于 `/opt/hertzbeat/config/sureness.yml` 的配置文件实现,若无此需求可忽略此步骤 - 具体参考 [配置修改账户密码](../start/account-modify) - -5. 部署启动 - 执行位于安装目录/opt/hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - - ``` - ./startup.sh - ``` - -6. 开始探索HertzBeat - 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 - -**HAVE FUN** - -### 部署常见问题 - -**最多的问题就是网络问题,请先提前排查** - -1. **按照流程部署,访问 无界面** - 请参考下面几点排查问题: - -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 - -2. 
**监控历史图表长时间都一直无数据** - -> 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 -> 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB 或 Tdengine IP账户密码等配置是否正确 diff --git a/home/versioned_docs/version-v1.4.x/others/resource.md b/home/versioned_docs/version-v1.4.x/others/resource.md deleted file mode 100644 index 6b52c0ee20d..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/resource.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: resource -title: Related resources -sidebar_label: Related resources ---- - -## HertzBeat Introduce PDF - -Download: [PDF](http://cdn.hertzbeat.com/hertzbeat.pdf) - -## Icon Resources - -### HertzBeat Logo - -![logo](/img/hertzbeat-logo.svg) - -Download: [SVG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.svg) [PNG](https://gitee.com/hertzbeat/hertzbeat/raw/master/home/static/img/hertzbeat-logo.jpg) diff --git a/home/versioned_docs/version-v1.4.x/others/sponsor.md b/home/versioned_docs/version-v1.4.x/others/sponsor.md deleted file mode 100644 index c741e29218a..00000000000 --- a/home/versioned_docs/version-v1.4.x/others/sponsor.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -id: sponsor -title: Sponsor -sidebar_label: Sponsor ---- - -**Hertzbeat is completely free for individuals or enterprises. If you like this project and are willing to help, buy us a cup of coffee** - -![wechat-alipay](/img/docs/pay.png) - -Thanks [JiShi Information(build a new microwave + optical transaction network)](https://www.flarespeed.com) sponsored server node. -Thanks [TianShang cloud computing(new wisdom cloud)](https://www.tsyvps.com/aff/BZBEGYLX) sponsored server node. 
diff --git a/home/versioned_docs/version-v1.4.x/start/account-modify.md b/home/versioned_docs/version-v1.4.x/start/account-modify.md deleted file mode 100644 index 99541502eb2..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/account-modify.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -id: account-modify -title: Modify Account Username Password -sidebar_label: Update Account ---- - -HertzBeat default built-in three user accounts, respectively admin/hertzbeat tom/hertzbeat guest/hertzbeat -If you need add, delete or modify account or password, configure `sureness.yml`. Ignore this step without this demand. -The configuration file content refer to project repository[/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/blob/master/script/sureness.yml) -Modify the following **part parameters** in sureness.yml:**[Note⚠️Other default sureness configuration parameters should be retained]** - -```yaml - -resourceRole: - - /api/account/auth/refresh===post===[admin,user,guest] - - /api/apps/**===get===[admin,user,guest] - - /api/monitor/**===get===[admin,user,guest] - - /api/monitor/**===post===[admin,user] - - /api/monitor/**===put===[admin,user] - - /api/monitor/**===delete==[admin] - - /api/monitors/**===get===[admin,user,guest] - - /api/monitors/**===post===[admin,user] - - /api/monitors/**===put===[admin,user] - - /api/monitors/**===delete===[admin] - - /api/alert/**===get===[admin,user,guest] - - /api/alert/**===post===[admin,user] - - /api/alert/**===put===[admin,user] - - /api/alert/**===delete===[admin] - - /api/alerts/**===get===[admin,user,guest] - - /api/alerts/**===post===[admin,user] - - /api/alerts/**===put===[admin,user] - - /api/alerts/**===delete===[admin] - - /api/notice/**===get===[admin,user,guest] - - /api/notice/**===post===[admin,user] - - /api/notice/**===put===[admin,user] - - /api/notice/**===delete===[admin] - - /api/tag/**===get===[admin,user,guest] - - /api/tag/**===post===[admin,user] - - /api/tag/**===put===[admin,user] - - 
/api/tag/**===delete===[admin] - - /api/summary/**===get===[admin,user,guest] - - /api/summary/**===post===[admin,user] - - /api/summary/**===put===[admin,user] - - /api/summary/**===delete===[admin] - -# Resources that need to be filtered and protected can be accessed directly without authentication -# /api/v1/source3===get means /api/v1/source3===get it can be accessed by anyone. Don't need to authentication -excludedResource: - - /api/account/auth/**===* - - /api/i18n/**===get - - /api/apps/hierarchy===get - # web ui the front-end static resource - - /===get - - /dashboard/**===get - - /monitors/**===get - - /alert/**===get - - /account/**===get - - /setting/**===get - - /passport/**===get - - /**/*.html===get - - /**/*.js===get - - /**/*.css===get - - /**/*.ico===get - - /**/*.ttf===get - - /**/*.png===get - - /**/*.gif===get - - /**/*.jpg===get - - /**/*.svg===get - - /**/*.json===get - # swagger ui resource - - /swagger-resources/**===get - - /v2/api-docs===get - - /v3/api-docs===get - -# user account information -# Here is admin tom lili three accounts -# eg: admin includes[admin,user]roles, password is hertzbeat -# eg: tom includes[user], password is hertzbeat -# eg: lili includes[guest],text password is lili, salt password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin,user] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] -``` - -Modify the following **part parameters** in sureness.yml **[Note⚠️Other default sureness configuration parameters should be retained]**: - -```yaml - -# user account information -# Here is admin tom lili three accounts -# eg: admin includes[admin,user]roles, password is hertzbeat -# eg: tom includes[user], password is hertzbeat -# eg: lili includes[guest], text password is lili, salt password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin,user] - - appId: tom - 
credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] -``` - -## Update Security Secret - -> This secret is the key for account security encryption management and needs to be updated to your custom key string of the same length. - -Update the `application.yml` file in the `config` directory, modify the `sureness.jwt.secret` parameter to your custom key string of the same length. - -```yaml -sureness: - jwt: - secret: 'CyaFv0bwq2Eik0jdrKUtsA6bx3sDJeFV643R - LnfKefTjsIfJLBa2YkhEqEGtcHDTNe4CU6+9 - 8tVt4bisXQ13rbN0oxhUZR73M6EByXIO+SV5 - dKhaX0csgOCTlCxq20yhmUea6H6JIpSE2Rwp' -``` - -**Restart HertzBeat, access to explore** diff --git a/home/versioned_docs/version-v1.4.x/start/custom-config.md b/home/versioned_docs/version-v1.4.x/start/custom-config.md deleted file mode 100644 index 7f45b5dd27d..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/custom-config.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: custom-config -title: Advanced Params Config -sidebar_label: Advanced Params Config ---- - -This describes how to configure the SMS server, the number of built-in availability alarm triggers, etc. - -**Configuration file `application.yml` of `hertzbeat`** - -### Configure the configuration file of HertzBeat - -Modify the configuration file located at `hertzbeat/config/application.yml` -Note ⚠️The docker container method needs to mount the application.yml file to the local host -The installation package can be decompressed and modified in `hertzbeat/config/application.yml` - -1. Configure the SMS sending server - -> Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. 
- -Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) - -```yaml -common: - sms: - tencent: - secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY - secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA - app-id: 1435441637 - sign-name: XX Technology - template-id: 1343434 -``` - -2. Configure alarm custom parameters - -```yaml -alerter: - # Custom console address - console-url: https://console.tancloud.io -``` - -3. Use external redis instead of memory to store real-time metric data - -> By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. - -Note ⚠️ `memory.enabled: false, redis.enabled: true` - -```yaml -warehouse: - store: - memory: - enabled: false - init-size: 1024 - redis: - enabled: true - host: 127.0.0.1 - port: 6379 - password: 123456 -``` diff --git a/home/versioned_docs/version-v1.4.x/start/docker-deploy.md b/home/versioned_docs/version-v1.4.x/start/docker-deploy.md deleted file mode 100644 index a1ff268fcb0..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/docker-deploy.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -id: docker-deploy -title: Install HertzBeat via Docker -sidebar_label: Install via Docker ---- - -> Recommend to use docker deploy HertzBeat - -1. Download and install the Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 - After the installation you can check if the Docker version normally output at the terminal. - - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. 
pull HertzBeat Docker mirror - you can look up the mirror version TAG in [dockerhub mirror repository](https://hub.docker.com/r/apache/hertzbeat/tags) - or in [quay.io mirror repository](https://quay.io/repository/apache/hertzbeat) - - ```shell - docker pull apache/hertzbeat - docker pull apache/hertzbeat-collector - ``` - - or - - ```shell - docker pull quay.io/tancloud/hertzbeat - docker pull quay.io/tancloud/hertzbeat-collector - ``` - -3. Mounted HertzBeat configuration file (optional) - Download and config `application.yml` in the host directory, eg:`$(pwd)/application.yml` - Download from [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) or [gitee/script/application.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/application.yml). - You can modify the configuration yml file according to your needs. - - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) -4. Mounted the account file(optional) - HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` - If you need update account or password, configure `sureness.yml`. 
Ignore this step without this demand. - Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` - Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) or [gitee/script/sureness.yml](https://gitee.com/hertzbeat/hertzbeat/raw/master/script/sureness.yml) - For detail steps, please refer to [Configure Account Password](account-modify) -5. Start the HertzBeat Docker container - -```shell -$ docker run -d -p 1157:1157 -p 1158:1158 \ - -e LANG=en_US.UTF-8 \ - -e TZ=Asia/Shanghai \ - -v $(pwd)/data:/opt/hertzbeat/data \ - -v $(pwd)/logs:/opt/hertzbeat/logs \ - -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ - -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ - --name hertzbeat apache/hertzbeat -``` - -This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. - -- `docker run -d` : Run a container in the background via Docker -- `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. -- `-e LANG=en_US.UTF-8` : Set the system language -- `-e TZ=Asia/Shanghai` : Set the system timezone -- `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. -- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. -- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. 
-- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. -- `--name hertzbeat` : Naming container name hertzbeat -- `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** - -6. Begin to explore HertzBeat - - Access `http://ip:1157/` using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! - -7. Deploy collector cluster (Optional) - -```shell -$ docker run -d \ - -e IDENTITY=custom-collector-name \ - -e MODE=public \ - -e MANAGER_HOST=127.0.0.1 \ - -e MANAGER_PORT=1158 \ - --name hertzbeat-collector apache/hertzbeat-collector -``` - -This command starts a running HertzBeat-Collector container. - -- `docker run -d` : Run a container in the background via Docker -- `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. -- `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. -- `-e MANAGER_HOST=127.0.0.1` : Important⚠️ Set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. -- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. -- `--name hertzbeat-collector` : Naming container name hertzbeat-collector -- `apache/hertzbeat-collector` : Use the pulled latest HertzBeat-Collector official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat-collector` instead if you pull `quay.io` docker image.** - -8. Access `http://localhost:1157` and you will see the registered new collector in dashboard. 
- -**HAVE FUN** - -### FAQ - -**The most common problem is network problems, please check in advance** - -1. **MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** - The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. - -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` - -2. **According to the process deploy,visit no interface** - Please refer to the following points to troubleshoot issues: - -> 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. - -3. **Log an error TDengine connection or insert SQL failed** - -> 1:Check whether database account and password configured is correct, the database is created. -> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. - -4. **Historical monitoring charts have been missing data for a long time** - -> 1:Check whether you configure Tdengine or IoTDB. No configuration means no historical chart data. -> 2:Check whether Tdengine database `hertzbeat` is created. -> 3: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. - -5. 
If the history chart on the monitoring page is not displayed,popup [please configure time series database] - -> As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service - IoTDB or TDengine database. -> Installation and initialization this database refer to [TDengine Installation](tdengine-init) or [IoTDB Installation](iotdb-init) - -6. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed - -> Please confirm whether the installed TDengine version is near 2.4.0.12, version 3.0 and 2.2 are not compatible. - -7. The time series database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure dependent time series database] - -> Please check if the configuration parameters are correct -> Is iot-db or td-engine enable set to true -> Note⚠️If both hertzbeat and IotDB, TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory diff --git a/home/versioned_docs/version-v1.4.x/start/greptime-init.md b/home/versioned_docs/version-v1.4.x/start/greptime-init.md deleted file mode 100644 index 5102ecfe6b7..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/greptime-init.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: greptime-init -title: Use Time Series Database GreptimeDB to Store Metrics Data (Optional) -sidebar_label: Use GreptimeDB Store Metrics ---- - -HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) -We recommend VictoriaMetrics for long term support. 
- -GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. - -It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage. - -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** - -### Install GreptimeDB via Docker - -> Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Install GreptimeDB with Docker - -```shell -$ docker run -p 4000-4004:4000-4004 \ - -p 4242:4242 -v /opt/greptimedb:/tmp/greptimedb \ - --name greptime \ - greptime/greptimedb standalone start \ - --http-addr 0.0.0.0:4000 \ - --rpc-addr 0.0.0.0:4001 \ -``` - -`-v /opt/greptimedb:/tmp/greptimedb` is local persistent mount of greptimedb data directory. `/opt/greptimedb` should be replaced with the actual local directory. -use```$ docker ps``` to check if the database started successfully - -### Configure the database connection in hertzbeat `application.yml` configuration file - -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.greptime` data source parameters, URL account and password. 
-
-```yaml
-warehouse:
-  store:
-    # disable jpa
-    jpa:
-      enabled: false
-    # enable greptime
-    greptime:
-      enabled: true
-      endpoint: localhost:4001
-```
-
-2. Restart HertzBeat
-
-### FAQ
-
-1. Do both the time series databases Greptime, IoTDB or TDengine need to be configured? Can they both be used?
-
-> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data.
diff --git a/home/versioned_docs/version-v1.4.x/start/influxdb-init.md b/home/versioned_docs/version-v1.4.x/start/influxdb-init.md
deleted file mode 100644
index 05f6b44d876..00000000000
--- a/home/versioned_docs/version-v1.4.x/start/influxdb-init.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-id: influxdb-init
-title: Use Time Series Database InfluxDB to Store Metrics Data (Optional)
-sidebar_label: Use InfluxDB Store Metrics
----
-
-HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment)
-We recommend VictoriaMetrics for long term support.
-
-InfluxDB is an open-source time series database optimized for the storage and retrieval of time series data such as monitoring metrics.
-
-**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance**
-**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.**
-Note⚠️ Need InfluxDB 1.x Version.
-
-### 1. Use HuaweiCloud GaussDB For Influx
-
-> Use [HuaweiCloud GaussDB For Influx](https://www.huaweicloud.com/product/gaussdbforinflux.html)
->
-> Get the `GaussDB For Influx` service url, username and password config.
-
-⚠️Note `GaussDB For Influx` enable SSL default, the service url should use `https:`
-
-### 2. 
Install InfluxDB via Docker
-
-> Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb)
->
-> 1. Download and install Docker environment
->    Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/).
->    After the installation you can check if the Docker version normally output at the terminal.
->
->    ```
->    $ docker -v
->    Docker version 20.10.12, build e91ed57
->    ```
->
-> 2. Install InfluxDB with Docker
->
->    ```
->    $ docker run -p 8086:8086 \
->        -v /opt/influxdb:/var/lib/influxdb \
->        influxdb:1.8
->    ```
->
->    `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory.
->    use```$ docker ps``` to check if the database started successfully
-
-### Configure the database connection in hertzbeat `application.yml` configuration file
-
-1. Configure HertzBeat's configuration file
-   Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml)
-   Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml`
-   Replace `warehouse.store.influxdb` data source parameters, URL account and password.
-
-```yaml
-warehouse:
-  store:
-    # disable jpa
-    jpa:
-      enabled: false
-    # enable influxdb
-    influxdb:
-      enabled: true
-      server-url: http://localhost:8086
-      username: root
-      password: root
-      expire-time: '30d'
-      replication: 1
-```
-
-2. Restart HertzBeat
-
-### FAQ
-
-1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used?
-
-> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data.
diff --git a/home/versioned_docs/version-v1.4.x/start/iotdb-init.md b/home/versioned_docs/version-v1.4.x/start/iotdb-init.md deleted file mode 100644 index 7c3f7bd4e38..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/iotdb-init.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -id: iotdb-init -title: Use Time Series Database IoTDB to Store Metrics Data (Optional) -sidebar_label: Use IoTDB Store Metrics ---- - -HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) -We recommend VictoriaMetrics for long term support. - -Apache IoTDB is a software system that integrates the collection, storage, management and analysis of time series data of the Internet of Things. We use it to store and analyze the historical data of monitoring metrics collected. Support V0.13+ version and V1.0.+ version. - -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** - -> If you already have an IoTDB environment, you can skip directly to the YML configuration step. - -### Install IoTDB via Docker - -> Refer to the official website [installation tutorial](https://iotdb.apache.org/UserGuide/V0.13.x/QuickStart/WayToGetIoTDB.html) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. 
Install IoTDB via Docker - -```shell -$ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ - -v /opt/iotdb/data:/iotdb/data \ - --name iotdb \ - apache/iotdb:0.13.3-node -``` - -`-v /opt/iotdb/data:/iotdb/data` is local persistent mount of IotDB data directory.`/iotdb/data` should be replaced with the actual local directory. -use```$ docker ps``` to check if the database started successfully - -3. Configure the database connection in hertzbeat `application.yml`configuration file - - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.iot-db` data source parameters, HOST account and password. - -``` -warehouse: - store: - # disable JPA - jpa: - enabled: false - # enable iot-db - iot-db: - enabled: true - host: 127.0.0.1 - rpc-port: 6667 - username: root - password: root - # config.org.apache.hertzbeat.warehouse.IotDbVersion: V_0_13 || V_1_0 - version: V_0_13 - query-timeout-in-ms: -1 - # default '7776000000'(90days,unit:ms,-1:no-expire) - expire-time: '7776000000' -``` - -4. Restart HertzBeat - -### FAQ - -1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? - -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. - -2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] - -> As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database - -3. 
The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] - -> Please check if the configuration parameters are correct -> Is td-engine enable set to true -> Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory diff --git a/home/versioned_docs/version-v1.4.x/start/mysql-change.md b/home/versioned_docs/version-v1.4.x/start/mysql-change.md deleted file mode 100644 index e79b98cd264..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/mysql-change.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: mysql-change -title: Use MYSQL Replace H2 Database to Store Metadata(Optional) -sidebar_label: Use MYSQL Instead of H2 ---- - -MYSQL is a reliable relational database. In addition to default built-in H2 database, HertzBeat allow you to use MYSQL to store structured relational data such as monitoring information, alarm information and configuration information. - -> If you have the MYSQL environment, can be directly to database creation step. - -### Install MYSQL via Docker - -1. Download and install the Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 - After the installation you can check if the Docker version normally output at the terminal. - - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. Install MYSQl with Docker - - ``` - docker run -d --name mysql -p 3306:3306 -v /opt/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7 - ``` - - `-v /opt/data:/var/lib/mysql` is local persistent mount of mysql data directory. `/opt/data` should be replaced with the actual local directory. 
-   use ```$ docker ps``` to check if the database started successfully
-
-### Database creation
-
-1. Enter MYSQL or use the client to connect MYSQL service
-   `mysql -uroot -p123456`
-2. Create database named hertzbeat
-   `create database hertzbeat default charset utf8mb4 collate utf8mb4_general_ci;`
-3. Check if hertzbeat database has been successfully created
-   `show databases;`
-
-### Modify hertzbeat's configuration file application.yml and switch data source
-
-1. Configure HertzBeat's configuration file
-   Modify `hertzbeat/config/application.yml` configuration file
-   Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml`
-   Replace `spring.datasource` data source parameters, URL account and password.
-
-```yaml
-spring:
-  datasource:
-    driver-class-name: org.h2.Driver
-    username: sa
-    password: 123456
-    url: jdbc:h2:./data/hertzbeat;MODE=MYSQL
-```
-
-Specific replacement parameters are as follows and you need to configure account according to the mysql environment:
-
-```yaml
-spring:
-  datasource:
-    driver-class-name: com.mysql.cj.jdbc.Driver
-    username: root
-    password: 123456
-    url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false
-```
-
-**Start HertzBeat visit on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat**
diff --git a/home/versioned_docs/version-v1.4.x/start/package-deploy.md b/home/versioned_docs/version-v1.4.x/start/package-deploy.md
deleted file mode 100644
index f86a68c6362..00000000000
--- a/home/versioned_docs/version-v1.4.x/start/package-deploy.md
+++ /dev/null
@@ -1,100 +0,0 @@
----
-id: package-deploy
-title: Install HertzBeat via Package
-sidebar_label: Install via Package
----
-
-> You can install and run HertzBeat on Linux Windows Mac system, and CPU supports X86/ARM64.
-
-1. 
Download HertzBeat installation package - Download installation package `hertzbeat-xx.tar.gz` `hertzbeat-collector-xx.tar.gz` corresponding to your system environment - - download from [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) repository - - download from [Download](https://hertzbeat.apache.org/docs/download) repository -2. Configure HertzBeat's configuration file(optional) - Unzip the installation package to the host eg: /opt/hertzbeat - - ``` - $ tar zxvf hertzbeat-xx.tar.gz - or - $ unzip -o hertzbeat-xx.zip - ``` - - Modify the configuration file `hertzbeat/config/application.yml` params according to your needs. - - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` - - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) - - **Highly recommended** From now on we will mainly support VictoriaMetrics as a time-series database, if you need to use the time series database VictoriaMetrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using VictoriaMetrics to store metrics data](victoria-metrics-init) - - **Recommended** If you need to use the time series database TDengine to store metric data, you need to replace the `warehouse.store.td-engine` parameter in `application.yml` for specific steps, see [Using TDengine to store metrics data](tdengine-init) - - **Recommended** If you need to use the time series database IotDB to store the metric database, you need to replace the `warehouse.storeiot-db` parameter in `application.yml` For specific steps, see [Use IotDB to store metrics data](iotdb-init) - -3. 
Configure the account file(optional) - HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` - If you need add, delete or modify account or password, configure `hertzbeat/config/sureness.yml`. Ignore this step without this demand. - For detail steps, please refer to [Configure Account Password](account-modify) - -4. Start the service - Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. - - ``` - ./startup.sh - ``` - -5. Begin to explore HertzBeat - - Access using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! - -6. Deploy collector clusters (Optional) - - - Download and unzip the collector release package `hertzbeat-collector-xx.tar.gz` to new machine [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - - Configure the collector configuration yml file `hertzbeat-collector/config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` - - ```yaml - collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} - ``` - - - Run command `$ ./bin/startup.sh` or `bin/startup.bat` - - Access `http://localhost:1157` and you will see the registered new collector in dashboard - -**HAVE FUN** - -### FAQ - -1. **If using the package not contains JDK, you need to prepare the JAVA environment in advance** - - Install JAVA runtime environment-refer to [official website](http://www.oracle.com/technetwork/java/javase/downloads/index.html) - requirement:JDK11 ENV - download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) - After installation use command line to check whether you install it successfully. 
- - ``` - $ java -version - java version "11.0.12" - Java(TM) SE Runtime Environment 18.9 (build 11.0.12+8-LTS-237) - Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.12+8-LTS-237, mixed mode) - - ``` - -2. **According to the process deploy,visit no interface** - Please refer to the following points to troubleshoot issues: - -> 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. -> 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. - -3. **Log an error TDengine connection or insert SQL failed** - -> 1:Check whether database account and password configured is correct, the database is created. -> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. - -4. **Monitoring historical charts with no data for a long time** - -> 1: Whether the time series database is configured or not, if it is not configured, there is no historical chart data. -> 2: If you are using Tdengine, check whether the database `hertzbeat` of Tdengine is created. -> 3: HertzBeat's configuration file `application.yml`, the dependent services in it, the time series, the IP account password, etc. are configured correctly. 
diff --git a/home/versioned_docs/version-v1.4.x/start/postgresql-change.md b/home/versioned_docs/version-v1.4.x/start/postgresql-change.md deleted file mode 100644 index d06d040ee7e..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/postgresql-change.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -id: postgresql-change -title: Use PostgreSQL Replace H2 Database to Store Metadata(Optional) -sidebar_label: Use PostgreSQL Instead of H2 ---- - -PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition to default built-in H2 database, HertzBeat allow you to use PostgreSQL to store structured relational data such as monitoring information, alarm information and configuration information. - -> If you have the PostgreSQL environment, can be directly to database creation step. - -### Install PostgreSQL via Docker - -1. Download and install the Docker environment - Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 - After the installation you can check if the Docker version normally output at the terminal. - - ``` - $ docker -v - Docker version 20.10.12, build e91ed57 - ``` - -2. Install PostgreSQL with Docker - - ``` - docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 - ``` - - use```$ docker ps```to check if the database started successfully - -3. Create database in container manually or with [script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose/hertzbeat-postgresql-iotdb/conf/sql/schema.sql). - -### Database creation - -1. Enter postgreSQL or use the client to connect postgreSQL service - - ``` - su - postgres - psql - ``` - -2. Create database named hertzbeat - `CREATE DATABASE hertzbeat;` -3. Check if hertzbeat database has been successfully created - `\l` - -### Modify hertzbeat's configuration file application.yml and switch data source - -1. 
Configure HertzBeat's configuration file
-   Modify `hertzbeat/config/application.yml` configuration file
-   Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml`
-   Replace `spring.datasource` data source parameters, URL account and password.
-
-```yaml
-spring:
-  datasource:
-    driver-class-name: org.h2.Driver
-    username: sa
-    password: 123456
-    url: jdbc:h2:./data/hertzbeat;MODE=MYSQL
-```
-
-Specific replacement parameters are as follows and you need to configure account, ip, port according to the postgresql environment:
-
-```yaml
-spring:
-  config:
-    activate:
-      on-profile: prod
-  datasource:
-    driver-class-name: org.postgresql.Driver
-    username: root
-    password: 123456
-    url: jdbc:postgresql://127.0.0.1:5432/hertzbeat
-    hikari:
-      max-lifetime: 120000
-
-  jpa:
-    database: postgresql
-    hibernate:
-      ddl-auto: update
-    properties:
-      hibernate:
-        dialect: org.hibernate.dialect.PostgreSQLDialect
-```
-
-**Start HertzBeat visit on the browser You can use HertzBeat monitoring alarm, default account and password are admin/hertzbeat**
diff --git a/home/versioned_docs/version-v1.4.x/start/quickstart.md b/home/versioned_docs/version-v1.4.x/start/quickstart.md
deleted file mode 100644
index 5c3b8239a82..00000000000
--- a/home/versioned_docs/version-v1.4.x/start/quickstart.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-id: quickstart
-title: Quick Start
-sidebar_label: Quick Start
----
-
-### 🐕 Quick Start
-
-- If you wish to deploy HertzBeat locally, please refer to the following Deployment Documentation for instructions.
-
-### 🍞 Install HertzBeat
-
-> HertzBeat supports installation through source code, docker or package, cpu support X86/ARM64.
-
-##### 1:Install quickly via docker
-
-1. 
Just one command to get started: - -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` - -```or use quay.io (if dockerhub network connect timeout)``` - -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` - -2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` - -3. Deploy collector clusters - -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` - -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. -- `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. - -Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) - -##### 2:Install via package - -1. Download the release package `hertzbeat-xx.tar.gz` [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) -2. Configure the HertzBeat configuration yml file `hertzbeat/config/application.yml` (optional) -3. Run command `$ ./bin/startup.sh` or `bin/startup.bat` -4. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` -5. 
Deploy collector clusters - - Download the release package `hertzbeat-collector-xx.tar.gz` to new machine [GITEE Release](https://gitee.com/hertzbeat/hertzbeat/releases) [Download](https://hertzbeat.apache.org/docs/download) - - Configure the collector configuration yml file `hertzbeat-collector/config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` - - ```yaml - collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} - ``` - - - Run command `$ ./bin/startup.sh` or `bin/startup.bat` - - Access `http://localhost:1157` and you will see the registered new collector in dashboard - -Detailed config refer to [Install HertzBeat via Package](https://hertzbeat.com/docs/start/package-deploy) - -##### 3:Start via source code - -1. Local source code debugging needs to start the back-end project `manager` and the front-end project `web-app`. -2. Backend:need `maven3+`, `java11`, `lombok`, start the `manager` service. -3. Web:need `nodejs npm angular-cli` environment, Run `ng serve --open` in `web-app` directory after backend startup. -4. Access `http://localhost:4200` to start, default account: `admin/hertzbeat` - -Detailed steps refer to [CONTRIBUTING](../others/contributing) - -##### 4:Install All(hertzbeat+mysql+iotdb/tdengine) via Docker-compose - -Install and deploy the mysql database, iotdb/tdengine database and hertzbeat at one time through [docker-compose deployment script](https://github.com/apache/hertzbeat/tree/master/script/docker-compose). - -Detailed steps refer to [Install via Docker-Compose](https://github.com/apache/hertzbeat/tree/master/script/docker-compose) - -##### 5. Install All(hertzbeat+collector+mysql+iotdb) via kubernetes helm charts - -Install HertzBeat cluster in a Kubernetes cluster by Helm chart. 
- -Detailed steps refer to [Artifact Hub](https://artifacthub.io/packages/helm/hertzbeat/hertzbeat) - -**HAVE FUN** diff --git a/home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md b/home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md deleted file mode 100644 index d1fbf4763fa..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/rainbond-deploy.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: rainbond-deploy -title: Use Rainbond Deploy HertzBeat -sidebar_label: Install via Rainbond ---- - -If you are unfamiliar with Kubernetes, and want to install HertzBeat in Kubernetes, you can use Rainbond to deploy. Rainbond is a cloud-native application management platform built on Kubernetes and simplifies the application deployment to Kubernetes. - -## Prerequisites - -To install Rainbond, please refer to [Rainbond Quick Install](https://www.rainbond.com/docs/quick-start/quick-install)。 - -## Deploy HertzBeat - -After logging in Rainbond, click Market in the left menu, switch to open source app store, and search HertzBeat in the search box, and click the Install button. - -![](/img/docs/start/install-to-rainbond-en.png) - -Fill in the following information, and click Confirm button to install. - -* Team: select a team or create a new team -* Cluster: select a cluster -* Application: select an application or create a new application -* Version: select a version - -After installation, HertzBeat can be accessed via the Access button. - -![](/img/docs/start/hertzbeat-topology-en.png) - -:::tip -HertzBeat installed via Rainbond, External Mysql database and Redis and IoTDB are used by default, The HertzBeat configuration file is also mounted, which can be modified in `Components -> Environment Configuration -> Configuration File Settings`. 
-::: diff --git a/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md b/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md deleted file mode 100644 index f3acc57b71e..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/sslcert-practice.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -id: ssl-cert-practice -title: SSL Certificate Monitor Practice -sidebar_label: Practice Example ---- - -Most websites now support HTTPS by default. The certificate we apply for is usually 3 months or 1 year. It is easy to expire the SSL certificate over time, but we did not find it the first time, or did not update the certificate in time before it expired. - -This article introduces how to use the hertzbeat monitoring tool to detect the validity period of our website's SSL certificate, and send us a warning message when the certificate expires or a few days before the certificate expires. - -#### What is HertzBeat - -HertzBeat is a real-time monitoring tool with powerful custom monitoring capabilities without Agent. Website monitoring, PING connectivity, port availability, database, operating system, middleware, API monitoring, threshold alarms, alarm notification (email, WeChat, Ding Ding Feishu). - -**Official website: | ** - -github: -gitee: - -#### Install HertzBeat - -1. If you don't want to install, you can use the cloud service directly [TanCloud exploration cloud console.tancloud.cn](https://console.tancloud.cn) - -2. The `docker` environment can be installed with just one command - -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` - -3. After the installation is successful, the browser can access `localhost:1157` to start, the default account password is `admin/hertzbeat` - -#### Monitoring SSL certificates - -1. Click Add SSL Certificate Monitor - -> System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate - -![](/img/docs/start/ssl_1.png) - -2. 
Configure the monitoring website - -> Here we take the example of monitoring Baidu website, configure monitoring host domain name, name, collection interval, etc. -> Click OK Note ⚠️Before adding, it will test the connectivity of the website by default, and the connection will be successful before adding. Of course, you can also gray out the **Test or not** button. - -![](/img/docs/start/ssl_2.png) - -3. View the detection index data - -> In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. - -![](/img/docs/start/ssl_3.png) - -![](/img/docs/start/ssl_11.png) - -4. Set the threshold (triggered when the certificate expires) - -> System Page -> Alarms -> Alarm Thresholds -> New Thresholds - -![](/img/docs/start/ssl_4.png) - -> Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. - -![](/img/docs/start/ssl_5.png) - -> Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. - -![](/img/docs/start/ssl_6.png) - -5. Set the threshold (triggered one week before the certificate expires) - -> In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. - -![](/img/docs/start/ssl_7.png) - -> Finally, you can see the triggered alarm in the alarm center. - -![](/img/docs/start/ssl_8.png) - -6. Alarm notification (in time notification via Dingding WeChat Feishu, etc.) 
- -> Monitoring Tool -> Alarm Notification -> New Receiver - -![](/img/docs/start/ssl_10.png) - -For token configuration such as Dingding WeChat Feishu, please refer to the help document - - - - -> Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured - -![](/img/docs/start/ssl_11.png) - -7. OK When the threshold is triggered, we can receive the corresponding alarm message. If there is no notification, you can also view the alarm information in the alarm center. - ----- - -#### Finish - -The practice of monitoring SSL certificates is here. Of course, for hertzbeat, this function is just the tip of the iceberg. If you think hertzbeat is a good open source project, please give us a Gitee star on GitHub, thank you very much. Thank you for your support. Refill! - -**github: ** - -**gitee: ** diff --git a/home/versioned_docs/version-v1.4.x/start/tdengine-init.md b/home/versioned_docs/version-v1.4.x/start/tdengine-init.md deleted file mode 100644 index f443a72eb56..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/tdengine-init.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -id: tdengine-init -title: Use Time Series Database TDengine to Store Metrics Data (Optional) -sidebar_label: Use TDengine Store Metrics ---- - -HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) -We recommend VictoriaMetrics for long term support. - -TDengine is an open-source IoT time-series database, which we use to store the collected historical data of monitoring metrics. Pay attention to support ⚠️ 3.x version. 
- -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** -Note⚠️ Need TDengine 3.x Version. - -> If you have TDengine environment, can directly skip to create a database instance. - -### Install TDengine via Docker - -> Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Install TDengine with Docker -> -> ```shell -> $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ -> -v /opt/taosdata:/var/lib/taos \ -> --name tdengine -e TZ=Asia/Shanghai \ -> tdengine/tdengine:3.0.4.0 -> ``` -> -> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. -> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. -> use```$ docker ps``` to check if the database started successfully - -### Create database instance - -1. Enter database Docker container - - ``` - docker exec -it tdengine /bin/bash - ``` - -2. Create database named hertzbeat - After entering the container,execute `taos` command as follows: - - ``` - root@tdengine-server:~/TDengine-server# taos - Welcome to the TDengine shell from Linux, Client Version - Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. 
- taos> - ``` - - execute commands to create database - - ``` - taos> show databases; - taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; - ``` - - The above statements will create a database named hertzbeat. The data will be saved for 90 days (more than 90 days data will be automatically deleted). - A data file every 10 days, memory blocks buffer is 16MB. - -3. Check if hertzbeat database has been created success - - ``` - taos> show databases; - taos> use hertzbeat; - ``` - -**Note⚠️If you install TDengine using package** - -> In addition to start the server,you must execute `systemctl start taosadapter` to start adapter - -### Configure the database connection in hertzbeat `application.yml` configuration file - -1. Configure HertzBeat's configuration file - Modify `hertzbeat/config/application.yml` configuration file [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) - Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Replace `warehouse.store.td-engine` data source parameters, URL account and password. - -```yaml -warehouse: - store: - # disable jpa - jpa: - enabled: false - # enable td-engine - td-engine: - enabled: true - driver-class-name: com.taosdata.jdbc.rs.RestfulDriver - url: jdbc:TAOS-RS://localhost:6041/hertzbeat - username: root - password: taosdata -``` - -2. Restart HertzBeat - -### FAQ - -1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? - -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. - -2. 
The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] - -> As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database - -3. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed - -> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. - -4. The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] - -> Please check if the configuration parameters are correct -> Is td-engine enable set to true -> Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory diff --git a/home/versioned_docs/version-v1.4.x/start/upgrade.md b/home/versioned_docs/version-v1.4.x/start/upgrade.md deleted file mode 100644 index 101564dc00b..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/upgrade.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -id: upgrade -title: HertzBeat New Version Upgrade -sidebar_label: Version Upgrade Guide ---- - -**HertzBeat Release Version List** - -- [Download](https://hertzbeat.apache.org/docs/download) -- [Gitee Release](https://gitee.com/hertzbeat/hertzbeat/releases) -- [DockerHub Release](https://hub.docker.com/r/apache/hertzbeat/tags) -- [Docker quay.io Release](https://quay.io/repository/apache/hertzbeat?tab=tags) - -HertzBeat's metadata information is stored in H2 or Mysql, PostgreSQL relational databases, and the collected metric data is stored in time series databases such as TDengine and IotDB. 
- -**You need to save and back up the data files of the database and monitoring templates yml files before upgrading** - -### Upgrade For Docker Deploy - -1. If using custom monitoring templates - - Need to back up docker templates directory `docker cp hertzbeat:/opt/hertzbeat/define ./define` in the container `/opt/hertzbeat/define` - - `docker cp hertzbeat:/opt/hertzbeat/define ./define` - - And mount the template define directory when docker start `-v $(pwd)/define:/opt/hertzbeat/define` - - `-v $(pwd)/define:/opt/hertzbeat/define` -2. If using the built-in default H2 database - - Need to mount or back up `-v $(pwd)/data:/opt/hertzbeat/data` database file directory in the container `/opt/hertzbeat/data` - - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - - Refer to [Docker installation of HertzBeat](docker-deploy) to create a new container using a new image. Note that the database file directory needs to be mounted `-v $(pwd)/data:/opt/hertzbeat/data` -3. If using external relational database Mysql, PostgreSQL - - No need to mount the database file directory in the backup container - - Stop and delete the container, delete the local HertzBeat docker image, and pull the new version image - - Refer to [Docker installation HertzBeat](docker-deploy) to create a new container using the new image, and configure the database connection in `application.yml` - -### Upgrade For Package Deploy - -1. If using the built-in default H2 database - - Back up the database file directory under the installation package `/opt/hertzbeat/data` - - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - - Refer to [Installation package to install HertzBeat](package-deploy) to start using the new installation package -2. 
If using external relational database Mysql, PostgreSQL - - No need to back up the database file directory under the installation package - - If there is a custom monitoring template, you need to back up the template YML under `/opt/hertzbeat/define` - - `bin/shutdown.sh` stops the HertzBeat process and downloads the new installation package - - Refer to [Installation package to install HertzBeat](package-deploy) to start with the new installation package and configure the database connection in `application.yml` - -**HAVE FUN** diff --git a/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md b/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md deleted file mode 100644 index 0c4c968371a..00000000000 --- a/home/versioned_docs/version-v1.4.x/start/victoria-metrics-init.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: victoria-metrics-init -title: Use Time Series Database VictoriaMetrics to Store Metrics Data (Recommended) -sidebar_label: Use VictoriaMetrics Store Metrics ---- - -HertzBeat's historical data storage relies on the time series database, you can choose one of them to install and initialize, or not to install (note ⚠️ but it is strongly recommended to configure in the production environment) -We recommend VictoriaMetrics for long term support. - -VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.Recommend Version(VictoriaMetrics:v1.95.1+, HertzBeat:v1.4.3+) - -**Note⚠️ Time series database is optional, but production environment configuration is strongly recommended to provide more complete historical chart functions and high performance** -**⚠️ If you do not configure a time series database, only the last hour of historical data is retained.** - -> If you already have an VictoriaMetrics environment, you can skip directly to the YML configuration step. 
- -### Install VictoriaMetrics via Docker - -> Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` - -2. Install VictoriaMetrics via Docker - -```shell -$ docker run -d -p 8428:8428 \ - -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ - --name victoria-metrics \ - victoriametrics/victoria-metrics:v1.95.1 -``` - -`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory -use```$ docker ps``` to check if the database started successfully - -3. Configure the database connection in hertzbeat `application.yml`configuration file - - Modify `hertzbeat/config/application.yml` configuration file - Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` - Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.victoria-metrics` data source parameters, HOST account and password. - -```yaml -warehouse: - store: - # disable JPA - jpa: - enabled: false - # enable victoria-metrics - victoria-metrics: - enabled: true - url: http://localhost:8428 - username: root - password: root -``` - -4. Restart HertzBeat - -### FAQ - -1. Do both the time series databases need to be configured? Can they both be used? - -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. 
diff --git a/home/versioned_docs/version-v1.4.x/template.md b/home/versioned_docs/version-v1.4.x/template.md deleted file mode 100644 index 92fba55542c..00000000000 --- a/home/versioned_docs/version-v1.4.x/template.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: template -title: Monitoring Template Here -sidebar_label: Monitoring Template ---- - -> Hertzbeat is an open source, real-time monitoring tool with custom-monitor and agentLess. -> -> We make protocols such as `Http, Jmx, Ssh, Snmp, Jdbc, Prometheus` configurable, and you only need to configure `YML` online to collect any metrics you want. -> Do you believe that you can immediately adapt a new monitoring type such as K8s or Docker just by configuring online? - -Here is the architecture. - -![hertzBeat](/img/docs/hertzbeat-arch.png) - -**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** - -![](/img/docs/advanced/extend-point-1.png) - -**Welcome everyone to contribute your customized general monitoring type YML template during use. The available templates are as follows:** - -### Application service monitoring - - 👉 [Website monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-website.yml)
- 👉 [HTTP API](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-api.yml)
- 👉 [PING Connectivity](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ping.yml)
- 👉 [Port Telnet](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-port.yml)
- 👉 [Full site monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-fullsite.yml)
- 👉 [SSL Cert monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ssl_cert.yml)
- 👉 [JVM monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jvm.yml)
- 👉 [SpringBoot2.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot2.yml)
- 👉 [SpringBoot3.0](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-springboot3.yml)
- 👉 [FTP Server](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ftp.yml)
- -### Database monitoring - - 👉 [MYSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mysql.yml)
- 👉 [MariaDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mariadb.yml)
- 👉 [PostgreSQL database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-postgresql.yml)
- 👉 [SqlServer database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-sqlserver.yml)
- 👉 [Oracle database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-oracle.yml)
- 👉 [DM database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dm.yml)
- 👉 [OpenGauss database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opengauss.yml)
- 👉 [IoTDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-iotdb.yml)
- 👉 [ElasticSearch database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-elasticsearch.yml)
- 👉 [MongoDB database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-mongodb.yml)
- 👉 [ClickHouse database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-clickhouse.yml)
- 👉 [Redis database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis.yml)
- 👉 [Redis Sentinel database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_sentinel.yml)
- 👉 [Redis Cluster database monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redis_cluster.yml)
- -### Operating system monitoring - - 👉 [Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-linux.yml)
- 👉 [Windows operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-windows.yml)
- 👉 [Ubuntu operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-ubuntu.yml)
- 👉 [Centos operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-centos.yml)
- 👉 [EulerOS operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-euleros.yml)
- 👉 [Fedora CoreOS operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-coreos.yml)
- 👉 [OpenSUSE operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-opensuse.yml)
- 👉 [Rocky Linux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rockylinux.yml)
- 👉 [Red Hat operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-redhat.yml)
- 👉 [FreeBSD operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-freebsd.yml)
- 👉 [AlmaLinux operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-almalinux.yml)
- 👉 [Debian operating system monitoring](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-debian.yml)
- -### Middleware monitoring - - 👉 [Zookeeper](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-zookeeper.yml)
- 👉 [Kafka](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kafka.yml)
- 👉 [Tomcat](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tomcat.yml)
- 👉 [ShenYu](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-shenyu.yml)
- 👉 [DynamicTp](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-dynamic_tp.yml)
- 👉 [RabbitMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-rabbitmq.yml)
- 👉 [ActiveMQ](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-activemq.yml)
- 👉 [Jetty](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-jetty.yml)
- 👉 [Flink](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-flink.yml)
- 👉 [Nacos](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-nacos.yml)
- -### CloudNative monitoring - - 👉 [Docker](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-docker.yml)
- 👉 [Kubernetes](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-kubernetes.yml)
- -### Network monitoring - - 👉 [CiscoSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-cisco_switch.yml)
- 👉 [HpeSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-hpe_switch.yml)
- 👉 [HuaweiSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-huawei_switch.yml)
- 👉 [TpLinkSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-tplink_switch.yml)
- 👉 [H3CSwitch](https://raw.githubusercontent.com/apache/hertzbeat/master/manager/src/main/resources/define/app-h3c_switch.yml)
- ---- - -**Have Fun!** diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-release.md b/home/versioned_docs/version-v1.5.x/community/how-to-release.md index 71583c0d36c..94c919d30f0 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-release.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-release.md @@ -168,6 +168,8 @@ $ svn ci -m "add gpg key for muchunjin" ## 3. Prepare material package & release +### Build Package + #### 3.1 Based on the master branch, create a release-${release_version}-rcx branch, such as release-1.6.0-rc1, And create a tag named v1.6.0-rc1 based on the release-1.6.0-rc1 branch, and set this tag as pre-release ```shell @@ -228,6 +230,8 @@ release-1.6.0-rc1 The archive package is here `dist/apache-hertzbeat-1.6.0-incubating-src.tar.gz` +### Sign package + #### 3.5 Sign binary and source packages > The `gpg -u 33545C76` `33545C76` is your gpg secret ID, see from `gpg --keyid-format SHORT --list-keys` @@ -333,7 +337,7 @@ svn commit -m "release for HertzBeat 1.6.0" ## 4. Enter the community voting stage -#### 4.1 Send a Community Vote Email +### 4.1 Send a Community Vote Email Send a voting email in the community requires at least three `+1` and no `-1`. @@ -525,9 +529,9 @@ One item of the email content is `Vote thread`, and the link is obtained here: < Wait a day to see if the tutor has any other comments, if not, send the following announcement email -## 4. Complete the final publishing steps +## 5. Complete the final publishing steps -#### 4.1 Migrating source and binary packages +### 5.1 Migrating source and binary packages ```shell svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 https://dist.apache.org/repos/dist/release/incubator/hertzbeat/1.6.0 -m "transfer packages for 1.6.0-RC1" @@ -573,27 +577,32 @@ The rename the release-1.6.0-rc1 branch to release-1.6.0. > `Send to`:
> `cc`:
-> `Title`: [ANNOUNCE] Release Apache HertzBeat (incubating) 1.6.0
+> `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: ``` -Hi Incubator Community, +Hi Community, We are glad to announce the release of Apache HertzBeat (incubating) 1.6.0. -Once again I would like to express my thanks to your help. +Thanks again for your help. -Apache HertzBeat(https://hertzbeat.apache.org/) - a real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. +Apache HertzBeat (https://hertzbeat.apache.org/) - a real-time monitoring system with agentless, performance cluster, prometheus-compatible, custom monitoring and status page building capabilities. -Download Links: https://hertzbeat.apache.org/download/ +Download Link: +https://hertzbeat.apache.org/docs/download/ -Release Notes: https://github.com/apache/hertzbeat/releases/tag/v1.6.0 +Release Note: +https://github.com/apache/hertzbeat/releases/tag/v1.6.0 + +Website: +https://hertzbeat.apache.org/ HertzBeat Resources: - Issue: https://github.com/apache/hertzbeat/issues - Mailing list: dev@hertzbeat.apache.org --- Apache HertzBeat Team - +--- Best, ChunJin Mu ``` diff --git a/home/versioned_docs/version-v1.5.x/help/dns.md b/home/versioned_docs/version-v1.5.x/help/dns.md index 3d6a5fe4b8e..5eb31844648 100644 --- a/home/versioned_docs/version-v1.5.x/help/dns.md +++ b/home/versioned_docs/version-v1.5.x/help/dns.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito **Protocol Use:DNS** -### Configuration parameter +## Configuration parameter | Parameter name | Parameter help description | |---------------------|--------------------------------------------------------------------------------------------------------------------------| @@ -24,9 +24,9 @@ keywords: [ open source monitoring tool, open source DNS monitoring tool, monito | Bind Tags | Used to classify and manage monitoring resources. 
| | Description remarks | For more information about identifying and describing this monitoring, users can note information here. | -### Collection Metrics +## Collection Metrics -#### Metrics Set:Header +### Metrics Set:Header | Metric name | Metric unit | Metric help description | |-------------------------|-------------|---------------------------------------------------| diff --git a/home/versioned_docs/version-v1.5.x/help/kubernetes.md b/home/versioned_docs/version-v1.5.x/help/kubernetes.md index 3cb2336e768..d7e6b657ea6 100644 --- a/home/versioned_docs/version-v1.5.x/help/kubernetes.md +++ b/home/versioned_docs/version-v1.5.x/help/kubernetes.md @@ -13,7 +13,7 @@ If you want to monitor the information in 'Kubernetes', you need to obtain an au Refer to the steps to obtain token -#### method one +### method one 1. Create a service account and bind the default cluster-admin administrator cluster role @@ -27,7 +27,7 @@ kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' kubectl describe secret {secret} -n kube-system ``` -#### method two +### method two ```shell kubectl create serviceaccount cluster-admin diff --git a/home/versioned_docs/version-v1.5.x/help/ntp.md b/home/versioned_docs/version-v1.5.x/help/ntp.md index fc7f7925ca6..3c3abeee5a8 100644 --- a/home/versioned_docs/version-v1.5.x/help/ntp.md +++ b/home/versioned_docs/version-v1.5.x/help/ntp.md @@ -9,7 +9,7 @@ keywords: [ open source monitoring tool, open source NTP monitoring tool, monito **Protocol Use:NTP** -### Configuration parameter +## Configuration parameter | Parameter name | Parameter help description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| diff --git a/home/versioned_docs/version-v1.5.x/start/quickstart.md b/home/versioned_docs/version-v1.5.x/start/quickstart.md index 6cbaffc43a6..319e046b2ea 100644 --- 
a/home/versioned_docs/version-v1.5.x/start/quickstart.md +++ b/home/versioned_docs/version-v1.5.x/start/quickstart.md @@ -8,7 +8,7 @@ sidebar_label: Quick Start - If you wish to deploy Apache HertzBeat (incubating) locally, please refer to the following Deployment Documentation for instructions. -### 🍞 Install HertzBeat +#### 🍞 Install HertzBeat > Apache HertzBeat (incubating) supports installation through source code, docker or package, cpu support X86/ARM64. diff --git a/home/versioned_sidebars/version-v1.4.x-sidebars.json b/home/versioned_sidebars/version-v1.4.x-sidebars.json deleted file mode 100644 index 8d1841e07a5..00000000000 --- a/home/versioned_sidebars/version-v1.4.x-sidebars.json +++ /dev/null @@ -1,240 +0,0 @@ -{ - "docs": [ - { - "type": "category", - "label": "quickstart", - "items": [ - "introduce", - { - "type": "link", - "label": "Cloud Service", - "href": "https://console.tancloud.cn" - }, - "start/quickstart", - "start/docker-deploy", - "start/package-deploy", - { - "type": "link", - "label": "Install via HuaweiCloud", - "href": "https://marketplace.huaweicloud.com/contents/0477015c-ad63-4522-a308-816861769f0a#productid=OFFI863735781612109824" - }, - "start/rainbond-deploy", - { - "type": "link", - "label": "Install via Helm", - "href": "https://artifacthub.io/packages/helm/hertzbeat/hertzbeat" - }, - "start/upgrade", - "start/victoria-metrics-init", - "start/iotdb-init", - "start/tdengine-init", - "start/greptime-init", - "start/influxdb-init", - "start/mysql-change", - "start/postgresql-change", - "start/account-modify", - "start/custom-config", - "start/ssl-cert-practice" - ] - }, - { - "type": "category", - "label": "custom", - "items": [ - "advanced/extend-point", - "advanced/extend-tutorial", - { - "type": "category", - "label": "http", - "items": [ - "advanced/extend-http", - "advanced/extend-http-default", - "advanced/extend-http-jsonpath", - "advanced/extend-http-example-hertzbeat", - "advanced/extend-http-example-token" - ] - }, - { - 
"type": "category", - "label": "jdbc", - "items": [ - "advanced/extend-jdbc" - ] - }, - { - "type": "category", - "label": "ssh", - "items": [ - "advanced/extend-ssh" - ] - }, - { - "type": "category", - "label": "jmx", - "items": [ - "advanced/extend-jmx" - ] - }, - { - "type": "category", - "label": "snmp", - "items": [ - "advanced/extend-snmp" - ] - }, - { - "type": "category", - "label": "push", - "items": [ - "advanced/extend-push" - ] - } - ] - }, - { - "type": "doc", - "id": "template" - }, - { - "type": "category", - "label": "help", - "items": [ - "help/guide", - { - "type": "category", - "label": "service", - "items": [ - "help/website", - "help/api", - "help/ping", - "help/port", - "help/fullsite", - "help/ssl_cert", - "help/nginx", - "help/pop3", - "help/smtp", - "help/ntp" - ] - }, - { - "type": "category", - "label": "program", - "items": [ - "help/jvm", - "help/springboot2", - "help/dynamic_tp" - ] - }, - { - "type": "category", - "label": "database", - "items": [ - "help/mysql", - "help/mariadb", - "help/postgresql", - "help/sqlserver", - "help/oracle", - "help/dm", - "help/opengauss", - "help/nebulaGraph" - ] - }, - { - "type": "category", - "label": "cache", - "items": [ - "help/redis", - "help/memcached" - ] - }, - { - "type": "category", - "label": "os", - "items": [ - "help/linux", - "help/windows", - "help/ubuntu", - "help/centos" - ] - }, - { - "type": "category", - "label": "mid", - "items": [ - "help/zookeeper", - "help/shenyu", - "help/rabbitmq", - "help/activemq", - "help/spring_gateway" - ] - }, - { - "type": "category", - "label": "bigdata", - "items": [ - "help/spark", - "help/doris_be", - "help/doris_fe", - "help/hadoop", - "help/iotdb", - "help/hive", - "help/airflow" - ] - }, - { - "type": "category", - "label": "webserver", - "items": [ - "help/tomcat", - "help/jetty" - ] - }, - { - "type": "category", - "label": "cloud-native", - "items": [ - "help/docker", - "help/kubernetes" - ] - }, - { - "type": "category", - "label": 
"threshold", - "items": [ - "help/alert_threshold", - "help/alert_threshold_expr" - ] - }, - { - "type": "category", - "label": "notice", - "items": [ - "help/alert_email", - "help/alert_webhook", - "help/alert_discord", - "help/alert_slack", - "help/alert_telegram", - "help/alert_wework", - "help/alert_dingtalk", - "help/alert_feishu", - "help/alert_console", - "help/alert_enterprise_wechat_app", - "help/alert_smn" - ] - }, - "help/issue" - ] - }, - { - "type": "category", - "label": "Others", - "items": [ - "others/developer", - "others/contributing", - "others/contact", - "others/sponsor", - "others/resource" - ] - } - ] -} diff --git a/home/versions.json b/home/versions.json index 37c425aa229..b0190cee534 100644 --- a/home/versions.json +++ b/home/versions.json @@ -1,4 +1,3 @@ [ - "v1.5.x", - "v1.4.x" + "v1.5.x" ] diff --git a/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.ts b/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.ts index 1a6e98c5c76..8680dce5cee 100644 --- a/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.ts +++ b/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.ts @@ -62,7 +62,7 @@ export class MonitorDataChartComponent implements OnInit, AfterViewInit { if (this.cardElement.nativeElement) { const grandparentElement = this.cardElement.nativeElement.parentElement.parentElement; const grandparentWidth = grandparentElement.clientWidth; - this.cardWidth = grandparentWidth / 2 - 4; + this.cardWidth = Math.floor(grandparentWidth / 2) - 4; this.cdr.detectChanges(); } } diff --git a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.less b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.less index e21d7f68cac..5f1d96d10e5 100644 --- a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.less +++ 
b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.less @@ -14,4 +14,7 @@ p { .ant-table-body { overflow-y: auto!important; } + .ant-card { + margin-bottom: inherit; + } } diff --git a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts index 11582d1d87d..0d1e3e9325d 100644 --- a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts +++ b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts @@ -70,7 +70,7 @@ export class MonitorDataTableComponent implements OnInit, AfterViewInit { if (this.cardElement.nativeElement) { const grandparentElement = this.cardElement.nativeElement.parentElement.parentElement; const grandparentWidth = grandparentElement.clientWidth; - this.cardWidth = grandparentWidth / 2 - 4; + this.cardWidth = Math.floor(grandparentWidth / 2) - 4; this.cdr.detectChanges(); } } diff --git a/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.less b/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.less index e216479b966..c7cc8f28d18 100644 --- a/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.less +++ b/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.less @@ -43,6 +43,3 @@ p { width: calc(50% - 4px); } } -.lists > .card { - height: 400px; -} diff --git a/yarn.lock b/yarn.lock deleted file mode 100644 index fb57ccd13af..00000000000 --- a/yarn.lock +++ /dev/null @@ -1,4 +0,0 @@ -# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
-# yarn lockfile v1 - - From 343afd517d34c5980fb6f7c603c73add8b49f660 Mon Sep 17 00:00:00 2001 From: Jast Date: Fri, 23 Aug 2024 11:33:06 +0800 Subject: [PATCH 221/257] [Improve] improve markdown format for MD041 (#2588) --- .markdownlint-cli2.jsonc | 1 - home/docs/help/status.md | 3 --- .../2024-06-11-hertzbeat-v1.6.0-update.md | 8 ++++---- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index e2f39cfecaf..ed95878742c 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -30,7 +30,6 @@ "MD035": false, "MD036": false, "MD040": false, - "MD041": false, "MD045": false, "MD046": false, "MD047": false diff --git a/home/docs/help/status.md b/home/docs/help/status.md index ab969150e1e..dcdafbb794c 100644 --- a/home/docs/help/status.md +++ b/home/docs/help/status.md @@ -1,7 +1,4 @@ -Here is the English translation of the provided text: - --- - id: status title: Status Page sidebar_label: Status Page diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md index 1334c16c891..b75648284ff 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md @@ -1,10 +1,10 @@ -## HertzBeat 1.6.0 升级指南 +# HertzBeat 1.6.0 升级指南 -### 注意:该指南适用于1.5.0向1.6.0版本升级 +## 注意:该指南适用于1.5.0向1.6.0版本升级 -### 如果你使用更老的版本,建议使用导出功能重新安装,或先升级到1.5.0再按本指南升级到1.6.0 +## 如果你使用更老的版本,建议使用导出功能重新安装,或先升级到1.5.0再按本指南升级到1.6.0 -### 二进制安装包升级 +## 二进制安装包升级 1. 
升级Java环境 From 51699e34cd78fa80088863653d9203de2b74f439 Mon Sep 17 00:00:00 2001 From: Kerwin Bryant Date: Fri, 23 Aug 2024 17:27:34 +0800 Subject: [PATCH 222/257] [webapp] fix #2244 and update detail ui (#2590) --- .../monitor-data-chart.component.html | 3 +- .../monitor-data-chart.component.ts | 20 +- .../monitor-data-table.component.html | 338 ++++++++++-------- .../monitor-data-table.component.ts | 35 +- .../monitor-detail.component.html | 2 + 5 files changed, 198 insertions(+), 200 deletions(-) diff --git a/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html b/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html index e744e7afaa9..8f1c4cb7fa6 100644 --- a/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html +++ b/web-app/src/app/routes/monitor/monitor-data-chart/monitor-data-chart.component.html @@ -17,8 +17,7 @@ ~ under the License. --> -
- +
= 5) { - this.cardWidth = this.cardWidth + this.cardWidth; - } this.lineHistoryTheme.series = []; let valueKeyArr = Object.keys(values); for (let index = 0; index < valueKeyArr.length; index++) { diff --git a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html index 05f4348eb1f..b8973666912 100644 --- a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html +++ b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.html @@ -16,153 +16,160 @@ ~ specific language governing permissions and limitations ~ under the License. --> -
- -
-
-

ID

-

{{ monitorId }}

-
-
-

{{ 'monitors.detail.name' | i18n }}

-

{{ monitor.name }}

-
-
-

HOST

-

{{ monitor.host }}

-
-
-

{{ 'monitors.detail.port' | i18n }}

-

{{ port }}

-
-
-

{{ 'monitors.detail.description' | i18n }}

-

{{ monitor.description }}

-
-
-

{{ 'monitors.detail.status' | i18n }}

-
- - - {{ 'monitor.status.paused' | i18n }} - - - - {{ 'monitor.status.up' | i18n }} - - - - {{ 'monitor.status.down' | i18n }} - + + + + + +
+
+

ID

+

{{ monitorId }}

+
+
+

{{ 'monitors.detail.name' | i18n }}

+

{{ monitor.name }}

+
+
+

HOST

+

{{ monitor.host }}

+
+
+

{{ 'monitors.detail.port' | i18n }}

+

{{ port }}

+
+
+

{{ 'monitors.detail.description' | i18n }}

+

{{ monitor.description }}

+
+
+

{{ 'monitors.detail.status' | i18n }}

+
+ + + {{ 'monitor.status.paused' | i18n }} + + + + {{ 'monitor.status.up' | i18n }} + + + + {{ 'monitor.status.down' | i18n }} + +
+
+
+

{{ 'monitor.intervals' | i18n }}

+

{{ monitor.intervals }}s

+
+
+

{{ 'common.new-time' | i18n }}

+

{{ monitor.gmtCreate | date : 'YYYY-MM-dd HH:mm:ss' }}

+
+
+

{{ 'common.edit-time' | i18n }}

+

{{ monitor.gmtUpdate | date : 'YYYY-MM-dd HH:mm:ss' }}

-
-

{{ 'monitor.intervals' | i18n }}

-

{{ monitor.intervals }}s

-
-
-

{{ 'common.new-time' | i18n }}

-

{{ monitor.gmtCreate | date : 'YYYY-MM-dd HH:mm:ss' }}

-
-
-

{{ 'common.edit-time' | i18n }}

-

{{ monitor.gmtUpdate | date : 'YYYY-MM-dd HH:mm:ss' }}

-
-
- - - - - {{ 'monitor.app.' + app + '.metrics.' + metrics + '.metric.' + field.name | i18nElse : field.name }} - - - - - - - {{ value.origin }} - {{ 'monitors.detail.value.null' | i18n }} - {{ fields[i].unit }} - - - - - - - - {{ 'common.name' | i18n }} - {{ 'common.value' | i18n }} - - - - - - {{ 'monitor.app.' + app + '.metrics.' + metrics + '.metric.' + field.name | i18nElse : field.name }} - - {{ rowValues[i].origin }} - {{ 'monitors.detail.value.null' | i18n }} - {{ field.unit }} - - - - - + + + + + {{ 'monitor.app.' + app + '.metrics.' + metrics + '.metric.' + field.name | i18nElse : field.name }} + + + + + + + + + {{ value.origin }} + {{ 'monitors.detail.value.null' | i18n }} + {{ fields[i].unit }} + + + + + + + + + {{ 'common.name' | i18n }} + {{ 'common.value' | i18n }} + + + + + + {{ 'monitor.app.' + app + '.metrics.' + metrics + '.metric.' + field.name | i18nElse : field.name }} + + {{ rowValues[i].origin }} + {{ 'monitors.detail.value.null' | i18n }} + {{ field.unit }} + + + + + +

@@ -175,10 +182,41 @@ -

- - - {{ 'monitors.collect.time' | i18n }}: {{ time | _date : 'HH:mm:ss' }} - +
+
+ + + {{ 'monitors.collect.time' | i18n }}: {{ time | _date : 'HH:mm:ss' }} + +
+
+ +
+
+ + + +
+ +
+
+ + + {{ ('monitors.collect.time.tip' | i18n) + ': ' + (time | _date : 'yyyy-MM-dd HH:mm:ss') }} + +
+
+
+ + + diff --git a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts index 0d1e3e9325d..a2a66783670 100644 --- a/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts +++ b/web-app/src/app/routes/monitor/monitor-data-table/monitor-data-table.component.ts @@ -17,7 +17,7 @@ * under the License. */ -import { AfterViewInit, ChangeDetectorRef, Component, ElementRef, Input, OnInit, ViewChild } from '@angular/core'; +import { Component, Input, OnInit } from '@angular/core'; import { NzNotificationService } from 'ng-zorro-antd/notification'; import { finalize } from 'rxjs/operators'; @@ -28,7 +28,7 @@ import { MonitorService } from '../../../service/monitor.service'; templateUrl: './monitor-data-table.component.html', styleUrls: ['./monitor-data-table.component.less'] }) -export class MonitorDataTableComponent implements OnInit, AfterViewInit { +export class MonitorDataTableComponent implements OnInit { @Input() get monitorId(): number { return this._monitorId; @@ -53,8 +53,7 @@ export class MonitorDataTableComponent implements OnInit, AfterViewInit { @Input() height: string = '100%'; - @ViewChild('targetElement', { static: false }) cardElement!: ElementRef; - + showModal!: boolean; time!: any; fields!: any[]; valueRows!: any[]; @@ -62,18 +61,8 @@ export class MonitorDataTableComponent implements OnInit, AfterViewInit { isTable: boolean = true; scrollY: string = '100%'; loading: boolean = false; - cardWidth: number = 300; - constructor(private monitorSvc: MonitorService, private notifySvc: NzNotificationService, private cdr: ChangeDetectorRef) {} - - ngAfterViewInit() { - if (this.cardElement.nativeElement) { - const grandparentElement = this.cardElement.nativeElement.parentElement.parentElement; - const grandparentWidth = grandparentElement.clientWidth; - this.cardWidth = Math.floor(grandparentWidth / 2) - 4; - 
this.cdr.detectChanges(); - } - } + constructor(private monitorSvc: MonitorService, private notifySvc: NzNotificationService) {} ngOnInit(): void { this.scrollY = `calc(${this.height} - 130px)`; @@ -92,25 +81,9 @@ export class MonitorDataTableComponent implements OnInit, AfterViewInit { this.time = message.data.time; this.fields = message.data.fields; this.valueRows = message.data.valueRows; - let updateWidth = false; if (this.valueRows.length == 1) { this.isTable = false; this.rowValues = this.valueRows[0].values; - } else { - if (this.fields?.length >= 5) { - updateWidth = true; - } - } - this.valueRows.forEach(row => { - row.values.forEach((value: any) => { - if (value.origin?.length > 60) { - updateWidth = true; - } - }); - }); - if (updateWidth) { - this.cardWidth = this.cardWidth + this.cardWidth; - this.cdr.detectChanges(); } } else if (message.code !== 0) { this.notifySvc.warning(`${this.metrics}:${message.msg}`, ''); diff --git a/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html b/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html index 2dcdaa66ee0..7cf402565f0 100755 --- a/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html +++ b/web-app/src/app/routes/monitor/monitor-detail/monitor-detail.component.html @@ -65,6 +65,7 @@ [app]="app" >
Date: Fri, 23 Aug 2024 23:06:49 +0800 Subject: [PATCH 223/257] [feature] manage dependencies version by parent pom (#2571) Co-authored-by: Calvin Co-authored-by: YuLuo --- alerter/pom.xml | 6 +++--- collector/pom.xml | 20 +++++++++----------- common/pom.xml | 4 ++-- manager/pom.xml | 8 -------- plugin/pom.xml | 4 +--- pom.xml | 28 ++++++++++++++++++++++++++++ push/pom.xml | 4 ---- remoting/pom.xml | 4 ---- warehouse/pom.xml | 9 --------- 9 files changed, 43 insertions(+), 44 deletions(-) diff --git a/alerter/pom.xml b/alerter/pom.xml index 8092c079730..896bbba9fb7 100644 --- a/alerter/pom.xml +++ b/alerter/pom.xml @@ -73,19 +73,19 @@ cn.afterturn easypoi-annotation - 4.3.0 + ${easy-poi.version} compile org.apache.poi poi - 4.1.1 + ${poi.version} compile org.apache.poi poi-ooxml - 4.1.1 + ${poi.version} compile diff --git a/collector/pom.xml b/collector/pom.xml index 241c999c4ca..2d93db59718 100644 --- a/collector/pom.xml +++ b/collector/pom.xml @@ -30,8 +30,6 @@ 17 ${java.version} ${java.version} - 3.2.0 - 3.3.0 @@ -89,7 +87,7 @@ com.googlecode.concurrentlinkedhashmap concurrentlinkedhashmap-lru - 1.4.2 + ${concurrentlinkedhashmap-lru.version} com.google.guava @@ -126,12 +124,12 @@ org.apache.sshd sshd-core - 2.13.1 + ${sshd-core.version} net.i2p.crypto eddsa - 0.3.0 + ${eddsa.version} @@ -163,40 +161,40 @@ org.snmp4j snmp4j - 3.6.7 + ${snmp4j.version} org.apache.rocketmq rocketmq-tools - 4.9.4 + ${rocketmq-tools.version} dnsjava dnsjava - 3.5.2 + ${dnsjava.version} com.ecwid.consul consul-api - 1.4.5 + ${consul-api.version} com.alibaba.nacos nacos-client - 2.2.1 + ${nacos-client.version} com.vesoft client - 3.6.0 + ${vesoft-client.version} javax.servlet diff --git a/common/pom.xml b/common/pom.xml index 0aaa3497ab0..770733af86b 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -49,7 +49,7 @@ org.eclipse.persistence org.eclipse.persistence.jpa - 4.0.2 + ${eclipse-jpa.version} org.springframework.boot @@ -126,7 +126,7 @@ com.beetstra.jutf7 jutf7 - 1.0.0 + 
${jutf7.version} diff --git a/manager/pom.xml b/manager/pom.xml index 2e0e1120445..34debeb7894 100644 --- a/manager/pom.xml +++ b/manager/pom.xml @@ -29,14 +29,6 @@ true - 3.2.0 - 3.3.0 - 1.9.22 - 1.1.0 - 1.0 - 4.3.0 - 3.1.37 - 3.23.5 diff --git a/plugin/pom.xml b/plugin/pom.xml index 023225ceb8f..5f365c6907a 100644 --- a/plugin/pom.xml +++ b/plugin/pom.xml @@ -28,8 +28,6 @@ hertzbeat-plugin ${project.artifactId} - 3.2.0 - 3.3.0 @@ -46,7 +44,7 @@ org.apache.maven.plugins maven-assembly-plugin - 3.3.0 + ${maven-assembly-plugin.version} src/main/resources/assembly/assembly.xml diff --git a/pom.xml b/pom.xml index ef78581687a..1a354117606 100644 --- a/pom.xml +++ b/pom.xml @@ -95,6 +95,9 @@ 17 ${java.version} ${java.version} + 3.2.0 + 3.3.0 + 2.1.1 3.8.1 @@ -140,6 +143,31 @@ 3.10.0 0.8.11 2.40.0 + + 4.3.0 + 4.1.1 + 1.4.2 + 2.13.1 + 0.3.0 + 3.6.7 + 4.9.4 + 3.5.2 + 1.4.5 + 2.2.1 + 3.6.0 + 4.0.2 + 1.0.0 + 1.1.0 + 3.1.37 + 3.23.5 + + + 0.13.3 + 2.23 + 3.0.5 + 3.0.0 + 0.7.3 + 8.0.33 diff --git a/push/pom.xml b/push/pom.xml index 5ae67fece70..d3be8d935b2 100644 --- a/push/pom.xml +++ b/push/pom.xml @@ -27,10 +27,6 @@ hertzbeat-push ${project.artifactId} - - 3.2.0 - 3.3.0 - diff --git a/remoting/pom.xml b/remoting/pom.xml index eeda5bbd3a1..4ecbc815861 100644 --- a/remoting/pom.xml +++ b/remoting/pom.xml @@ -27,10 +27,6 @@ hertzbeat-remoting ${project.artifactId} - - 3.2.0 - 3.3.0 - diff --git a/warehouse/pom.xml b/warehouse/pom.xml index 0db9386496d..c0303aa7732 100644 --- a/warehouse/pom.xml +++ b/warehouse/pom.xml @@ -23,15 +23,6 @@ org.apache.hertzbeat 2.0-SNAPSHOT - - 1.0 - 0.13.3 - 2.23 - 3.0.5 - 3.0.0 - 0.7.3 - 8.0.33 - 4.0.0 hertzbeat-warehouse From e3a1b8345fe91d78d72eea0ea7a62a63f0a92aa0 Mon Sep 17 00:00:00 2001 From: liutianyou Date: Sat, 24 Aug 2024 07:52:56 +0800 Subject: [PATCH 224/257] [improve] improve markdown format for MD047 (#2586) Co-authored-by: YuLuo Co-authored-by: Jast --- .markdownlint-cli2.jsonc | 2 +- .../2022-09-04-hertzbeat-v1.1.3.md | 2 +- 2 
files changed, 2 insertions(+), 2 deletions(-) diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index ed95878742c..94dd8518fa7 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -32,7 +32,7 @@ "MD040": false, "MD045": false, "MD046": false, - "MD047": false + "MD047": true }, "ignore": [ "node_modules/", diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md index 1d7bf0a0042..0429d684ddd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-04-hertzbeat-v1.1.3.md @@ -70,4 +70,4 @@ Bugfix. Online . -Have Fun \ No newline at end of file +Have Fun From 7facbe827ca03d05bc873f10880ab654aaaad627 Mon Sep 17 00:00:00 2001 From: aias00 Date: Sun, 25 Aug 2024 09:30:43 +0800 Subject: [PATCH 225/257] [improve] improve md format MD025 (#2587) --- .markdownlint-cli2.jsonc | 2 +- home/blog/2022-12-19-new-committer.md | 26 ++++++------ home/blog/2023-02-10-new-committer.md | 2 +- home/blog/2024-01-11-new-committer.md | 42 +++++++++---------- home/blog/2024-01-18-hertzbeat-v1.4.4.md | 2 +- home/blog/2024-07-07-new-committer.md | 4 +- home/docs/community/how-to-verify.md | 24 +++++------ home/docs/help/http_sd.md | 6 +-- home/docs/start/update-1.6.0.md | 2 +- .../2022-12-19-new-committer.md | 26 ++++++------ .../2023-02-10-new-committer.md | 2 +- .../2024-01-11-new-committer.md | 38 ++++++++--------- .../2024-01-18-hertzbeat-v1.4.4.md | 2 +- .../2024-07-07-new-committer.md | 4 +- .../current/community/how-to-verify.md | 24 +++++------ .../version-v1.4.x/help/ntp.md | 0 .../version-v1.5.x/community/how-to-verify.md | 24 +++++------ .../version-v1.5.x/community/how-to-verify.md | 24 +++++------ .../version-v1.5.x/help/http_sd.md | 6 +-- 19 files changed, 130 insertions(+), 130 deletions(-) create mode 100644 
home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 94dd8518fa7..01e8be79233 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -24,7 +24,7 @@ "MD024": { "siblings_only": true }, - "MD025": false, + "MD025": true, "MD029": false, "MD033": false, "MD035": false, diff --git a/home/blog/2022-12-19-new-committer.md b/home/blog/2022-12-19-new-committer.md index 3a324e7817e..33eec0552b8 100644 --- a/home/blog/2022-12-19-new-committer.md +++ b/home/blog/2022-12-19-new-committer.md @@ -9,7 +9,7 @@ tags: [opensource] > 非常高兴 HertzBeat 迎来了两位新晋社区Committer, 两位都是来自互联网公司的开发工程师,让我们来了解下他们的开源经历吧! -# 第一位 花城 +## 第一位 花城 姓名:王庆华 @@ -19,15 +19,15 @@ HertzBeat Committer github:[wang1027-wqh](https://github.com/wang1027-wqh) -## 初识hertzbeat +### 初识hertzbeat 说起来挺偶然的,结识hertzbeat是因为我大学的毕业设计,当时在一家互联网公司实习,那个时候第一次看到了企业是怎么监控项目的,不管是系统监控、业务监控还是物联网iot监控,那个时候见世面不广,只知道Prometheus + Grafana,但是学起来、用起来成本比较高,那个时候就觉得应该有其他类型的监控,恰好,到了大学毕业设计选题,我就开始寻找这方面的开源项目,那个时候我们小组正在使用Shen Yu网关,我就看了下社区,发现了hertzbeat,自此我便于它结缘了。 -## 开始提交PR +### 开始提交PR 到了2022-02-18 我开始提交了我第一个pr,当时只是为了优化一些controller入参的格式,没有什么技术含量,但是这是我接触开源的第一步,让我在从理论学习跨出了实践的一步 -## 持续的开源贡献与收获 +### 持续的开源贡献与收获 到目前为止,参与hertzbeat开源项目已有半年多时间,贡献了许多,也成长收获了许多。具体如下: @@ -37,11 +37,11 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) 4. 参与了开源之夏并顺利结项 5. 增加了监控系统的基础告警功能: 钉钉、飞书、企业微信、邮箱等 -## 感谢社区小伙伴 +### 感谢社区小伙伴 感谢无偿帮助过我或给过我启发的小伙伴(排名不分先后):tomsun28,MaxKeyTop,阿超 -## 对新人的一点建议 +### 对新人的一点建议 1. 不要觉得自己一上手就能接触到核心,心急吃不了热豆腐 2. 
不要只注重看代码,要上手搭建、使用 @@ -51,7 +51,7 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) --- --- -# 第二位 星辰 +## 第二位 星辰 姓名:郑晨鑫 @@ -63,11 +63,11 @@ Hertzbeat Committer github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) -## 初识Hertzbeat +### 初识Hertzbeat 2022年8月开始接触Hertzbeat,由于公司监控elasticsearch使用的cerebro,虽然有非常强大的数据监控,但缺少告警通知的功能;就去github上浏览监控类的项目,刚好看到Hertzbeat,对此非常感兴趣,在了解完整个项目结构和实现后,刚好elasticsearch的监控部分做的不够完善,我就根据cerebro完善了这部分监控数据并提交了pull request。后面在tom老哥的帮助下也开始其他部分的实现。 -## 开始提交PR +### 开始提交PR 从2022年9月至今提交了好几个pr,主要包括: @@ -78,7 +78,7 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 一些bug的修复。。。 + promethues exporter 协议解析 -## 持续的开源贡献与收获 +### 持续的开源贡献与收获 到目前为止,参与Hertzbeat社区开源已有半年多时间,贡献了许多,也成长收获了许多。 @@ -86,17 +86,17 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) 同时在社区群里,看到别人提的问题和帮助别人可以学到很多新的知识,很多问题你目前不一定会遇到,其他人遇到的时候你可以思考并收获很多知识。 -## 感谢社区小伙伴 +### 感谢社区小伙伴 感谢无偿帮助过我或给过我启发的小伙伴:[tomsun28](https://github.com/tomsun28) -## 对新人的一点建议 +### 对新人的一点建议 + 使用者可以先看官网,官网基本能够解决你的问题。部分简单或者常见的问题其他可以自己解决,对自己也是一种锻炼 + 可以尝试阅读源码,大部分源码都是包含注释的,并不难;不懂的地方也可以通过运行test,debug看一下整个流程 + 有想法或者bug,可以前往gitee或者github提交issues,也可以在群里询问,不要怕,都是从菜逼过来的 -## 如何参与Hertzbeat +### 如何参与Hertzbeat + 官网有非常完善的贡献者指南:[贡献者指南 | HertzBeat](https://hertzbeat.com/docs/community/contribution) diff --git a/home/blog/2023-02-10-new-committer.md b/home/blog/2023-02-10-new-committer.md index 9dbc9399ba9..0ad2128bc20 100644 --- a/home/blog/2023-02-10-new-committer.md +++ b/home/blog/2023-02-10-new-committer.md @@ -9,7 +9,7 @@ tags: [opensource] ![hertzBeat](/img/blog/new-committer.png) -# Welcome two new Committers from HertzBeat +## Welcome two new Committers from HertzBeat > I am very happy that the HertzBeat open source community has welcomed two new community Committers, one is the leader of the R&D team from the front line, and the other is an intern from a large factory. Let us learn about their open source experience! 
diff --git a/home/blog/2024-01-11-new-committer.md b/home/blog/2024-01-11-new-committer.md index 69dc9a239af..42e94929c2f 100644 --- a/home/blog/2024-01-11-new-committer.md +++ b/home/blog/2024-01-11-new-committer.md @@ -12,7 +12,7 @@ keywords: [open source monitoring system, alerting system] > Welcome to HertzBeat's three new community committeers, let's learn more about their open source experience! -# New Committer - vinci +## New Committer - vinci **Name: Wang Jianing** @@ -20,21 +20,21 @@ keywords: [open source monitoring system, alerting system] **Github ID: vinci-897** -## Getting to know Hertzbeat for the first time +### Getting to know Hertzbeat for the first time In March 2023, I noticed Hertzbeat's project, so I directly sent an email to Tom in the community to ask if I could join, and he replied very quickly. It just so happened that I was in my senior year of college when I had more time, so I decisively picked an issue to resolve and had my first pull request at Hertzbeat. -## Start submitting PR +### Start submitting PR In the following period, I spent some time reading Hertzbeat's code, and submitted a few PRs off and on until April, when I learned about the Summer of Open Source event, and it just so happened that Hertzbeat was participating as well, so I submitted my application information and was selected. My task is mainly responsible for the implementation of a push collector, in the process of writing the code, I got a lot of community mentor Zheng Chenxin and Tom to help, and finally was able to successfully complete the code, the whole process is still relatively smooth. -## Open source contributions +### Open source contributions - Add push module to expose interface for users to push data. - Implement the collection of push data in the collector module. - Implement user-defined data display in the front-end. -## Harvest +### Harvest - Exposed to a great open source community and improved my skills in related areas. 
@@ -42,85 +42,85 @@ Thanks to Tom and my ospp mentor, Zheng Chenxin, who gave me a lot of help and a --- -# New Committer - SongXiao +## New Committer - SongXiao **Name: Zhou Shusheng** **Junior student at Henan Normal University** -## Getting to know Hertzbeat for the first time +### Getting to know Hertzbeat for the first time In March this year, under the influence of Xiaobao's seniors, I had some concepts about open source projects and laid the foundation for learning open source projects later, but I didn't go further because I only learned some Java basics at that time. -## Start submitting PR +### Start submitting PR In July this year, after basically completing the study of Java framework development, with the encouragement of Xiaobao, I started to try to pull issues, and submitted my first PR on July 20, during this period, I also consulted with the author of Hertzbeat and Dongfeng for some related issues, thank you very much. -## Open source contributions +### Open source contributions - Support for Spring Gateway, Apache Spark, Apache Hive and other services metrics collection - Customize nginx and pop3 protocols to collect metrics for Nginx and POP3 mailbox servers, and add corresponding help files. -## Harvest +### Harvest - Exposed to better and more complex large-scale projects, improved programming and problem-solving skills. - Put the theoretical knowledge into practice, gained JUC, microservice related development experience, and valuable project experience. -## Thanks to our community partners +### Thanks to our community partners Thanks to the author of HertzBeat, HertzBeat/Sms4j Committer Tiejia Xiaobao, Sms4j Committer Dongfeng, when I encountered problems that I could not solve, I often asked the three brothers to ask for advice, and they are always tired of patiently helping me solve the problem, there are really no words. 
Thanks to the other partners in the community, I've gained a lot from communicating and discussing with them, and I've also felt the active open source atmosphere in the community. -## Some advice for newcomers +### Some advice for newcomers - When you first get involved in an open source project, start with simple tasks. Gradually familiarize yourself with the code and process of the project, and gradually take on more complex tasks. - If you encounter problems that you can't solve by yourself, you can ask for help from the community. --- -# New Committer - Dongfeng +## New Committer - Dongfeng **Name: Zhang Yang **Freshman from Henan Normal University** -## Getting to know hertzbeat for the first time +### Getting to know hertzbeat for the first time In June of this year, I started to learn more about the project, I was recommended by a friend to learn about the project, I have been exploring open source projects and communities, and I like the atmosphere of sharing, discussing and improving each other. At the same time, I also tried to implement some monitoring in my previous projects, so I am more interested in this project. -## Start submitting PR +### Start submitting PR Since July this year, I found hertzbeat's issues and prs are very active, so I went through their issues and prs to find out how to implement monitoring for a certain protocol. Then I found out that there is a task about smtp protocol monitoring, so I discussed with the author on the issue, and then I finished my pr through documentation and code. -## Open source contributions +### Open source contributions - Implementation of smtp, ntp, websocket availability monitoring. - Implement monitoring metrics for memcached and NebulaGraph. - Add documentation for the implemented monitoring. -## Harvest +### Harvest - Gained monitoring-related development experience and added a valuable project experience. - Gained a deeper understanding of network protocols. 
- I gained a deeper understanding of network protocols. I gained a preliminary understanding of the contribution process of open source projects. -## Thank you to our community partners +### Thank you to our community partners Thanks to the authors of hertzbeat for the documentation and help. Thanks to my friends for providing me with the courage to try to enter the open source project to contribute. Thanks to other community members for their issues and prs, which accelerated my understanding of the project. -## A bit of advice for newcomers +### A bit of advice for newcomers - Issues and pr's are the knock on the door of the project you are getting to know, so be willing to discuss and express your opinion. - No matter how big or small your contribution is, be willing to try and keep improving yourself. ---- -## What is HertzBeat? +### What is HertzBeat? [HertzBeat HertzBeat](https://github.com/apache/hertzbeat) is an open source real-time monitoring and alerting system with powerful customizable monitoring capabilities, high performance clustering, Prometheus compatibility, and no Agent required. -### Features +#### Features - Integrate **Monitoring+Alerting+Notification** into one system, support monitoring thresholds and alerting notifications for application services, applications, databases, caches, operating systems, big data, middleware, web servers, cloud-native, networks, customization, etc. in one step. - Easy to use and friendly, no need for `Agent`, full `WEB` page operation, a little mouse click to monitor alarms, zero start-up learning costs. 
diff --git a/home/blog/2024-01-18-hertzbeat-v1.4.4.md b/home/blog/2024-01-18-hertzbeat-v1.4.4.md index efeaa2b1db8..a3e7bd93ffa 100644 --- a/home/blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/blog/2024-01-18-hertzbeat-v1.4.4.md @@ -83,7 +83,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * add prolevel1 as a contributor for code by @allcontributors in * -# 1397 feature: support for dns monitoring by @Calvin979 in +## 1397 feature: support for dns monitoring by @Calvin979 in * Support monitoring hive metrics by @a-little-fool in * support legend pageable in history data charts by @tomsun28 in diff --git a/home/blog/2024-07-07-new-committer.md b/home/blog/2024-07-07-new-committer.md index 041d6b771ee..ad2d2baaccb 100644 --- a/home/blog/2024-07-07-new-committer.md +++ b/home/blog/2024-07-07-new-committer.md @@ -16,7 +16,7 @@ I have been working in backend development since I started my career in 2015, en I have always been passionate about the open-source community, and I am also a Committer for Apache Pulsar and a Contributor for OpenTelemetry and VictoriaMetrics. -# My Connection with HertzBeat +## My Connection with HertzBeat In April of this year, when HertzBeat entered the Apache Incubator, I happened to see a recommendation on a WeChat public account in my social circle. @@ -24,7 +24,7 @@ My first reaction was that the name was really well chosen 😄. Then, after loo Since I also need to maintain an observability system at my company, and I have participated in an open-source project called cprobe (which has some similar goals to HertzBeat), I have some experience with monitoring systems and a strong interest in them. Therefore, I read the documentation and quickly started it locally (the community has done a great job in this regard, as many open-source projects lose potential developers right at the initial startup step). 
-# Starting Contributions +## Starting Contributions My first PR was to fix an incorrect path in a PR template, and the community responded very quickly, which gave me a very positive impression. diff --git a/home/docs/community/how-to-verify.md b/home/docs/community/how-to-verify.md index 77e53ee444c..c91c4fbb486 100644 --- a/home/docs/community/how-to-verify.md +++ b/home/docs/community/how-to-verify.md @@ -4,13 +4,13 @@ title: How to Verify Release sidebar_position: 4 --- -# Verify the candidate version +## Verify the candidate version For detailed check list, please refer to the official [check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) Version content accessible in browser -## 1. Download the candidate version +### 1. Download the candidate version Download the candidate version to be released to the local environment Need to rely on gpg tool, if not, it is recommended to install `gpg2`. @@ -27,11 +27,11 @@ $ svn co https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_ve $ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_version}-${rc_version}/xxx.xxx ``` -## 2. Verify that the uploaded version is compliant +### 2. Verify that the uploaded version is compliant Start the verification process, which includes but is not limited to the following content and forms. -### 2.1 Check whether the release package is complete +#### 2.1 Check whether the release package is complete The package uploaded to dist must include the source code package, and the binary package is optional. @@ -40,18 +40,18 @@ The package uploaded to dist must include the source code package, and the binar 3. Whether to include the sha512 of the source code package 4. If the binary package is uploaded, also check the contents listed in (2)-(4) -### 2.2 Check gpg signature +#### 2.2 Check gpg signature First import the publisher's public key. Import KEYS from the svn repository to the local environment. 
(The person who releases the version does not need to import it again, the person who helps to do the verification needs to import it, and the user name is enough for the person who issued the version) -#### 2.2.1 Import public key +##### 2.2.1 Import public key ```shell curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS gpg --import KEYS # Import KEYS to local ``` -#### 2.2.2 Trust the public key +##### 2.2.2 Trust the public key Trust the KEY used in this version: @@ -80,7 +80,7 @@ gpg> ``` -#### 2.2.3 Check the gpg signature +##### 2.2.3 Check the gpg signature ```shell for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i; done @@ -97,13 +97,13 @@ gpg: using RSA key XXXXX gpg: Good signature from "xxx @apache.org>" ``` -### 2.3 Check sha512 hash +#### 2.3 Check sha512 hash ```shell for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` -### 2.4 Check the binary package +#### 2.4 Check the binary package unzip `apache-hertzbeat-${release.version}-incubating-bin.tar.gz` @@ -123,7 +123,7 @@ check as follows: - [ ] Able to compile correctly - [ ] ..... -### 2.5 Check the source package +#### 2.5 Check the source package > If the binary/web-binary package is uploaded, check the binary package. @@ -149,7 +149,7 @@ and check as follows: You can refer to this article: [ASF Third Party License Policy](https://apache.org/legal/resolved.html) -## 3. Email reply +### 3. Email reply If you initiate a posting vote, you can refer to this response example to reply to the email after verification diff --git a/home/docs/help/http_sd.md b/home/docs/help/http_sd.md index 122b159f41b..957b0f88bf6 100644 --- a/home/docs/help/http_sd.md +++ b/home/docs/help/http_sd.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito **Protocol Use:httpsd** -# Steps to monitor micro services +## Steps to monitor micro services 1. 
Make sure your **Register center** is available @@ -18,7 +18,7 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito 2. Add http_sd monitor and enter necessary info about **Register center** on Hertzbeat, such as host, port and so on. 3. Click **OK** -# Configuration parameter +## Configuration parameter | Parameter name | Parameter help description | |-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -30,7 +30,7 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. Adding and modifying operations will continue only after the detection is successful | | Description remarks | For more information about identifying and describing this monitoring, users can note information here | -# Collection Metrics +## Collection Metrics ## Metrics Set:server diff --git a/home/docs/start/update-1.6.0.md b/home/docs/start/update-1.6.0.md index 182dd58be4a..2a5b2581ed4 100644 --- a/home/docs/start/update-1.6.0.md +++ b/home/docs/start/update-1.6.0.md @@ -4,7 +4,7 @@ title: How to update to 1.6.0 sidebar_label: Update to 1.6.0 guide --- -# HertzBeat 1.6.0 Upgrade Guide +## HertzBeat 1.6.0 Upgrade Guide **Note: This guide is applicable for upgrading from 1.5.0 to 1.6.0 to version 1.6.0.** **If you are using an older version, it is recommended to reinstall using the export function, or upgrade to 1.5.0 and then follow this guide to 1.6.0.** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md index 3a324e7817e..33eec0552b8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-12-19-new-committer.md @@ -9,7 +9,7 @@ tags: [opensource] > 非常高兴 HertzBeat 迎来了两位新晋社区Committer, 两位都是来自互联网公司的开发工程师,让我们来了解下他们的开源经历吧! -# 第一位 花城 +## 第一位 花城 姓名:王庆华 @@ -19,15 +19,15 @@ HertzBeat Committer github:[wang1027-wqh](https://github.com/wang1027-wqh) -## 初识hertzbeat +### 初识hertzbeat 说起来挺偶然的,结识hertzbeat是因为我大学的毕业设计,当时在一家互联网公司实习,那个时候第一次看到了企业是怎么监控项目的,不管是系统监控、业务监控还是物联网iot监控,那个时候见世面不广,只知道Prometheus + Grafana,但是学起来、用起来成本比较高,那个时候就觉得应该有其他类型的监控,恰好,到了大学毕业设计选题,我就开始寻找这方面的开源项目,那个时候我们小组正在使用Shen Yu网关,我就看了下社区,发现了hertzbeat,自此我便于它结缘了。 -## 开始提交PR +### 开始提交PR 到了2022-02-18 我开始提交了我第一个pr,当时只是为了优化一些controller入参的格式,没有什么技术含量,但是这是我接触开源的第一步,让我在从理论学习跨出了实践的一步 -## 持续的开源贡献与收获 +### 持续的开源贡献与收获 到目前为止,参与hertzbeat开源项目已有半年多时间,贡献了许多,也成长收获了许多。具体如下: @@ -37,11 +37,11 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) 4. 参与了开源之夏并顺利结项 5. 增加了监控系统的基础告警功能: 钉钉、飞书、企业微信、邮箱等 -## 感谢社区小伙伴 +### 感谢社区小伙伴 感谢无偿帮助过我或给过我启发的小伙伴(排名不分先后):tomsun28,MaxKeyTop,阿超 -## 对新人的一点建议 +### 对新人的一点建议 1. 不要觉得自己一上手就能接触到核心,心急吃不了热豆腐 2. 
不要只注重看代码,要上手搭建、使用 @@ -51,7 +51,7 @@ github:[wang1027-wqh](https://github.com/wang1027-wqh) --- --- -# 第二位 星辰 +## 第二位 星辰 姓名:郑晨鑫 @@ -63,11 +63,11 @@ Hertzbeat Committer github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) -## 初识Hertzbeat +### 初识Hertzbeat 2022年8月开始接触Hertzbeat,由于公司监控elasticsearch使用的cerebro,虽然有非常强大的数据监控,但缺少告警通知的功能;就去github上浏览监控类的项目,刚好看到Hertzbeat,对此非常感兴趣,在了解完整个项目结构和实现后,刚好elasticsearch的监控部分做的不够完善,我就根据cerebro完善了这部分监控数据并提交了pull request。后面在tom老哥的帮助下也开始其他部分的实现。 -## 开始提交PR +### 开始提交PR 从2022年9月至今提交了好几个pr,主要包括: @@ -78,7 +78,7 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) + 一些bug的修复。。。 + promethues exporter 协议解析 -## 持续的开源贡献与收获 +### 持续的开源贡献与收获 到目前为止,参与Hertzbeat社区开源已有半年多时间,贡献了许多,也成长收获了许多。 @@ -86,17 +86,17 @@ github:[Ceilzcx (zcx) (github.com)](https://github.com/Ceilzcx) 同时在社区群里,看到别人提的问题和帮助别人可以学到很多新的知识,很多问题你目前不一定会遇到,其他人遇到的时候你可以思考并收获很多知识。 -## 感谢社区小伙伴 +### 感谢社区小伙伴 感谢无偿帮助过我或给过我启发的小伙伴:[tomsun28](https://github.com/tomsun28) -## 对新人的一点建议 +### 对新人的一点建议 + 使用者可以先看官网,官网基本能够解决你的问题。部分简单或者常见的问题其他可以自己解决,对自己也是一种锻炼 + 可以尝试阅读源码,大部分源码都是包含注释的,并不难;不懂的地方也可以通过运行test,debug看一下整个流程 + 有想法或者bug,可以前往gitee或者github提交issues,也可以在群里询问,不要怕,都是从菜逼过来的 -## 如何参与Hertzbeat +### 如何参与Hertzbeat + 官网有非常完善的贡献者指南:[贡献者指南 | HertzBeat](https://hertzbeat.com/docs/community/contribution) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md index bb348eacf12..9f3a66ca7e9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-10-new-committer.md @@ -9,7 +9,7 @@ tags: [opensource] ![hertzBeat](/img/blog/new-committer.png) -# 欢迎 HertzBeat 新晋两位 Committer +## 欢迎 HertzBeat 新晋两位 Committer > 非常高兴 HertzBeat 开源社区又迎来了两位新晋社区 Committer, 有来自一线的研发小组leader,也有来自大厂的实习生,让我们来了解下他们的开源经历吧! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md index 5cdc243aa58..d41901e5ffb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md @@ -12,7 +12,7 @@ keywords: [open source monitoring system, alerting system] > 热烈欢迎 HertzBeat 有三位小伙伴新晋社区 Committer, 让我们来了解下他们的开源经历吧! -# New Committer - vinci +## New Committer - vinci **姓名:王佳宁** @@ -20,21 +20,21 @@ keywords: [open source monitoring system, alerting system] **Github ID: vinci-897** -## 初识Hertzbeat +### 初识Hertzbeat 2023年3月,我关注到了Hertzbeat的项目,于是直接给社区的Tom老哥发了邮件咨询是否可以加入,老哥很爽快的给了回复。刚好我当时正是大四时间比较充裕,就果断挑了一个issue解决,在hertzbeat有了第一个pull request。 -## 开始提交PR +### 开始提交PR 在之后一段时间里,我花了一段时间阅读Hertzbeat的代码,又断断续续的交了几个pr。直到4月份,我了解到开源之夏相关活动,刚好Hertzbeat也在参加,所以提交了报名信息后便顺利入选。我的任务主要是负责实现一个推送方式的采集器,在编写代码的过程中,我得到了许多社区的郑晨鑫导师和Tom老哥帮助,最终能够顺利完成代码,整个过程还是比较顺利的。 -## 开源贡献 +### 开源贡献 - 新增push module,暴露接口供用户推送数据。 - 在collector模块中实现对推送数据的采集。 - 在前端中实现展示用户自定义的数据。 -## 收获 +### 收获 - 接触到了很优秀的开源社区,提升了相关领域的技术水平。 @@ -42,74 +42,74 @@ keywords: [open source monitoring system, alerting system] --- -# New Committer - 淞筱 +## New Committer - 淞筱 **姓名:周书胜** **河南师范大学大三学生** -## 初识Hertzbeat +### 初识Hertzbeat 今年三月份,在小宝学长的影响下,对开源项目有了些许概念,并为后来学习开源项目奠定了基础,但由于当时只学习了一些Java基础,所以并没有再深入了解。 -## 开始提交PR +### 开始提交PR 在今年七月份,基本完成Java框架开发的学习后,在小宝学长的鼓励下,我开始尝试拉取issue,并在7月20日提交了第一个PR。在此期间,也咨询了Hertzbeat作者和东风学长一些相关问题,实在感谢。 -## 开源贡献 +### 开源贡献 - 支持Spring Gateway、Apache Spark、Apache Hive等服务指标采集 - 自定义nginx、pop3协议,对Nginx,POP3邮箱服务器进行指标采集,并添加相应帮助文档 -## 收获 +### 收获 - 接触了更加优秀、结构更加复杂的大型项目,提高了编程和解决问题的能力 - 将理论知识付诸于实践,收获了JUC,微服务相关的开发经验,以及宝贵的项目经历 -## 感谢社区小伙伴 +### 感谢社区小伙伴 感谢HertzBeat的作者、HertzBeat/Sms4j Committer铁甲小宝同学、Sms4j Committer东风同学,在我遇到自己不能解决的问题时,常常向三位哥哥请教,他们也总是不厌其烦,耐心的帮助我解决问题,实在是无以言表。 感谢社区的其它小伙伴,在与他们交流讨论的过程中收获满满,也感受到了社区活跃的开源氛围。 -## 给新人的一些建议 +### 给新人的一些建议 - 
初次参与开源项目时,可以从简单的任务开始。逐渐熟悉项目的代码和流程,并逐步承担更复杂的任务。 - 如果遇到自己无法解决的问题时,可以多多请教社区的小伙伴们。 --- -# New Committer - 东风 +## New Committer - 东风 **姓名:张洋** **河南师范大学应届生** -## 初识hertzbeat +### 初识hertzbeat 今年6月份开始对项目进行深入了解,我是经朋友推荐了解该项目的,一直对开源项目和社区有所探索,很喜欢这种大家互相分享、讨论并改进的氛围。同时之前在项目中也尝试实现一些监控,所以对于该项目比较感兴趣。 -## 开始提交PR +### 开始提交PR 在今年7月起,我发现hertzbeat的issue和pr很活跃,于是就通过他们的issue和pr来了解如何实现某个协议的监控。随后我发现有关于smtp协议监控的task,就在issue上与作者进行讨论,随后通过文档和代码完成了自己的pr。 -## 开源贡献 +### 开源贡献 - 实现smtp、ntp、websocket可用性的监控。 - 实现memcached 、NebulaGraph的监控指标。 - 为实现的监控添加相关文档。 -## 收获 +### 收获 - 收获了监控相关的开发经验,新增了一个宝贵的项目经历。 - 对于网络协议有了更深刻的了解。 - 对于开源项目的贡献流程有了初步认识。 -## 感谢社区小伙伴 +### 感谢社区小伙伴 感谢hertzbeat的作者提供的相关文档和帮助。感谢朋友的带领,为我提供了勇气,让我敢于尝试进入开源项目进行贡献。感谢社区中其他的小伙伴的issue和pr,加快了我对于该项目的了解。 -## 对新人的一点建议 +### 对新人的一点建议 - issue和pr是你了解的项目的敲门砖,一点要敢于讨论并发表观点。 - 贡献不分大小,要敢于尝试,并不断提升自己。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md index e375a5c15b9..ddd36937324 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md @@ -102,7 +102,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - add prolevel1 as a contributor for code by @allcontributors in - -# 1397 feature: support for dns monitoring by @Calvin979 in +## 1397 feature: support for dns monitoring by @Calvin979 in - Support monitoring hive metrics by @a-little-fool in - support legend pageable in history data charts by @tomsun28 in diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md index 9a0e2b5ee11..e1190acd56e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-07-07-new-committer.md @@ -16,7 +16,7 @@ keywords: [open 
source monitoring system, alerting system] 个人一直热衷于开源社区,同时也是 Apache Pulsar 的 Committer,OpenTelemetry 和 VictoriaMetrics 的 Contributor。 -# 与 HertzBeat 结缘 +## 与 HertzBeat 结缘 今年 4 月份,也就是 HertzBeat 进入 Apache 孵化器的时候,我无意间在朋友圈里看到了一篇公众号的推荐。 @@ -26,7 +26,7 @@ keywords: [open source monitoring system, alerting system] 所以对监控系统有一些经验同时也非常感兴趣,于是便阅读了文档很快就在本地启动起来了(这一点社区做的很好,许多开源项目第一步启动就要劝退不少潜在的开发者)。 -# 开始贡献 +## 开始贡献 我的第一个 PR 是修改了一个 PR 模版里的错误路径,社区处理的非常快,所以第一次贡献就好感倍增。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md index 2c8af78f0e8..f6aa3c19039 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-verify.md @@ -4,13 +4,13 @@ title: 版本物料的验证 sidebar_position: 4 --- -# 验证候选版本 +## 验证候选版本 详细检查列表请参考官方的[check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) 在浏览器中可访问版本内容 -## 1. 下载候选版本到本地 +### 1. 下载候选版本到本地 > 需要依赖gpg工具,如果没有,建议安装gpg2 @@ -22,11 +22,11 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio ``` -## 2. 验证上传的版本是否合规 +### 2. 验证上传的版本是否合规 > 开始验证环节,验证包含但不局限于以下内容和形式 -### 2.1 查看发布包是否完整 +#### 2.1 查看发布包是否完整 > 上传到dist的包必须包含源码包,二进制包可选 @@ -35,18 +35,18 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio 3. 是否包含源码包的sha512 4. 
如果上传了二进制包,则同样检查(2)-(4)所列的内容 -### 2.2 检查gpg签名 +#### 2.2 检查gpg签名 首先导入发布人公钥。从svn仓库导入KEYS到本地环境。(发布版本的人不需要再导入,帮助做验证的人需要导入,用户名填发版人的即可) -#### 2.2.1 导入公钥 +##### 2.2.1 导入公钥 ```shell curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS gpg --import KEYS # 导入KEYS到本地 ``` -#### 2.2.2 信任公钥 +##### 2.2.2 信任公钥 > 信任此次版本所使用的KEY @@ -75,7 +75,7 @@ gpg> ``` -#### 2.2.3 检查签名 +##### 2.2.3 检查签名 ```shell for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i ; done @@ -92,7 +92,7 @@ gpg: using RSA key XXXXX gpg: Good signature from "xxx @apache.org>" ``` -### 2.3 检查sha512哈希 +#### 2.3 检查sha512哈希 > 本地计算sha512哈希后,验证是否与dist上的一致,如果上传二进制包,则同样需要检查二进制包的sha512哈希 @@ -100,7 +100,7 @@ gpg: Good signature from "xxx @apache.org>" for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` -### 2.4 检查二进制包 +#### 2.4 检查二进制包 解压缩 `apache-hertzbeat-${release.version}-incubating-bin.tar.gz` @@ -123,7 +123,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz 参考: -### 2.5. 源码编译验证 +#### 2.5. 源码编译验证 解压缩 `apache-hertzbeat-${release_version}-incubating-src.tar.gz` @@ -147,7 +147,7 @@ cd apache-hertzbeat-${release_version}-incubating-src 参考: -## 3. 邮件回复 +### 3. 
邮件回复 如果发起了发布投票,验证后,可以参照此回复示例进行邮件回复 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md index 2c8af78f0e8..f6aa3c19039 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-verify.md @@ -4,13 +4,13 @@ title: 版本物料的验证 sidebar_position: 4 --- -# 验证候选版本 +## 验证候选版本 详细检查列表请参考官方的[check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) 在浏览器中可访问版本内容 -## 1. 下载候选版本到本地 +### 1. 下载候选版本到本地 > 需要依赖gpg工具,如果没有,建议安装gpg2 @@ -22,11 +22,11 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio ``` -## 2. 验证上传的版本是否合规 +### 2. 验证上传的版本是否合规 > 开始验证环节,验证包含但不局限于以下内容和形式 -### 2.1 查看发布包是否完整 +#### 2.1 查看发布包是否完整 > 上传到dist的包必须包含源码包,二进制包可选 @@ -35,18 +35,18 @@ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_versio 3. 是否包含源码包的sha512 4. 
如果上传了二进制包,则同样检查(2)-(4)所列的内容 -### 2.2 检查gpg签名 +#### 2.2 检查gpg签名 首先导入发布人公钥。从svn仓库导入KEYS到本地环境。(发布版本的人不需要再导入,帮助做验证的人需要导入,用户名填发版人的即可) -#### 2.2.1 导入公钥 +##### 2.2.1 导入公钥 ```shell curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # 下载KEYS gpg --import KEYS # 导入KEYS到本地 ``` -#### 2.2.2 信任公钥 +##### 2.2.2 信任公钥 > 信任此次版本所使用的KEY @@ -75,7 +75,7 @@ gpg> ``` -#### 2.2.3 检查签名 +##### 2.2.3 检查签名 ```shell for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i ; done @@ -92,7 +92,7 @@ gpg: using RSA key XXXXX gpg: Good signature from "xxx @apache.org>" ``` -### 2.3 检查sha512哈希 +#### 2.3 检查sha512哈希 > 本地计算sha512哈希后,验证是否与dist上的一致,如果上传二进制包,则同样需要检查二进制包的sha512哈希 @@ -100,7 +100,7 @@ gpg: Good signature from "xxx @apache.org>" for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` -### 2.4 检查二进制包 +#### 2.4 检查二进制包 解压缩 `apache-hertzbeat-${release.version}-incubating-bin.tar.gz` @@ -123,7 +123,7 @@ tar -xzvf apache-hertzbeat-${release.version}-incubating-bin.tar.gz 参考: -### 2.5. 源码编译验证 +#### 2.5. 源码编译验证 解压缩 `apache-hertzbeat-${release_version}-incubating-src.tar.gz` @@ -147,7 +147,7 @@ cd apache-hertzbeat-${release_version}-incubating-src 参考: -## 3. 邮件回复 +### 3. 邮件回复 如果发起了发布投票,验证后,可以参照此回复示例进行邮件回复 diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-verify.md b/home/versioned_docs/version-v1.5.x/community/how-to-verify.md index 77e53ee444c..c91c4fbb486 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-verify.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-verify.md @@ -4,13 +4,13 @@ title: How to Verify Release sidebar_position: 4 --- -# Verify the candidate version +## Verify the candidate version For detailed check list, please refer to the official [check list](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) Version content accessible in browser -## 1. Download the candidate version +### 1. 
Download the candidate version Download the candidate version to be released to the local environment Need to rely on gpg tool, if not, it is recommended to install `gpg2`. @@ -27,11 +27,11 @@ $ svn co https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_ve $ wget https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/${release_version}-${rc_version}/xxx.xxx ``` -## 2. Verify that the uploaded version is compliant +### 2. Verify that the uploaded version is compliant Start the verification process, which includes but is not limited to the following content and forms. -### 2.1 Check whether the release package is complete +#### 2.1 Check whether the release package is complete The package uploaded to dist must include the source code package, and the binary package is optional. @@ -40,18 +40,18 @@ The package uploaded to dist must include the source code package, and the binar 3. Whether to include the sha512 of the source code package 4. If the binary package is uploaded, also check the contents listed in (2)-(4) -### 2.2 Check gpg signature +#### 2.2 Check gpg signature First import the publisher's public key. Import KEYS from the svn repository to the local environment. 
(The person who releases the version does not need to import it again, the person who helps to do the verification needs to import it, and the user name is enough for the person who issued the version) -#### 2.2.1 Import public key +##### 2.2.1 Import public key ```shell curl https://downloads.apache.org/incubator/hertzbeat/KEYS > KEYS # Download KEYS gpg --import KEYS # Import KEYS to local ``` -#### 2.2.2 Trust the public key +##### 2.2.2 Trust the public key Trust the KEY used in this version: @@ -80,7 +80,7 @@ gpg> ``` -#### 2.2.3 Check the gpg signature +##### 2.2.3 Check the gpg signature ```shell for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i; done @@ -97,13 +97,13 @@ gpg: using RSA key XXXXX gpg: Good signature from "xxx @apache.org>" ``` -### 2.3 Check sha512 hash +#### 2.3 Check sha512 hash ```shell for i in *.tar.gz; do echo $i; sha512sum --check $i.sha512; done ``` -### 2.4 Check the binary package +#### 2.4 Check the binary package unzip `apache-hertzbeat-${release.version}-incubating-bin.tar.gz` @@ -123,7 +123,7 @@ check as follows: - [ ] Able to compile correctly - [ ] ..... -### 2.5 Check the source package +#### 2.5 Check the source package > If the binary/web-binary package is uploaded, check the binary package. @@ -149,7 +149,7 @@ and check as follows: You can refer to this article: [ASF Third Party License Policy](https://apache.org/legal/resolved.html) -## 3. Email reply +### 3. 
Email reply If you initiate a posting vote, you can refer to this response example to reply to the email after verification diff --git a/home/versioned_docs/version-v1.5.x/help/http_sd.md b/home/versioned_docs/version-v1.5.x/help/http_sd.md index 122b159f41b..957b0f88bf6 100644 --- a/home/versioned_docs/version-v1.5.x/help/http_sd.md +++ b/home/versioned_docs/version-v1.5.x/help/http_sd.md @@ -9,7 +9,7 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito **Protocol Use:httpsd** -# Steps to monitor micro services +## Steps to monitor micro services 1. Make sure your **Register center** is available @@ -18,7 +18,7 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito 2. Add http_sd monitor and enter necessary info about **Register center** on Hertzbeat, such as host, port and so on. 3. Click **OK** -# Configuration parameter +## Configuration parameter | Parameter name | Parameter help description | |-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -30,7 +30,7 @@ keywords: [open source monitoring tool, open source java monitoring tool, monito | Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | | Description remarks | For more information about identifying and describing this monitoring, users can note information here | -# Collection Metrics +## Collection Metrics ## Metrics Set:server From 20f89f3b3bc3fad1e0362d1426c2dc3ba52d6a53 Mon Sep 17 00:00:00 2001 From: crossoverJie Date: Sun, 25 Aug 2024 12:16:18 +0800 Subject: [PATCH 226/257] [Improve]Temporarily disable import sort (#2601) --- script/checkstyle/checkstyle.xml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/script/checkstyle/checkstyle.xml b/script/checkstyle/checkstyle.xml index 91a4d9b97a2..fc1623a3585 100644 --- a/script/checkstyle/checkstyle.xml +++ b/script/checkstyle/checkstyle.xml @@ -296,15 +296,18 @@ value="Redundant import {0}."/> - + + + <!– This ensures that static imports go first. –> - + --> From 88aeb9fc2179f3691407224ffb8f94dda91bbb53 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Sun, 25 Aug 2024 18:24:10 +0800 Subject: [PATCH 227/257] [bugfix] fix alarm recover not match in converge reduce in some condition (#2603) Signed-off-by: tomsun28 --- .../alert/reduce/AlarmConvergeReduce.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java b/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java index 936cb2d8b95..8b8dbb257d9 100644 --- a/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java +++ b/alerter/src/main/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduce.java @@ -64,17 +64,11 @@ public boolean filterConverge(Alert currentAlert) { isHasIgnore = true; tags.remove(CommonConstants.IGNORE); } - int alertHash = Objects.hash(CommonConstants.ALERT_PRIORITY_CODE_CRITICAL) - + Arrays.hashCode(tags.keySet().toArray(new String[0])) - + Arrays.hashCode(tags.values().toArray(new String[0])); + int 
alertHash = generateAlertHash(CommonConstants.ALERT_PRIORITY_CODE_CRITICAL, tags); converageAlertMap.remove(alertHash); - alertHash = Objects.hash(CommonConstants.ALERT_PRIORITY_CODE_EMERGENCY) - + Arrays.hashCode(tags.keySet().toArray(new String[0])) - + Arrays.hashCode(tags.values().toArray(new String[0])); + alertHash = generateAlertHash(CommonConstants.ALERT_PRIORITY_CODE_EMERGENCY, tags); converageAlertMap.remove(alertHash); - alertHash = Objects.hash(CommonConstants.ALERT_PRIORITY_CODE_WARNING) - + Arrays.hashCode(tags.keySet().toArray(new String[0])) - + Arrays.hashCode(tags.values().toArray(new String[0])); + alertHash = generateAlertHash(CommonConstants.ALERT_PRIORITY_CODE_WARNING, tags); converageAlertMap.remove(alertHash); if (isHasIgnore) { tags.put(CommonConstants.IGNORE, CommonConstants.IGNORE); @@ -132,9 +126,7 @@ public boolean filterConverge(Alert currentAlert) { if (evalInterval <= 0) { return true; } - int alertHash = Objects.hash(currentAlert.getPriority()) - + Arrays.hashCode(currentAlert.getTags().keySet().toArray(new String[0])) - + Arrays.hashCode(currentAlert.getTags().values().toArray(new String[0])); + int alertHash = generateAlertHash(currentAlert.getPriority(), currentAlert.getTags()); Alert preAlert = converageAlertMap.get(alertHash); if (preAlert == null) { currentAlert.setTimes(1); @@ -165,4 +157,12 @@ public boolean filterConverge(Alert currentAlert) { } return true; } + + private int generateAlertHash(byte priority, Map tags) { + List keyList = tags.keySet().stream().filter(Objects::nonNull).sorted().toList(); + List valueList = tags.values().stream().filter(Objects::nonNull).sorted().toList(); + return Objects.hash(priority) + + Arrays.hashCode(keyList.toArray(new String[0])) + + Arrays.hashCode(valueList.toArray(new String[0])); + } } From 102c72bcc61ab8ddbf1c6fa14f5a2873ccfe7714 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Sun, 25 Aug 2024 18:46:01 +0800 Subject: [PATCH 228/257] [doc] fix markdown lint md013 check (#2599) 
Co-authored-by: shown --- .markdownlint-cli2.jsonc | 6 +++++- home/blog/2022-06-01-hertzbeat-v1.0.md | 3 ++- home/blog/2023-08-14-hertzbeat-v1.4.0.md | 3 ++- home/blog/2023-08-28-new-committer.md | 8 ++++++-- home/blog/2024-07-08-new-committer.md | 6 ++++-- home/blog/2024-07-15-new-committer.md | 3 ++- home/blog/2024-07-29-new-committer.md | 3 ++- home/blog/2024-08-18-new-committer.md | 10 +++++++--- home/docs/community/how-to-release.md | 4 +++- .../version-v1.4.x/help/ntp.md | 0 .../version-v1.5.x/community/how-to-release.md | 4 +++- home/versioned_docs/version-v1.5.x/template.md | 3 ++- 12 files changed, 38 insertions(+), 15 deletions(-) delete mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 01e8be79233..9dab612ab39 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -20,7 +20,11 @@ "MD001": true, "MD052": false, "MD003": false, - "MD013": false, + "MD013": { + "line_length": 600, + "code_blocks": false, + "tables": false + }, "MD024": { "siblings_only": true }, diff --git a/home/blog/2022-06-01-hertzbeat-v1.0.md b/home/blog/2022-06-01-hertzbeat-v1.0.md index eaf32fa4a1a..9d6a003ee50 100644 --- a/home/blog/2022-06-01-hertzbeat-v1.0.md +++ b/home/blog/2022-06-01-hertzbeat-v1.0.md @@ -7,7 +7,8 @@ author_image_url: https://avatars.githubusercontent.com/u/24788200?s=400&v=4 tags: [opensource] --- -HertzBeat, incubated by Dromara and open-sourced by TanCloud, is an open-source monitoring and alerting project that supports a variety of monitoring types including websites, APIs, PING, ports, databases, full-site, operating systems, middleware, etc. It supports threshold alarms and notification alerts (email, webhook, DingTalk, WeCom, Feishu robots) and has an easy-to-use, friendly visual operation interface. 
+HertzBeat, incubated by Dromara and open-sourced by TanCloud, is an open-source monitoring and alerting project that supports a variety of monitoring types including websites, APIs, PING, ports, databases, full-site, operating systems, middleware, etc. +It supports threshold alarms and notification alerts (email, webhook, DingTalk, WeCom, Feishu robots) and has an easy-to-use, friendly visual operation interface. Official Website: hertzbeat.com | tancloud.cn diff --git a/home/blog/2023-08-14-hertzbeat-v1.4.0.md b/home/blog/2023-08-14-hertzbeat-v1.4.0.md index 34179eb4df7..e0fec2a10fc 100644 --- a/home/blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/blog/2023-08-14-hertzbeat-v1.4.0.md @@ -64,7 +64,8 @@ First of all, let's take a look at what open source can bring, or why open sourc * User traffic. Open source projects are provided free of charge to users and developers, and have advantages in attracting users to use and promoting them. * User trust. Open source products are naturally easy to gain the trust and patience of users, or lower the threshold of trust for users. -* Community collaboration. Open source products can attract top contributors to contribute together, receive user feedback issues, pr contributions, etc. Driven by the community, open source projects will become better and better, and more people will participate and use them after positive feedback. Community collaboration I think this is the meaning of open source, and this is not just the contribution code collaboration between programmers, users are all collaboration objects (for example, our project has a large number of operation and maintenance friends who contribute code and documents), if it is only code Open source without community collaboration, it is better to release an installation package for others to use and download for free. +* Community collaboration. Open source products can attract top contributors to contribute together, receive user feedback issues, pr contributions, etc. 
+* Driven by the community, open source projects will become better and better, and more people will participate and use them after positive feedback. Community collaboration I think this is the meaning of open source, and this is not just the contribution code collaboration between programmers, users are all collaboration objects (for example, our project has a large number of operation and maintenance friends who contribute code and documents), if it is only code Open source without community collaboration, it is better to release an installation package for others to use and download for free. * Product ecology. This is required for some ecological products, such as hertzbeat, which need to support monitoring types that connect to various types of protocols, and a large number of monitoring templates. Only a good open source project ecology can attract other contributors to contribute and share, exchange what is needed in the ecology, and ultimately everyone will benefit from the ecology. This is difficult to do in closed source programs. The above points focus on community collaboration and product ecology. This is also the reason for the open source cluster version. Only open source products can be rolled into stronger product power. For example, the technical feature of cluster will naturally attract developers (and the cluster itself is The product of our community collaboration) will attract more users and contributors to use feedback and iterate together. The community drives and then positively promotes open source projects and satisfies user functional experience. diff --git a/home/blog/2023-08-28-new-committer.md b/home/blog/2023-08-28-new-committer.md index 62a9b03d7b6..f6b389f3340 100644 --- a/home/blog/2023-08-28-new-committer.md +++ b/home/blog/2023-08-28-new-committer.md @@ -10,7 +10,10 @@ keywords: [open source monitoring system, alerting system] ! 
[hertzBeat](/img/blog/new-committer.png) -It's great to welcome a new community `Committer`, unlike other contributors `logicz` comes from an Ops implementation position at Cyberoam rather than a development position, but the quality of the contributions, both in terms of code and documentation etc. is very high 👍. This is also our `HertzBeat` and other open source projects are not the same place, because the user group is more oriented to the operation and maintenance of the development, in our 139 contributors in the operation and maintenance engineers accounted for more than 30%, which breaks the open source project collaboration and contribution to the object are the inherent cognition of the development position, which shows that whether it is the operation and maintenance engineers and test engineers to contribute to the open source project participation is very enthusiastic! This shows that both operation and maintenance engineers and test engineers are very enthusiastic about contributing to open source projects, not just as bystanders to open source collaboration. Participation in open source projects is not exclusive to a certain group of people, but is open to all who want to participate, it may be a document, a script or a piece of code, imagine your participation in the open source project is deployed to thousands of servers to run running, to help others to be used or browse the Review discussion, git record will always be kept, this may be the significance of participation in open source projects. +It's great to welcome a new community `Committer`, unlike other contributors `logicz` comes from an Ops implementation position at Cyberoam rather than a development position, but the quality of the contributions, both in terms of code and documentation etc. is very high 👍. 
+This is also our `HertzBeat` and other open source projects are not the same place, because the user group is more oriented to the operation and maintenance of the development, in our 139 contributors in the operation and maintenance engineers accounted for more than 30%, which breaks the open source project collaboration and contribution to the object are the inherent cognition of the development position, which shows that whether it is the operation and maintenance engineers and test engineers to contribute to the open source project participation is very enthusiastic! +This shows that both operation and maintenance engineers and test engineers are very enthusiastic about contributing to open source projects, not just as bystanders to open source collaboration. +Participation in open source projects is not exclusive to a certain group of people, but is open to all who want to participate, it may be a document, a script or a piece of code, imagine your participation in the open source project is deployed to thousands of servers to run running, to help others to be used or browse the Review discussion, git record will always be kept, this may be the significance of participation in open source projects. > Welcome HertzBeat's newest community committer logicz, let's learn more about his open source experience! @@ -28,7 +31,8 @@ github:zqr10159 ## Getting to know Hertzbeat -In March 2023, I started to contact Hertzbeat, due to the need for a complete monitoring->alerting platform for the project, due to the deployment of the project on the intranet, the company's internal closed-source monitoring platform can not be developed to meet the needs of cross-network segment alerts. Later in the github looking for open source monitoring platform, found Hertzbeat, easy to deploy and full-featured. 
The most important thing is that the author tom replies to issues and updates very quickly, very much in line with my imagination of the open source community, I'm very happy to be able to participate in open source and can see their own results for everyone to use. +In March 2023, I started to contact Hertzbeat, due to the need for a complete monitoring->alerting platform for the project, due to the deployment of the project on the intranet, the company's internal closed-source monitoring platform can not be developed to meet the needs of cross-network segment alerts. +Later in the github looking for open source monitoring platform, found Hertzbeat, easy to deploy and full-featured. The most important thing is that the author tom replies to issues and updates very quickly, very much in line with my imagination of the open source community, I'm very happy to be able to participate in open source and can see their own results for everyone to use. ## Ongoing open source contributions and gains diff --git a/home/blog/2024-07-08-new-committer.md b/home/blog/2024-07-08-new-committer.md index b955e2055b0..ae9ca623da4 100644 --- a/home/blog/2024-07-08-new-committer.md +++ b/home/blog/2024-07-08-new-committer.md @@ -16,7 +16,8 @@ I first came into contact with the Apache Hertzbeat project by chance. At that t ### Start contributing -After having a preliminary understanding of the project, I found that it needed to complete the monitoring scope of the big data field, so I decided to start contributing some code. I started with supplementing big data monitoring. This not only helped me understand the project more deeply, but also gradually let other members of the community know me. I remember that the first Pull Request I submitted was to add a new Hbase cluster monitoring template. Although it seems insignificant, I was very excited when it was merged. This was a real interaction between me and the open source community and my first step towards greater contribution. 
+After having a preliminary understanding of the project, I found that it needed to complete the monitoring scope of the big data field, so I decided to start contributing some code. I started with supplementing big data monitoring. This not only helped me understand the project more deeply, but also gradually let other members of the community know me. +I remember that the first Pull Request I submitted was to add a new Hbase cluster monitoring template. Although it seems insignificant, I was very excited when it was merged. This was a real interaction between me and the open source community and my first step towards greater contribution. ### In-depth participation @@ -46,4 +47,5 @@ This process made me understand the importance of cooperation and made me feel t ### Conclusion -Becoming a Committer of the Apache Hertzbeat project is a challenging and rewarding journey. Through continuous learning and contribution, I have not only improved my technical ability, but also found a sense of belonging and accomplishment in the community. I hope that my experience can inspire more people to participate in the open source community and jointly promote the progress and development of technology. To borrow the words of Tom: Participating in open source should not affect everyone's work and life, otherwise it will go against the original intention. Everyone should participate in the free time after get off work. +Becoming a Committer of the Apache Hertzbeat project is a challenging and rewarding journey. Through continuous learning and contribution, I have not only improved my technical ability, but also found a sense of belonging and accomplishment in the community. +I hope that my experience can inspire more people to participate in the open source community and jointly promote the progress and development of technology. To borrow the words of Tom: Participating in open source should not affect everyone's work and life, otherwise it will go against the original intention. 
Everyone should participate in the free time after get off work. diff --git a/home/blog/2024-07-15-new-committer.md b/home/blog/2024-07-15-new-committer.md index 2b8ec2bb111..2cde4ec39cb 100644 --- a/home/blog/2024-07-15-new-committer.md +++ b/home/blog/2024-07-15-new-committer.md @@ -14,7 +14,8 @@ Hello everyone, I am very honored to receive an invitation from the community to ### Encounter -In my work, several physical servers are deployed, running various databases and middleware. Although we have deployed the Prometheus + Grafana monitoring combination, most services and servers require additional installation of exporters. As a result, this monitoring system does not cover the entire project. Sometimes, we only realize a service is down when it is too late. One day in April, I came across an article introducing HertzBeat. I was immediately attracted by its unique features, such as no need for agents and fully visualized configuration, along with support for one-click deployment via Docker. I quickly deployed HertzBeat and put it into use. +In my work, several physical servers are deployed, running various databases and middleware. Although we have deployed the Prometheus + Grafana monitoring combination, most services and servers require additional installation of exporters. +As a result, this monitoring system does not cover the entire project. Sometimes, we only realize a service is down when it is too late. One day in April, I came across an article introducing HertzBeat. I was immediately attracted by its unique features, such as no need for agents and fully visualized configuration, along with support for one-click deployment via Docker. I quickly deployed HertzBeat and put it into use. 
### Familiarization diff --git a/home/blog/2024-07-29-new-committer.md b/home/blog/2024-07-29-new-committer.md index 55aecd3eb94..642d1fac885 100644 --- a/home/blog/2024-07-29-new-committer.md +++ b/home/blog/2024-07-29-new-committer.md @@ -18,7 +18,8 @@ In the open-source community, every contribution not only pushes the project for ## Starting from the Details: Optimizing Visuals and Interactions -I firmly believe that details determine success or failure. When I first joined the project, I began by optimizing the interface to enhance the user's visual and interactive experience. I refined the modal window layout of the monitoring selection menu to better align with user operation habits. I adjusted the header style and content layout of the monitoring details page to make information presentation clearer and more intuitive. Additionally, I unified the border-radius values of components and addressed issues such as missing internationalization translations, ensuring the consistency and completeness of the system interface. +I firmly believe that details determine success or failure. When I first joined the project, I began by optimizing the interface to enhance the user's visual and interactive experience. I refined the modal window layout of the monitoring selection menu to better align with user operation habits. +I adjusted the header style and content layout of the monitoring details page to make information presentation clearer and more intuitive. Additionally, I unified the border-radius values of components and addressed issues such as missing internationalization translations, ensuring the consistency and completeness of the system interface. These seemingly minor changes significantly enhanced the overall aesthetics and user experience of the system. Through this process, I gained a profound understanding of the importance of interface design for user experience and honed my attention to detail. 
diff --git a/home/blog/2024-08-18-new-committer.md b/home/blog/2024-08-18-new-committer.md index b1175b5534b..6e1f9c712dc 100644 --- a/home/blog/2024-08-18-new-committer.md +++ b/home/blog/2024-08-18-new-committer.md @@ -20,7 +20,9 @@ From my junior year to the present, I still maintain my passion for open source ## Participate in the Apache Community -[Apache Software Foundation (ASF)](https://community.apache.org/) is an American non-profit organization that aims to support various open source software projects. ASF was originally formed by a group of developers of Apache HTTP Server and was officially established on March 25, 1999. As of 2021, its total membership is approximately 1,000. The name comes from a local Indian tribe in North America. This tribe is famous for its superb military literacy and superhuman endurance. In the second half of the 19th century, it resisted the invaders who invaded their territory. In order to show respect for this Indian tribe, the name of the tribe (Apache) is used as the server name. But when it comes to this naming, there is an interesting story circulating here. Because this server is based on the NCSA HTTPd server and is the product of continuous revision and patching through the efforts of everyone, it is nicknamed "A Patchy Server" (a patch server). Here, because "A Patchy" and "Apache" are homophones, it was finally officially named "Apache Server". +[Apache Software Foundation (ASF)](https://community.apache.org/) is an American non-profit organization that aims to support various open source software projects. ASF was originally formed by a group of developers of Apache HTTP Server and was officially established on March 25, 1999. As of 2021, its total membership is approximately 1,000. The name comes from a local Indian tribe in North America. +This tribe is famous for its superb military literacy and superhuman endurance. In the second half of the 19th century, it resisted the invaders who invaded their territory. 
In order to show respect for this Indian tribe, the name of the tribe (Apache) is used as the server name. +But when it comes to this naming, there is an interesting story circulating here. Because this server is based on the NCSA HTTPd server and is the product of continuous revision and patching through the efforts of everyone, it is nicknamed "A Patchy Server" (a patch server). Here, because "A Patchy" and "Apache" are homophones, it was finally officially named "Apache Server". The above is an introduction to the Apache Software Foundation from Wikipedia. @@ -46,13 +48,15 @@ As the saying goes, the greater the ability, the greater the task. Becoming a pr ## How to participate in open source -Anyone who wants to do something needs an opportunity and a guide. Among the many Apache projects, there are many people who pay attention to the project's Issue List. The one time that remains fresh in my memory is: one night after writing the unit test of a tool class, I discovered a small bug. What I thought at the time was that there was too much contextual information and it was not good to write it in a PR, so I opened an Issue to record the context. How small is this bug? It was so small that I just created the Issue. After submitting the unit test and the code to fix the bug together, I refreshed the PR List again and saw a PR Title to fix the bug. +Anyone who wants to do something needs an opportunity and a guide. Among the many Apache projects, there are many people who pay attention to the project's Issue List. The one time that remains fresh in my memory is: one night after writing the unit test of a tool class, I discovered a small bug. What I thought at the time was that there was too much contextual information and it was not good to write it in a PR, so I opened an Issue to record the context. +How small is this bug? It was so small that I just created the Issue. 
After submitting the unit test and the code to fix the bug together, I refreshed the PR List again and saw a PR Title to fix the bug. In fact, there is no shortage of people paying attention to the project, but more of an opportunity! Opportunities to participate in projects. ### The Apache Way -[The Apache Way](https://www.apache.org/theapacheway/) pursued by the Apache Community. The community is greater than the code. A good community is often more important than excellent code. The components of the community include developers, users, etc. Users are the first users of the project code. A healthy community status is when users discover problems, then report problems, and finally solve problems during use. A more likely scenario is that a user reports a problem, switches from being a user to a developer, and solves the problem. And continue to participate in the maintenance of community projects. +[The Apache Way](https://www.apache.org/theapacheway/) pursued by the Apache Community. The community is greater than the code. A good community is often more important than excellent code. The components of the community include developers, users, etc. Users are the first users of the project code. A healthy community status is when users discover problems, then report problems, and finally solve problems during use. +A more likely scenario is that a user reports a problem, switches from being a user to a developer, and solves the problem. And continue to participate in the maintenance of community projects. 
### Paths to participate in open source diff --git a/home/docs/community/how-to-release.md b/home/docs/community/how-to-release.md index 94c919d30f0..46ab8a2bc86 100644 --- a/home/docs/community/how-to-release.md +++ b/home/docs/community/how-to-release.md @@ -307,7 +307,9 @@ svn co --depth empty https://dist.apache.org/repos/dist/dev/incubator/hertzbeat - Copy the material package to the dev directory -Create a version number directory and name it in the form of ${release_version}-${RC_version}. RC_version starts from 1, that is, the candidate version starts from RC1. During the release process, there is a problem that causes the vote to fail. If it needs to be corrected, it needs to iterate the RC version , the RC version number needs to be +1. For example: Vote for version 1.6.0-RC1. If the vote passes without any problems, the RC1 version material will be released as the final version material. If there is a problem (when the hertzbeat/incubator community votes, the voters will strictly check various release requirements and compliance issues) and need to be corrected, then re-initiate the vote after the correction, and the candidate version for the next vote is 1.6.0- RC2. +Create a version number directory and name it in the form of ${release_version}-${RC_version}. RC_version starts from 1, that is, the candidate version starts from RC1. During the release process, there is a problem that causes the vote to fail. +If it needs to be corrected, it needs to iterate the RC version , the RC version number needs to be +1. For example: Vote for version 1.6.0-RC1. If the vote passes without any problems, the RC1 version material will be released as the final version material. +If there is a problem (when the hertzbeat/incubator community votes, the voters will strictly check various release requirements and compliance issues) and need to be corrected, then re-initiate the vote after the correction, and the candidate version for the next vote is 1.6.0- RC2. 
```shell mkdir -p svn/dev/1.6.0-RC1 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.4.x/help/ntp.md deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-release.md b/home/versioned_docs/version-v1.5.x/community/how-to-release.md index 94c919d30f0..46ab8a2bc86 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-release.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-release.md @@ -307,7 +307,9 @@ svn co --depth empty https://dist.apache.org/repos/dist/dev/incubator/hertzbeat - Copy the material package to the dev directory -Create a version number directory and name it in the form of ${release_version}-${RC_version}. RC_version starts from 1, that is, the candidate version starts from RC1. During the release process, there is a problem that causes the vote to fail. If it needs to be corrected, it needs to iterate the RC version , the RC version number needs to be +1. For example: Vote for version 1.6.0-RC1. If the vote passes without any problems, the RC1 version material will be released as the final version material. If there is a problem (when the hertzbeat/incubator community votes, the voters will strictly check various release requirements and compliance issues) and need to be corrected, then re-initiate the vote after the correction, and the candidate version for the next vote is 1.6.0- RC2. +Create a version number directory and name it in the form of ${release_version}-${RC_version}. RC_version starts from 1, that is, the candidate version starts from RC1. During the release process, there is a problem that causes the vote to fail. +If it needs to be corrected, it needs to iterate the RC version , the RC version number needs to be +1. For example: Vote for version 1.6.0-RC1. 
If the vote passes without any problems, the RC1 version material will be released as the final version material. +If there is a problem (when the hertzbeat/incubator community votes, the voters will strictly check various release requirements and compliance issues) and need to be corrected, then re-initiate the vote after the correction, and the candidate version for the next vote is 1.6.0- RC2. ```shell mkdir -p svn/dev/1.6.0-RC1 diff --git a/home/versioned_docs/version-v1.5.x/template.md b/home/versioned_docs/version-v1.5.x/template.md index 2359a43e51f..f4b2743be26 100644 --- a/home/versioned_docs/version-v1.5.x/template.md +++ b/home/versioned_docs/version-v1.5.x/template.md @@ -13,7 +13,8 @@ Here is the architecture. ![hertzBeat](/img/docs/hertzbeat-arch.png) -**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates, and users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** +**We define all monitoring collection types (mysql, website, jvm, k8s) as yml templates** +**Users can import these templates into the hertzbeat system to support corresponding types of monitoring, which is very convenient!** ![](/img/docs/advanced/extend-point-1.png) From c3ffad76ca992abc3ae2264af468d74c8b353101 Mon Sep 17 00:00:00 2001 From: Gao Jian <1209640759@qq.com> Date: Sun, 25 Aug 2024 19:01:40 +0800 Subject: [PATCH 229/257] [fixbug]: Fix Redfish protocol parse bug (#2597) Co-authored-by: shown --- .../collect/redfish/RedfishClient.java | 2 +- .../collect/redfish/RedfishCollectImpl.java | 6 +-- .../redfish/RedfishCollectImplTest.java | 12 +++--- .../entity/job/protocol/RedfishProtocol.java | 3 ++ .../src/main/resources/define/app-redfish.yml | 38 +++++++++---------- 5 files changed, 32 insertions(+), 29 deletions(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java 
b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java index afc715ce4ec..f9de2fda92a 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishClient.java @@ -73,7 +73,7 @@ public ConnectSession connect() throws Exception { ? String.format("[%s]:%s", this.host, this.port + uri) : String.format("%s:%s", this.host, this.port + uri); - requestBuilder.setUri(NetworkConstants.HTTP_HEADER + baseUri); + requestBuilder.setUri(NetworkConstants.HTTPS_HEADER + baseUri); } requestBuilder.addHeader(HttpHeaders.CONNECTION, NetworkConstants.KEEP_ALIVE); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java index a6e30f5c24f..5977871c639 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImpl.java @@ -176,10 +176,10 @@ private void parseRedfishResource(CollectRep.MetricsData.Builder builder, String if (!StringUtils.hasText(resp)) { return; } - List aliasFields = metrics.getAliasFields(); + List jsonPaths = metrics.getRedfish().getJsonPath(); CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); - for (String alias : aliasFields) { - List res = JsonPathParser.parseContentWithJsonPath(resp, alias); + for (String path : jsonPaths) { + List res = JsonPathParser.parseContentWithJsonPath(resp, path); if (res != null && !res.isEmpty()) { Object value = res.get(0); valueRowBuilder.addColumns(value == null ? 
CommonConstants.NULL_VALUE : String.valueOf(value)); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImplTest.java index f2ddf1b3481..3fa01f78b5a 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redfish/RedfishCollectImplTest.java @@ -63,11 +63,11 @@ void setUp() { @Test void collect() { CollectRep.MetricsData.Builder builder = CollectRep.MetricsData.newBuilder(); - List aliasField = new ArrayList<>(); - aliasField.add("$.Id"); + List jsonPath = new ArrayList<>(); + jsonPath.add("$.Id"); Metrics metrics = new Metrics(); metrics.setRedfish(redfishProtocol); - metrics.setAliasFields(aliasField); + metrics.getRedfish().setJsonPath(jsonPath); metrics.setName("Chassis"); RedfishClient.create(redfishProtocol); redfishCollect.preCheck(metrics); @@ -77,12 +77,12 @@ void collect() { @Test void mockCollect() throws Exception { CollectRep.MetricsData.Builder builder = CollectRep.MetricsData.newBuilder(); - List aliasField = new ArrayList<>(); - aliasField.add("$.['@odata.id']"); + List jsonPath = new ArrayList<>(); + jsonPath.add("$.['@odata.id']"); redfishProtocol.setSchema("/redfish/v1/Chassis/{ChassisId}/PowerSubsystem/PowerSupplies"); Metrics metrics = new Metrics(); metrics.setRedfish(redfishProtocol); - metrics.setAliasFields(aliasField); + metrics.getRedfish().setJsonPath(jsonPath); metrics.setName("PowerSupply"); String chassis = """ { diff --git a/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/RedfishProtocol.java b/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/RedfishProtocol.java index b8042e12f29..42b3e53fa5e 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/RedfishProtocol.java +++ 
b/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/RedfishProtocol.java @@ -17,6 +17,7 @@ package org.apache.hertzbeat.common.entity.job.protocol; +import java.util.List; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; @@ -59,4 +60,6 @@ public class RedfishProtocol { * Redfish Resource Name and Corresponding Collection URI */ private String schema; + + private List jsonPath; } diff --git a/manager/src/main/resources/define/app-redfish.yml b/manager/src/main/resources/define/app-redfish.yml index 2be76bb8c2f..2c38fd88671 100644 --- a/manager/src/main/resources/define/app-redfish.yml +++ b/manager/src/main/resources/define/app-redfish.yml @@ -164,12 +164,6 @@ metrics: en-US: Chasis Health # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field # (可选)监控指标别名, 做为中间字段与采集数据字段和指标字段映射转换 - aliasFields: - - $.['@odata.id'] - - $.Name - - $.ChassisType - - $.Status.State - - $.Status.Health # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk protocol: redfish # the config content when protocol is redfish @@ -184,6 +178,12 @@ metrics: password: ^_^password^_^ # timeout unit:ms timeout: ^_^timeout^_^ + jsonPath: + - $.['@odata.id'] + - $.Name + - $.ChassisType + - $.Status.State + - $.Status.Health - name: Battery priority: 1 @@ -213,12 +213,6 @@ metrics: i18n: zh-CN: 电池充电状态 en-US: Battery Charge Status - aliasFields: - - $.['@odata.id'] - - $.Name - - $.Status.State - - $.Status.Health - - $.ChargeState protocol: redfish redfish: # redfish host: ipv4 ipv6 domain @@ -231,6 +225,12 @@ metrics: password: ^_^password^_^ # timeout unit:ms timeout: ^_^timeout^_^ + jsonPath: + - $.['@odata.id'] + - $.Name + - $.Status.State + - $.Status.Health + - $.ChargeState - name: Fan priority: 2 @@ -265,13 +265,6 @@ metrics: i18n: zh-CN: 风扇转速 en-US: Fan Speed - aliasFields: - - $.['@odata.id'] - - $.Name - - $.Status.State - - $.Status.Health - - 
$.SpeedPercent.Reading - - $.SpeedPercent.SpeedRPM protocol: redfish redfish: # redfish host: ipv4 ipv6 domain @@ -286,3 +279,10 @@ metrics: timeout: ^_^timeout^_^ # redfish fan collection schema schema: /redfish/v1/Chassis/{ChassisId}/ThermalSubsystem/Fans + jsonPath: + - $.['@odata.id'] + - $.Name + - $.Status.State + - $.Status.Health + - $.SpeedPercent.Reading + - $.SpeedPercent.SpeedRPM From 857e4118f0259f8b092e81d98cb64c56a49ffa16 Mon Sep 17 00:00:00 2001 From: Jast Date: Mon, 26 Aug 2024 09:27:40 +0800 Subject: [PATCH 230/257] [improve] Improve markdown md035 (#2592) Co-authored-by: tomsun28 Co-authored-by: shown --- .markdownlint-cli2.jsonc | 1 - home/blog/2022-06-19-hertzbeat-v1.1.0.md | 4 ++-- home/blog/2022-06-22-one-step-up.md | 2 +- home/blog/2023-03-15-hertzbeat-v1.3.0.md | 2 +- home/blog/2023-08-14-hertzbeat-v1.4.0.md | 2 +- home/blog/2023-09-26-hertzbeat-v1.4.1.md | 2 +- home/blog/2023-11-12-hertzbeat-v1.4.2.md | 2 +- home/blog/2023-12-11-hertzbeat-v1.4.3.md | 2 +- home/blog/2024-01-11-new-committer.md | 2 +- home/blog/2024-01-18-hertzbeat-v1.4.4.md | 2 +- home/docs/introduce.md | 6 +++--- .../2022-06-19-hertzbeat-v1.1.0.md | 4 ++-- .../2022-06-22-one-step-up.md | 4 ++-- .../2023-03-15-hertzbeat-v1.3.0.md | 4 ++-- .../2023-08-14-hertzbeat-v1.4.0.md | 2 +- .../2023-12-11-hertzbeat-v1.4.3.md | 2 +- .../2024-01-11-new-committer.md | 2 +- .../2024-01-18-hertzbeat-v1.4.4.md | 2 +- .../current/introduce.md | 6 +++--- .../version-v1.5.x/introduce.md | 8 ++++---- home/versioned_docs/version-v1.5.x/introduce.md | 10 +++++----- 21 files changed, 35 insertions(+), 36 deletions(-) diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 9dab612ab39..1c5c7e73949 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -31,7 +31,6 @@ "MD025": true, "MD029": false, "MD033": false, - "MD035": false, "MD036": false, "MD040": false, "MD045": false, diff --git a/home/blog/2022-06-19-hertzbeat-v1.1.0.md 
b/home/blog/2022-06-19-hertzbeat-v1.1.0.md index c2de558f751..3e0c69bf8db 100644 --- a/home/blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/blog/2022-06-19-hertzbeat-v1.1.0.md @@ -58,7 +58,7 @@ commit; Have Fun! ----- +--- ## V1.1.0 @@ -114,7 +114,7 @@ Have Fun! Have Fun! ----- +--- > [HertzBeat](https://github.com/apache/hertzbeat), incubated by [Dromara](https://dromara.org) and open-sourced by [TanCloud](https://tancloud.cn), is an open-source project supporting monitoring for websites, APIs, PING, ports, databases, operating systems, and more, with a user-friendly visual interface. > We also offer a [SAAS cloud monitoring version](https://console.tancloud.cn), allowing small and medium teams and individuals to start monitoring their web resources without deploying a complicated monitoring system, simply [log in to start](https://console.tancloud.cn) your monitoring journey for free. diff --git a/home/blog/2022-06-22-one-step-up.md b/home/blog/2022-06-22-one-step-up.md index ad172eef3ca..84e5381f25f 100644 --- a/home/blog/2022-06-22-one-step-up.md +++ b/home/blog/2022-06-22-one-step-up.md @@ -58,7 +58,7 @@ commit; Have Fun! ----- +--- ## V1.1.0 diff --git a/home/blog/2023-03-15-hertzbeat-v1.3.0.md b/home/blog/2023-03-15-hertzbeat-v1.3.0.md index 1082e4fa16c..170c2fa1516 100644 --- a/home/blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/blog/2023-03-15-hertzbeat-v1.3.0.md @@ -89,7 +89,7 @@ COMMIT; - and more for your custom monitoring. - Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook`. ----- +--- **Github: ** **Gitee: ** diff --git a/home/blog/2023-08-14-hertzbeat-v1.4.0.md b/home/blog/2023-08-14-hertzbeat-v1.4.0.md index e0fec2a10fc..3e3762b91b4 100644 --- a/home/blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/blog/2023-08-14-hertzbeat-v1.4.0.md @@ -162,7 +162,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * and more for your custom monitoring. 
* Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook`. ----- +--- **Github: ** **Gitee: ** diff --git a/home/blog/2023-09-26-hertzbeat-v1.4.1.md b/home/blog/2023-09-26-hertzbeat-v1.4.1.md index 3c9910bc556..cbb1d74324d 100644 --- a/home/blog/2023-09-26-hertzbeat-v1.4.1.md +++ b/home/blog/2023-09-26-hertzbeat-v1.4.1.md @@ -150,7 +150,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * and more for your custom monitoring. * Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. ----- +--- **Github: ** **Gitee: ** diff --git a/home/blog/2023-11-12-hertzbeat-v1.4.2.md b/home/blog/2023-11-12-hertzbeat-v1.4.2.md index 879ccd288e0..76747ce3989 100644 --- a/home/blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/blog/2023-11-12-hertzbeat-v1.4.2.md @@ -131,7 +131,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * and more for your custom monitoring. * Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. ----- +--- **Github: ** **Gitee: ** diff --git a/home/blog/2023-12-11-hertzbeat-v1.4.3.md b/home/blog/2023-12-11-hertzbeat-v1.4.3.md index 7a2ed2e01b0..14a01fb4153 100644 --- a/home/blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/blog/2023-12-11-hertzbeat-v1.4.3.md @@ -145,7 +145,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * and more for your custom monitoring. * Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. 
----- +--- **Github: ** **Gitee: ** diff --git a/home/blog/2024-01-11-new-committer.md b/home/blog/2024-01-11-new-committer.md index 42e94929c2f..8d709c18ed2 100644 --- a/home/blog/2024-01-11-new-committer.md +++ b/home/blog/2024-01-11-new-committer.md @@ -114,7 +114,7 @@ Thanks to the authors of hertzbeat for the documentation and help. Thanks to my - Issues and pr's are the knock on the door of the project you are getting to know, so be willing to discuss and express your opinion. - No matter how big or small your contribution is, be willing to try and keep improving yourself. ----- +--- ### What is HertzBeat? diff --git a/home/blog/2024-01-18-hertzbeat-v1.4.4.md b/home/blog/2024-01-18-hertzbeat-v1.4.4.md index a3e7bd93ffa..a69c099723e 100644 --- a/home/blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/blog/2024-01-18-hertzbeat-v1.4.4.md @@ -159,7 +159,7 @@ Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/do * and more for your custom monitoring. * Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. ----- +--- **Github: ** **Gitee: ** diff --git a/home/docs/introduce.md b/home/docs/introduce.md index 6f6769ca55c..97bb00d9cd0 100644 --- a/home/docs/introduce.md +++ b/home/docs/introduce.md @@ -154,7 +154,7 @@ In an isolated network where multiple networks are not connected, we need to dep **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** ------ +--- ## Quickly Start @@ -299,10 +299,10 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/9.png) ------ +--- **There's so much more to discover. 
Have Fun!** ------ +--- **Github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md index 076285a021a..c5f4f874fb3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md @@ -58,7 +58,7 @@ commit; Have Fun! ----- +--- ## V1.1.0 @@ -112,7 +112,7 @@ commit; Have Fun! ----- +--- > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn)开源的一个支持网站,API,PING,端口,数据库,操作系统等监控类型,拥有易用友好的可视化操作界面的开源监控告警项目。 > 当然,我们也提供了对应的[SAAS云监控版本](https://console.tancloud.cn),中小团队和个人无需再为了监控自己的网站资源,而去部署一套繁琐的监控系统,[登录即可免费开始](https://console.tancloud.cn)监控之旅。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md index 076285a021a..c5f4f874fb3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md @@ -58,7 +58,7 @@ commit; Have Fun! ----- +--- ## V1.1.0 @@ -112,7 +112,7 @@ commit; Have Fun! 
----- +--- > [HertzBeat赫兹跳动](https://github.com/apache/hertzbeat) 是由 [Dromara](https://dromara.org) 孵化,[TanCloud](https://tancloud.cn)开源的一个支持网站,API,PING,端口,数据库,操作系统等监控类型,拥有易用友好的可视化操作界面的开源监控告警项目。 > 当然,我们也提供了对应的[SAAS云监控版本](https://console.tancloud.cn),中小团队和个人无需再为了监控自己的网站资源,而去部署一套繁琐的监控系统,[登录即可免费开始](https://console.tancloud.cn)监控之旅。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md index fed67a861cb..5d622de36dc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md @@ -109,7 +109,7 @@ Bugfix. 22. [[script] modified the linux memory metrics specified script code #719](https://github.com/apache/hertzbeat/pull/719) 23. [[webapp] bugfix the cover of the big screen is too small #724](https://github.com/apache/hertzbeat/pull/724) ----- +--- 升级注意⚠️. @@ -144,7 +144,7 @@ COMMIT; - 和更多你的自定义监控。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 ----- +--- **Github: ** **Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md index be2a3761fce..bd69544d028 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md @@ -169,7 +169,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - 和更多自定义监控模版。 - 通知支持 `Discord` `Slack` `Telegram` `邮件` `钉钉` `微信` `飞书` `短信` `Webhook`。 ----- +--- 欢迎star一波来支持我们哦。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md index c8c138121d3..a5f0cdd27c6 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md @@ -164,7 +164,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - and more for your custom monitoring. - Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. ----- +--- **Github: ** **Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md index d41901e5ffb..ede6586b0df 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-11-new-committer.md @@ -114,7 +114,7 @@ keywords: [open source monitoring system, alerting system] - issue和pr是你了解的项目的敲门砖,一点要敢于讨论并发表观点。 - 贡献不分大小,要敢于尝试,并不断提升自己。 ----- +--- ## 什么是 HertzBeat? diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md index ddd36937324..1067f96fe51 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md @@ -178,7 +178,7 @@ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MAN - and more for your custom monitoring. - Notifications support `Discord` `Slack` `Telegram` `Mail` `Pinning` `WeChat` `FlyBook` `SMS` `Webhook` `ServerChan`. 
----- +--- **Github: ** **Gitee: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md index 2e8e643ea6d..31c0a33eeaa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/introduce.md @@ -32,7 +32,7 @@ slug: / > `HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。 ----- +--- ### 强大的监控模版 @@ -153,7 +153,7 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 --- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ------ +--- ## 即刻体验一波 @@ -302,6 +302,6 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 **还有更多强大的功能快去探索呀。Have Fun!** ------ +--- **Github: ** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md index 60c550fa547..06600a0a972 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/introduce.md @@ -150,10 +150,10 @@ HertzBeat 使用这些已有的标准协议或规范,将他们抽象规范可 ![cncf](/img/home/cncf-landscape-left-logo.svg) ---- +---- **`HertzBeat`的强大自定义,多类型支持,高性能,易扩展,低耦合,希望能帮助开发者和团队快速搭建自有监控系统。** ------ +---- ## 即刻体验一波 @@ -298,10 +298,10 @@ Docker 环境下运行一条命令即可:`docker run -d -p 1157:1157 -p 1158:1 ![hertzbeat](/img/home/9.png) ---- +---- **还有更多强大的功能快去探索呀。Have Fun!** ------ +---- **Github: ** diff --git a/home/versioned_docs/version-v1.5.x/introduce.md b/home/versioned_docs/version-v1.5.x/introduce.md index 9bf3cd50930..94b3eaf741c 100644 --- a/home/versioned_docs/version-v1.5.x/introduce.md +++ b/home/versioned_docs/version-v1.5.x/introduce.md @@ -34,7 +34,7 @@ slug: / > HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system. 
---- +---- ### Powerful Monitoring Templates @@ -150,11 +150,11 @@ In an isolated network where multiple networks are not connected, we need to dep ![cncf](/img/home/cncf-landscape-left-logo.svg) ---- +---- **HertzBeat's powerful customization, multi-type support, high performance, easy expansion, and low coupling, aims to help users quickly build their own monitoring system.** ------ +---- ## Quickly Start @@ -299,10 +299,10 @@ Built-in support for monitoring types include: ![hertzbeat](/img/home/9.png) ------ +---- **There's so much more to discover. Have Fun!** ------ +---- **Github: ** From 2f4343c4531cb22b24b1969b006ba9b251b3f1bd Mon Sep 17 00:00:00 2001 From: kangli <69385076+pwallk@users.noreply.github.com> Date: Mon, 26 Aug 2024 10:02:21 +0800 Subject: [PATCH 231/257] [Improve] improve markdown format for MD029 (#2604) Co-authored-by: shown Co-authored-by: Jast --- .markdownlint-cli2.jsonc | 2 +- CONTRIBUTING.md | 52 +-- home/blog/2022-09-10-ssl-practice.md | 52 +-- home/blog/2023-01-05-monitor-iotdb.md | 56 +-- home/blog/2023-01-08-monitor-shenyu.md | 80 ++-- home/blog/2023-02-02-monitor-dynamic-tp.md | 120 ++--- home/blog/2023-02-11-monitor-mysql.md | 44 +- home/blog/2023-02-15-monitor-linux.md | 48 +- home/blog/2023-03-22-monitor-springboot2.md | 134 +++--- home/blog/2023-05-11-greptimedb-store.md | 70 +-- home/blog/2023-08-14-hertzbeat-v1.4.0.md | 18 +- home/blog/2023-09-26-hertzbeat-v1.4.1.md | 18 +- home/blog/2023-11-12-hertzbeat-v1.4.2.md | 18 +- home/blog/2023-12-11-hertzbeat-v1.4.3.md | 18 +- home/blog/2024-01-18-hertzbeat-v1.4.4.md | 18 +- .../community/code-style-and-quality-guide.md | 412 ++++++++-------- home/docs/community/contribution.md | 28 +- home/docs/help/activemq.md | 60 +-- home/docs/help/alert_dingtalk.md | 20 +- home/docs/help/alert_discord.md | 34 +- home/docs/help/alert_email.md | 10 +- home/docs/help/alert_enterprise_wechat_app.md | 14 +- home/docs/help/alert_feishu.md | 12 +- home/docs/help/alert_slack.md | 10 +- 
home/docs/help/alert_smn.md | 18 +- home/docs/help/alert_telegram.md | 58 +-- home/docs/help/alert_webhook.md | 10 +- home/docs/help/alert_wework.md | 14 +- home/docs/help/dynamic_tp.md | 82 ++-- home/docs/help/iotdb.md | 46 +- home/docs/help/issue.md | 35 +- home/docs/help/jetty.md | 46 +- home/docs/help/kubernetes.md | 12 +- home/docs/help/nacos.md | 6 +- home/docs/help/nginx.md | 130 +++--- home/docs/help/rabbitmq.md | 6 +- home/docs/help/shenyu.md | 36 +- home/docs/start/custom-config.md | 66 +-- home/docs/start/docker-compose-deploy.md | 50 +- home/docs/start/docker-deploy.md | 180 +++---- home/docs/start/greptime-init.md | 82 ++-- home/docs/start/influxdb-init.md | 72 +-- home/docs/start/iotdb-init.md | 140 +++--- home/docs/start/package-deploy.md | 64 +-- home/docs/start/quickstart.md | 16 +- home/docs/start/sslcert-practice.md | 50 +- home/docs/start/tdengine-init.md | 86 ++-- home/docs/start/victoria-metrics-init.md | 62 +-- .../2022-09-10-ssl-practice.md | 54 +-- .../2023-01-05-monitor-iotdb.md | 56 +-- .../2023-01-08-monitor-shenyu.md | 80 ++-- .../2023-02-02-monitor-dynamic-tp.md | 120 ++--- .../2023-02-11-monitor-mysql.md | 44 +- .../2023-02-15-monitor-linux.md | 48 +- .../2023-03-22-monitor-springboot2.md | 132 +++--- .../2023-05-11-greptimedb-store.md | 72 +-- .../2023-08-14-hertzbeat-v1.4.0.md | 18 +- .../2023-09-26-hertzbeat-v1.4.1.md | 18 +- .../2023-11-12-hertzbeat-v1.4.2.md | 18 +- .../2023-12-11-hertzbeat-v1.4.3.md | 18 +- .../2024-01-18-hertzbeat-v1.4.4.md | 18 +- ...-09-hertzbeat-ospp-subject-introduction.md | 5 +- .../2024-06-11-hertzbeat-v1.6.0-update.md | 410 ++++++++-------- .../community/code-style-and-quality-guide.md | 411 ++++++++-------- .../current/community/contribution.md | 28 +- .../community/new_committer_process.md | 74 +-- .../current/help/activemq.md | 60 +-- .../current/help/alert_dingtalk.md | 14 +- .../current/help/alert_discord.md | 24 +- .../current/help/alert_email.md | 16 +- .../help/alert_enterprise_wechat_app.md | 8 
+- .../current/help/alert_feishu.md | 9 +- .../current/help/alert_slack.md | 8 +- .../current/help/alert_smn.md | 16 +- .../current/help/alert_telegram.md | 56 +-- .../current/help/alert_webhook.md | 8 +- .../current/help/alert_wework.md | 15 +- .../current/help/dynamic_tp.md | 82 ++-- .../current/help/iotdb.md | 46 +- .../current/help/issue.md | 40 +- .../current/help/jetty.md | 46 +- .../current/help/kafka.md | 20 +- .../current/help/kubernetes.md | 12 +- .../current/help/nacos.md | 6 +- .../current/help/nginx.md | 122 ++--- .../current/help/rabbitmq.md | 6 +- .../current/help/shenyu.md | 36 +- .../current/help/zookeeper.md | 26 +- .../current/start/custom-config.md | 88 ++-- .../current/start/docker-compose-deploy.md | 50 +- .../current/start/docker-deploy.md | 158 +++---- .../current/start/greptime-init.md | 92 ++-- .../current/start/influxdb-init.md | 70 +-- .../current/start/iotdb-init.md | 142 +++--- .../current/start/package-deploy.md | 118 ++--- .../current/start/quickstart.md | 20 +- .../current/start/sslcert-practice.md | 53 +-- .../current/start/tdengine-init.md | 94 ++-- .../current/start/update-1.6.0.md | 410 ++++++++-------- .../current/start/victoria-metrics-init.md | 78 ++-- .../community/code-style-and-quality-guide.md | 439 +++++++++--------- .../version-v1.5.x/community/contribution.md | 26 +- .../version-v1.5.x/help/activemq.md | 60 +-- .../version-v1.5.x/help/alert_dingtalk.md | 20 +- .../version-v1.5.x/help/alert_discord.md | 34 +- .../version-v1.5.x/help/alert_email.md | 16 +- .../help/alert_enterprise_wechat_app.md | 14 +- .../version-v1.5.x/help/alert_feishu.md | 12 +- .../version-v1.5.x/help/alert_slack.md | 10 +- .../version-v1.5.x/help/alert_smn.md | 18 +- .../version-v1.5.x/help/alert_telegram.md | 58 +-- .../version-v1.5.x/help/alert_webhook.md | 10 +- .../version-v1.5.x/help/alert_wework.md | 16 +- .../version-v1.5.x/help/dynamic_tp.md | 82 ++-- .../version-v1.5.x/help/iotdb.md | 46 +- .../version-v1.5.x/help/issue.md | 40 +- 
.../version-v1.5.x/help/jetty.md | 46 +- .../version-v1.5.x/help/kafka.md | 20 +- .../version-v1.5.x/help/kubernetes.md | 14 +- .../version-v1.5.x/help/nacos.md | 6 +- .../version-v1.5.x/help/nginx.md | 122 ++--- .../version-v1.5.x/help/rabbitmq.md | 6 +- .../version-v1.5.x/help/shenyu.md | 36 +- .../version-v1.5.x/help/zookeeper.md | 26 +- .../version-v1.5.x/start/custom-config.md | 88 ++-- .../version-v1.5.x/start/docker-deploy.md | 122 ++--- .../version-v1.5.x/start/greptime-init.md | 64 +-- .../version-v1.5.x/start/influxdb-init.md | 70 +-- .../version-v1.5.x/start/iotdb-init.md | 144 +++--- .../version-v1.5.x/start/package-deploy.md | 40 +- .../version-v1.5.x/start/quickstart.md | 20 +- .../version-v1.5.x/start/sslcert-practice.md | 52 +-- .../version-v1.5.x/start/tdengine-init.md | 88 ++-- .../start/victoria-metrics-init.md | 74 +-- .../community/code-style-and-quality-guide.md | 427 +++++++++-------- .../version-v1.5.x/community/contribution.md | 26 +- .../version-v1.5.x/help/activemq.md | 60 +-- .../version-v1.5.x/help/alert_dingtalk.md | 20 +- .../version-v1.5.x/help/alert_discord.md | 34 +- .../version-v1.5.x/help/alert_email.md | 16 +- .../help/alert_enterprise_wechat_app.md | 14 +- .../version-v1.5.x/help/alert_feishu.md | 12 +- .../version-v1.5.x/help/alert_slack.md | 10 +- .../version-v1.5.x/help/alert_smn.md | 18 +- .../version-v1.5.x/help/alert_telegram.md | 58 +-- .../version-v1.5.x/help/alert_webhook.md | 10 +- .../version-v1.5.x/help/alert_wework.md | 16 +- .../version-v1.5.x/help/dynamic_tp.md | 82 ++-- .../version-v1.5.x/help/iotdb.md | 46 +- .../version-v1.5.x/help/issue.md | 35 +- .../version-v1.5.x/help/jetty.md | 46 +- .../version-v1.5.x/help/kubernetes.md | 12 +- .../version-v1.5.x/help/nacos.md | 6 +- .../version-v1.5.x/help/nginx.md | 134 +++--- .../version-v1.5.x/help/rabbitmq.md | 6 +- .../version-v1.5.x/help/shenyu.md | 36 +- .../version-v1.5.x/start/custom-config.md | 66 +-- .../version-v1.5.x/start/docker-deploy.md | 118 ++--- 
.../version-v1.5.x/start/greptime-init.md | 70 +-- .../version-v1.5.x/start/influxdb-init.md | 72 +-- .../version-v1.5.x/start/iotdb-init.md | 138 +++--- .../version-v1.5.x/start/package-deploy.md | 22 +- .../version-v1.5.x/start/quickstart.md | 20 +- .../version-v1.5.x/start/sslcert-practice.md | 50 +- .../version-v1.5.x/start/tdengine-init.md | 86 ++-- .../start/victoria-metrics-init.md | 62 +-- web-app/README.md | 8 +- 167 files changed, 5081 insertions(+), 5082 deletions(-) diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 1c5c7e73949..844b297a9d1 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -29,7 +29,7 @@ "siblings_only": true }, "MD025": true, - "MD029": false, + "MD029": true, "MD033": false, "MD036": false, "MD040": false, diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8b88edf477d..d75b6de2042 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -68,23 +68,23 @@ Of course, if you have a good idea, you can also propose it directly on GitHub D 1. First you need to fork your target [hertzbeat repository](https://github.com/apache/hertzbeat). 2. Then download the code locally with git command: -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` + ```shell + git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended + ``` 3. After the download is complete, please refer to the getting started guide or README file of the target repository to initialize the project. 4. Then, you can refer to the following command to submit the code: -```shell -git checkout -b a-feature-branch #Recommended -``` + ```shell + git checkout -b a-feature-branch #Recommended + ``` 5. Submit the coed as a commit, the commit message format specification required: [module name or type name]feature or bugfix or doc: custom message. 
-```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` + ```shell + git add + git commit -m '[docs]feature: necessary instructions' #Recommended + ``` 6. Push to the remote repository -```shell -git push origin a-feature-branch -``` + ```shell + git push origin a-feature-branch + ``` 7. Then you can initiate a new PR (Pull Request) on GitHub. Please note that the title of the PR needs to conform to our spec, and write the necessary description in the PR to facilitate code review by Committers and other contributors. @@ -218,23 +218,23 @@ Add WeChat account `ahertzbeat` to pull you into the WeChat group. 1. 首先您需要 Fork 目标仓库 [hertzbeat repository](https://github.com/apache/hertzbeat). 2. 然后 用git命令 将代码下载到本地: -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` + ```shell + git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended + ``` 3. 下载完成后,请参考目标仓库的入门指南或者 README 文件对项目进行初始化。 4. 接着,您可以参考如下命令进行代码的提交, 切换新的分支, 进行开发: -```shell -git checkout -b a-feature-branch #Recommended -``` + ```shell + git checkout -b a-feature-branch #Recommended + ``` 5. 提交 commit , commit 描述信息需要符合约定格式: [module name or type name]feature or bugfix or doc: custom message. -```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` + ```shell + git add + git commit -m '[docs]feature: necessary instructions' #Recommended + ``` 6. 推送到远程仓库 -```shell -git push origin a-feature-branch -``` + ```shell + git push origin a-feature-branch + ``` 7. 然后您就可以在 GitHub 上发起新的 PR (Pull Request)。 请注意 PR 的标题需要符合我们的规范,并且在 PR 中写上必要的说明,来方便 Committer 和其他贡献者进行代码审查。 diff --git a/home/blog/2022-09-10-ssl-practice.md b/home/blog/2022-09-10-ssl-practice.md index 340a87b3149..aae58d34db2 100644 --- a/home/blog/2022-09-10-ssl-practice.md +++ b/home/blog/2022-09-10-ssl-practice.md @@ -28,7 +28,7 @@ gitee: 2. 
The `docker` environment can be installed with a single command -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` + `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` 3. Installation success browser visit `localhost:1157` to start, the default account password `admin/hertzbeat @@ -36,63 +36,63 @@ gitee: 1. Click Add SSL Certificate Monitor -> System Page -> Monitor Menu -> SSL Certificates -> New SSL Certificate + > System Page -> Monitor Menu -> SSL Certificates -> New SSL Certificate -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/bd53f343a5b54feab62e71458d076441~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/bd53f343a5b54feab62e71458d076441~tplv-k3u1fbpfcp-zoom-1.image) 2. Configure monitoring website -> Here is an example to monitor Baidu website, configure the host domain name, name, collection interval, etc. > Click OK. -> Click OK. Note that ⚠️ will test the connectivity of the website before adding it by default, and it will add it only if the connection is successful, of course, you can also gray out the **Whether to test** button. + > Here is an example to monitor Baidu website, configure the host domain name, name, collection interval, etc. > Click OK. + > Click OK. Note that ⚠️ will test the connectivity of the website before adding it by default, and it will add it only if the connection is successful, of course, you can also gray out the **Whether to test** button. -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ad1154670648413bb82c8bdeb5b13609~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ad1154670648413bb82c8bdeb5b13609~tplv-k3u1fbpfcp-zoom-1.image) 3. Viewing Test Indicator Data -> You can view the task status in the monitor list, and go into the monitor details to view the metrics data graphs etc. + > You can view the task status in the monitor list, and go into the monitor details to view the metrics data graphs etc. 
-![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/f874b45e909c4bb0acdd28b3fb034a61~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/f874b45e909c4bb0acdd28b3fb034a61~tplv-k3u1fbpfcp-zoom-1.image) -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ef5d7443f8c04818ae5aa28d421203be~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ef5d7443f8c04818ae5aa28d421203be~tplv-k3u1fbpfcp-zoom-1.image) 4. Set the threshold (triggered when the certificate expires) -> System Page -> Alarms -> Alarm Thresholds -> Add Thresholds + > System Page -> Alarms -> Alarm Thresholds -> Add Thresholds -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8d6205172d43463aa34e534477f132f1~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8d6205172d43463aa34e534477f132f1~tplv-k3u1fbpfcp-zoom-1.image) -> Configure thresholds, select SSL certificate indicator object, configure alert expression - triggered when indicator `expired` is `true`, i.e. `equals(expired, "true")` , set alert level notification template message etc. + > Configure thresholds, select SSL certificate indicator object, configure alert expression - triggered when indicator `expired` is `true`, i.e. `equals(expired, "true")` , set alert level notification template message etc. -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/83d17b381d994f26a6240e01915b2001~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/83d17b381d994f26a6240e01915b2001~tplv-k3u1fbpfcp-zoom-1.image) -> Associate thresholds with monitors, set which monitors this threshold should be applied to in the threshold list. + > Associate thresholds with monitors, set which monitors this threshold should be applied to in the threshold list. 
-![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9b9063d7bcf9454387be0491fc382bd1~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9b9063d7bcf9454387be0491fc382bd1~tplv-k3u1fbpfcp-zoom-1.image) 5. set the threshold (triggered one week before certificate expiration) -> Same as above, add a new configuration threshold, configure the alert expression - when the indicator validity timestamp `end_timestamp`, `now()` function for the current timestamp, if you configure to trigger the alert one week in advance i.e.: `end_timestamp <= (now() + 604800000)` , where `604800000` is the total time difference of 7 days. milliseconds. + > Same as above, add a new configuration threshold, configure the alert expression - when the indicator validity timestamp `end_timestamp`, `now()` function for the current timestamp, if you configure to trigger the alert one week in advance i.e.: `end_timestamp <= (now() + 604800000)` , where `604800000` is the total time difference of 7 days. milliseconds. -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/0d6f837f57c247e09f668f60eff4a0ff~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/0d6f837f57c247e09f668f60eff4a0ff~tplv-k3u1fbpfcp-zoom-1.image) -> Eventually you can see the triggered alarms in the alarm center. + > Eventually you can see the triggered alarms in the alarm center. -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/5a61b23127524976b2c209ce0ca6a339~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/5a61b23127524976b2c209ce0ca6a339~tplv-k3u1fbpfcp-zoom-1.image) 6. Alarm notification (timely notification via NailWeChatFlysheet, etc.) 
-> Monitoring System -> Alert Notification -> Add Recipients + > Monitoring System -> Alert Notification -> Add Recipients -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7f36956060ef410a82bbecafcbb2957f~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7f36956060ef410a82bbecafcbb2957f~tplv-k3u1fbpfcp-zoom-1.image) -You can refer to the help file for the token configuration of Nail WeChat Flying Book, etc. + You can refer to the help file for the token configuration of Nail WeChat Flying Book, etc. - - + + -> Alert Notification -> Add new alert notification policy -> Enable notification for the recipients you just configured + > Alert Notification -> Add new alert notification policy -> Enable notification for the recipients you just configured -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/d976343e81f843138344a039f3aff8a3~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/d976343e81f843138344a039f3aff8a3~tplv-k3u1fbpfcp-zoom-1.image) 7. OK When the threshold is triggered, we can receive the corresponding alarm message, if there is no notification, you can also view the alarm information in the alarm center. diff --git a/home/blog/2023-01-05-monitor-iotdb.md b/home/blog/2023-01-05-monitor-iotdb.md index 0f105d8fe53..6ab4d25f11e 100644 --- a/home/blog/2023-01-05-monitor-iotdb.md +++ b/home/blog/2023-01-05-monitor-iotdb.md @@ -31,15 +31,15 @@ tags: [opensource, practice] 1. The metric collection is disabled by default, you need to modify the parameters in `conf/iotdb-metric.yml` first, then restart the server -``` -# Whether to start the monitoring module, the default is false -enableMetric: true - -# Data provision method, externally provide metrics data through jmx and prometheus protocol, optional parameters: [JMX, PROMETHEUS, IOTDB], IOTDB is closed by default. 
-metricReporterList: - - JMX - - PROMETHEUS -``` + ```yaml + # Whether to start the monitoring module, the default is false + enableMetric: true + + # Data provision method, externally provide metrics data through jmx and prometheus protocol, optional parameters: [JMX, PROMETHEUS, IOTDB], IOTDB is closed by default. + metricReporterList: + - JMX + - PROMETHEUS + ``` 2. Restart IoTDB, open a browser or use curl to access , and you can see the metric data. @@ -47,28 +47,28 @@ metricReporterList: 1. Click Add IoTDB Monitoring -Path: Menu -> Database Monitoring -> IoTDB Monitoring -> Add IoTDB Monitoring + Path: Menu -> Database Monitoring -> IoTDB Monitoring -> Add IoTDB Monitoring -![hertzbeat](/img/blog/monitor-iotdb-1.png) + ![hertzbeat](/img/blog/monitor-iotdb-1.png) 2. Configure the parameters required for monitoring IoTDB -Fill in the IoTDB **service IP** and **monitoring port** (default 9091) on the monitoring page, and finally click OK to add. -For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/iotdb/) /iotdb/ + Fill in the IoTDB **service IP** and **monitoring port** (default 9091) on the monitoring page, and finally click OK to add. + For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/iotdb/) /iotdb/ -![hertzbeat](/img/blog/monitor-iotdb-2.png) + ![hertzbeat](/img/blog/monitor-iotdb-2.png) 3. Complete ✅, now we have added the monitoring of IoTDB, check the monitoring list to see our added items. -![hertzbeat](/img/blog/monitor-iotdb-3.png) + ![hertzbeat](/img/blog/monitor-iotdb-3.png) 4. Click **Operation**->**Monitoring Details Icon** of the monitoring list item to browse the real-time monitoring metric data of IoTDB. -![hertzbeat](/img/blog/monitor-iotdb-4.png) + ![hertzbeat](/img/blog/monitor-iotdb-4.png) 5. 
Click **Monitoring History Details TAB** to browse IoTDB's historical monitoring metric data chart📈. -![hertzbeat](/img/blog/monitor-iotdb-5.png) + ![hertzbeat](/img/blog/monitor-iotdb-5.png) **Complete DONE! Through the above steps, it is actually two steps to sum up** @@ -83,29 +83,29 @@ For other parameters such as **collection interval**, **timeout period**, etc., 1. Configure a threshold alarm for an important metric -Path: Menu -> Alarm Threshold -> Add Threshold + Path: Menu -> Alarm Threshold -> Add Threshold -- Select the configured metric object. IotDB monitors many metrics, one of which is related to the status of the node `cluster_node_status` -> `status` (node status, 1=online 2=offline). -- Here we configure to send an alarm when the metric `status==2`, the alarm level is **Critical Alarm**, which is triggered once, as shown in the figure below. + - Select the configured metric object. IotDB monitors many metrics, one of which is related to the status of the node `cluster_node_status` -> `status` (node status, 1=online 2=offline). + - Here we configure to send an alarm when the metric `status==2`, the alarm level is **Critical Alarm**, which is triggered once, as shown in the figure below. -![hertzbeat](/img/blog/monitor-iotdb-6.png) + ![hertzbeat](/img/blog/monitor-iotdb-6.png) 2. Add message notification recipients -Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient + Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient -Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. + Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. 
-- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. -- Configure the receiver parameters in HertzBeat as follows. + - Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. + - Configure the receiver parameters in HertzBeat as follows. -【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 + 【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. Configure the associated alarm notification strategy ⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK] -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### Finished, now wait for the warning message to come. ding ding ding ding diff --git a/home/blog/2023-01-08-monitor-shenyu.md b/home/blog/2023-01-08-monitor-shenyu.md index 37681ff86b8..c271fa9a231 100644 --- a/home/blog/2023-01-08-monitor-shenyu.md +++ b/home/blog/2023-01-08-monitor-shenyu.md @@ -42,27 +42,27 @@ tags: [opensource, practice] 1. Add the `metrics plugin` dependency to the `pom.xml` file of the gateway. -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` + ```xml + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + + ``` 2. 
`metric` plugin Capture is turned off by default, edit the following in the gateway's configuration `yaml` file: -```yaml -shenyu: - metrics: - enabled: true #Set to true to enable - name: prometheus - host: 127.0.0.1 #exposed ip - port: 8090 #Exposed port - jmxConfig: #jmx configuration - props: #jvm_enabled: true - jvm_enabled: true #Enable monitoring metrics for jvm -``` + ```yaml + shenyu: + metrics: + enabled: true #Set to true to enable + name: prometheus + host: 127.0.0.1 #exposed ip + port: 8090 #Exposed port + jmxConfig: #jmx configuration + props: #jvm_enabled: true + jvm_enabled: true #Enable monitoring metrics for jvm + ``` 3. Restart ShenYu Gateway, open a browser or use curl to access `http://ip:8090`, you can see the metric data. @@ -70,30 +70,30 @@ shenyu: 1. Click Add ShenYu Monitor -Path: Menu -> Middleware Monitor -> ShenYu Monitor -> Add ShenYu Monitor + Path: Menu -> Middleware Monitor -> ShenYu Monitor -> Add ShenYu Monitor -![hertzbeat](/img/blog/monitor-shenyu-1.png) + ![hertzbeat](/img/blog/monitor-shenyu-1.png) 2. Configure the parameters required for monitoring ShenYu -On the monitor page, fill in ShenYu **service IP**, **monitor port** (default 8090), and click OK to add. -For other parameters such as **collection interval**, **timeout**, etc., you can refer to the [help file](https://hertzbeat.com/docs/help/shenyu/) + On the monitor page, fill in ShenYu **service IP**, **monitor port** (default 8090), and click OK to add. + For other parameters such as **collection interval**, **timeout**, etc., you can refer to the [help file](https://hertzbeat.com/docs/help/shenyu/) -![hertzbeat](/img/blog/monitor-shenyu-1.png) + ![hertzbeat](/img/blog/monitor-shenyu-1.png) 3. Done ✅, now we have added monitoring for ShenYu, check the monitor list to see our additions. -![hertzbeat](/img/blog/monitor-shenyu-3.png) + ![hertzbeat](/img/blog/monitor-shenyu-3.png) 4. 
Click **Options**->**Monitoring Details icon** in the monitor list to view ShenYu's real-time monitoring metrics. -![hertzbeat](/img/blog/monitor-shenyu-4.png) + ![hertzbeat](/img/blog/monitor-shenyu-4.png) 5. Click the **Monitor History TAB** to view ShenYu's historical monitoring metrics graphs 📈. -![hertzbeat](/img/blog/monitor-shenyu-5.png) + ![hertzbeat](/img/blog/monitor-shenyu-5.png) -![hertzbeat](/img/blog/monitor-shenyu-6.png) + ![hertzbeat](/img/blog/monitor-shenyu-6.png) **DONE! With the above steps, it's really only two steps** @@ -111,33 +111,33 @@ Of course, just looking at it is not perfect, monitoring is often accompanied by 1. Configure an alarm threshold for an important metric. -Path: Menu -> Alert Thresholds -> Add Thresholds + Path: Menu -> Alert Thresholds -> Add Thresholds -- There are a lot of metrics in ShenYu monitoring, for example, we will set the threshold for the `number of open file descriptors` `process_open_fds` -> `value` metric, which will alert you when the number of open file descriptors on the server side is greater than 3,000. -- Here we configure an alert to be issued when the `value' of`process_open_fds` exceeds 3000, with an alert level of **Warning alert**, which is triggered three times, as shown in the following figure. + - There are a lot of metrics in ShenYu monitoring, for example, we will set the threshold for the `number of open file descriptors` `process_open_fds` -> `value` metric, which will alert you when the number of open file descriptors on the server side is greater than 3,000. + - Here we configure an alert to be issued when the `value' of`process_open_fds` exceeds 3000, with an alert level of **Warning alert**, which is triggered three times, as shown in the following figure. -![hertzbeat](/img/blog/monitor-shenyu-7.png) + ![hertzbeat](/img/blog/monitor-shenyu-7.png) 2. Add message notification recipients -> Configure recipients to let alert message know who to send to and in what way. 
+ > Configure recipients to let alert message know who to send to and in what way. -Path: Menu -> Alert Notification -> Alert Recipients -> Add New Recipient. + Path: Menu -> Alert Notification -> Alert Recipients -> Add New Recipient. -Message notification methods support **Email, Nail, WeChat, Flybook, WebHook, SMS**, etc. Here we take the commonly used Nail as an example. + Message notification methods support **Email, Nail, WeChat, Flybook, WebHook, SMS**, etc. Here we take the commonly used Nail as an example. -- Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) Configure the bot on the pinning side, set the security customization keyword `HertzBeat`, get the corresponding `access_token` value. -- Configure the recipient parameters in HertzBeat as follows. + - Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) Configure the bot on the pinning side, set the security customization keyword `HertzBeat`, get the corresponding `access_token` value. + - Configure the recipient parameters in HertzBeat as follows. -[Alert Notification] -> [Add Recipient] -> [Select Nailed Bot Notification Method] -> [Set Nailed Bot ACCESS_TOKEN] -> [OK] + [Alert Notification] -> [Add Recipient] -> [Select Nailed Bot Notification Method] -> [Set Nailed Bot ACCESS_TOKEN] -> [OK] -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. Configure the associated alert notification policy ⚠️ [Add Notification Policy] -> [Associate the recipient you just set] -> [OK] ! -> Configure the alert notification policy to bind alert messages to recipients so that you can decide which alerts go to which person. + > Configure the alert notification policy to bind alert messages to recipients so that you can decide which alerts go to which person. -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### Over and out, now wait for the alert message to come through. 
Ding, ding, ding, ding diff --git a/home/blog/2023-02-02-monitor-dynamic-tp.md b/home/blog/2023-02-02-monitor-dynamic-tp.md index 83980106d3c..9cc7d45a3ca 100644 --- a/home/blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/blog/2023-02-02-monitor-dynamic-tp.md @@ -36,76 +36,76 @@ tags: [opensource, practice] 1. Enable the SpringBoot Actuator Endpoint to expose the `DynamicTp` metrics interface. -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` + ```yaml + management: + endpoints: + web: + exposure: + include: '*' + ``` 2. Reboot and test access to the metrics interface `ip:port/actuator/dynamic-tp` to see if it responds with json data as follows. -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4 GB", - "totalMemory": "444 MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81 GB" - } -] -``` + ```json + [ + { + "poolName": "commonExecutor", + "corePoolSize": 1, + "maximumPoolSize": 1, + "queueType": "LinkedBlockingQueue", + "queueCapacity": 2147483647, + "queueSize": 0, + "fair": false, + "queueRemainingCapacity": 2147483647, + "activeCount": 0, + "taskCount": 0, + "completedTaskCount": 0, + "largestPoolSize": 0, + "poolSize": 0, + "waitTaskCount": 0, + "rejectCount": 0, + "rejectHandlerName": null, + "dynamic": false, + "runTimeoutCount": 0, + "queueTimeoutCount": 0 + }, + { + "maxMemory": "4 GB", + "totalMemory": "444 MB", + "freeMemory": "250.34 MB", + "usableMemory": "3.81 GB" + } + ] + ``` #### ii. To add DynamicTp thread pool monitoring to the HertzBeat monitoring page 1. 
Click Add DynamicTp Monitor -Path: Menu -> Middleware Monitor -> DynamicTp Monitor -> Add DynamicTp Monitor + Path: Menu -> Middleware Monitor -> DynamicTp Monitor -> Add DynamicTp Monitor -![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) 2. Configure the parameters required for monitoring DynamicTp. -On the monitor page, fill in DynamicTp **service IP**, **monitoring port** (default 8080), and finally click OK to add it. -For other parameters such as **collection interval**, **timeout**, etc., you can refer to [help](https://hertzbeat.com/docs/help/dynamic_tp/) + On the monitor page, fill in DynamicTp **service IP**, **monitoring port** (default 8080), and finally click OK to add it. + For other parameters such as **collection interval**, **timeout**, etc., you can refer to [help](https://hertzbeat.com/docs/help/dynamic_tp/) -![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) 3. Done ✅, now we have added monitoring for DynamicTp, check the monitor list to see our additions. -![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) 4. Click **Options**->**Monitor Details icon** in the Monitor list to view the real-time monitoring metrics of the DynamicTp thread pool. -![hertzbeat](/img/blog/monitor-dynamic-tp-3.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-3.png) 5. Click the **Monitoring History TAB** to view a graphical representation of the historical monitoring metrics for the DynamicTp thread pool 📈. -![hertzbeat](/img/blog/monitor-dynamic-tp-4.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-4.png) -![hertzbeat](/img/blog/monitor-dynamic-tp-5.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-5.png) **DONE! With the above steps, it's really just two steps** @@ -123,33 +123,33 @@ Of course, just watching is not perfect, monitoring is often accompanied by alar 1. Configure an alarm threshold for an important metric. 
-Path: Menu -> Alert Thresholds -> Add Thresholds + Path: Menu -> Alert Thresholds -> Add Thresholds -- DynamicTp monitors some thread pool related metrics, for example, we set the threshold for the `run_timeout_count` `thread_pool_running` -> `run_timeout_count` metric, which will raise an alarm when the thread_timeout_count is greater than one. -- Here we configure an alert to be issued when `thread_pool_running` has a `run_timeout_count>1`, with an alert level of **Serious Alert**, which is triggered three times, as shown in the following figure. + - DynamicTp monitors some thread pool related metrics, for example, we set the threshold for the `run_timeout_count` `thread_pool_running` -> `run_timeout_count` metric, which will raise an alarm when the thread_timeout_count is greater than one. + - Here we configure an alert to be issued when `thread_pool_running` has a `run_timeout_count>1`, with an alert level of **Serious Alert**, which is triggered three times, as shown in the following figure. -![hertzbeat](/img/blog/monitor-dynamic-tp-6.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-6.png) 2. Add message notification recipients -> Configure recipients to let alert message know who to send to and in what way. + > Configure recipients to let alert message know who to send to and in what way. -Path: Menu -> Alert Notification -> Alert Recipient -> Add Recipient. + Path: Menu -> Alert Notification -> Alert Recipient -> Add Recipient. -Message notification methods support **Email, Dingtalk, WeChat, Flybook, WebHook, SMS**, etc. We take the commonly used Dingtalk as an example. + Message notification methods support **Email, Dingtalk, WeChat, Flybook, WebHook, SMS**, etc. We take the commonly used Dingtalk as an example. -- Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) Configure the bot on Dingtalk side, set the security customization keyword `HertzBeat`, get the corresponding `access_token` value. 
-- Configure the recipient parameters in HertzBeat as follows. + - Refer to this [help document](https://hertzbeat.com/docs/help/alert_dingtalk) Configure the bot on Dingtalk side, set the security customization keyword `HertzBeat`, get the corresponding `access_token` value. + - Configure the recipient parameters in HertzBeat as follows. -[Alert Notification] -> [Add Recipient] -> [Choose Dingtalk bot notification method] -> [Set Dingtalk bot ACCESS_TOKEN] -> [OK] + [Alert Notification] -> [Add Recipient] -> [Choose Dingtalk bot notification method] -> [Set Dingtalk bot ACCESS_TOKEN] -> [OK] -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. Configure the associated alert notification policy ⚠️ [Add Notification Policy] -> [Associate the recipient you just set] -> [OK] ! -> Configure the alert notification policy to bind alert messages to recipients so that you can decide which alerts go to which person. + > Configure the alert notification policy to bind alert messages to recipients so that you can decide which alerts go to which person. -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding diff --git a/home/blog/2023-02-11-monitor-mysql.md b/home/blog/2023-02-11-monitor-mysql.md index fa0519a7cde..485855fa814 100644 --- a/home/blog/2023-02-11-monitor-mysql.md +++ b/home/blog/2023-02-11-monitor-mysql.md @@ -33,28 +33,28 @@ Keywords: [Open source monitoring tool, open source database monitoring, Mysql d 1. Click Add Mysql Monitoring -Path: Menu -> Database Monitoring -> Mysql Database -> Add Mysql Database Monitoring + Path: Menu -> Database Monitoring -> Mysql Database -> Add Mysql Database Monitoring -![hertzbeat](/img/blog/monitor-mysql-1.png) + ![hertzbeat](/img/blog/monitor-mysql-1.png) 2. 
Configure the parameters required for the new monitoring Mysql database -On the monitoring page, fill in Mysql **service IP**, **monitoring port** (default 3306), **account password, etc.**, and finally click OK to add. -For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/mysql/) /mysql/ + On the monitoring page, fill in Mysql **service IP**, **monitoring port** (default 3306), **account password, etc.**, and finally click OK to add. + For other parameters such as **collection interval**, **timeout period**, etc., please refer to [Help Documentation](https://hertzbeat.com/docs/help/mysql/) /mysql/ -![hertzbeat](/img/blog/monitor-mysql-2.png) + ![hertzbeat](/img/blog/monitor-mysql-2.png) 3. Complete ✅, now we have added the monitoring of the Mysql database, check the monitoring list to see our added items. -![hertzbeat](/img/blog/monitor-mysql-1.png) + ![hertzbeat](/img/blog/monitor-mysql-1.png) 4. Click **Operation**->**Monitoring Details Icon** of the monitoring list item to browse the real-time monitoring metric data of the Mysql database. -![hertzbeat](/img/blog/monitor-mysql-3.png) + ![hertzbeat](/img/blog/monitor-mysql-3.png) 5. Click **Monitoring History Details TAB** to browse the historical monitoring metric data chart of Mysql database📈. -![hertzbeat](/img/blog/monitor-mysql-4.png) + ![hertzbeat](/img/blog/monitor-mysql-4.png) **DONE! Done! Through the above steps, in fact, it only takes one step to sum up** @@ -71,35 +71,35 @@ Of course, just looking at it is definitely not perfect. Monitoring is often acc 1. Configure an alarm threshold for an important metric -Path: Menu -> Threshold Rules -> Add Threshold + Path: Menu -> Threshold Rules -> Add Threshold -- Select the configured metric object. Mysql database monitoring is mainly about database performance and other related metrics. 
For example, we set the threshold for the metric `query cache hit rate` `cache` -> `query_cache_hit_rate`. When the query cache hit rate of Mysql is very low An alarm is issued when it is less than 30%. -- Here we configure to send an alarm when the `query_cache_hit_rate<30` of this metric `cache`, the alarm level is **serious alarm**, and it will be triggered after three times, as shown in the figure below. + - Select the configured metric object. Mysql database monitoring is mainly about database performance and other related metrics. For example, we set the threshold for the metric `query cache hit rate` `cache` -> `query_cache_hit_rate`. When the query cache hit rate of Mysql is very low An alarm is issued when it is less than 30%. + - Here we configure to send an alarm when the `query_cache_hit_rate<30` of this metric `cache`, the alarm level is **serious alarm**, and it will be triggered after three times, as shown in the figure below. -![hertzbeat](/img/blog/monitor-mysql-5.png) + ![hertzbeat](/img/blog/monitor-mysql-5.png) -![hertzbeat](/img/blog/monitor-mysql-6.png) + ![hertzbeat](/img/blog/monitor-mysql-6.png) 2. Add message notification recipients -> Configure the receiver to let the alarm message know who to send and how to send it. + > Configure the receiver to let the alarm message know who to send and how to send it. -Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient + Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient -Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. + Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. 
-- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. -- Configure the receiver parameters in HertzBeat as follows. + - Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. + - Configure the receiver parameters in HertzBeat as follows. -【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 + 【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. Configure the associated alarm notification strategy ⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK] -> Configure the alarm notification policy to bind the alarm message with the receiver, so that you can decide which alarms to send to whom. + > Configure the alarm notification policy to bind the alarm message with the receiver, so that you can decide which alarms to send to whom. -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### Finished, now wait for the warning message to come. ding ding ding ding diff --git a/home/blog/2023-02-15-monitor-linux.md b/home/blog/2023-02-15-monitor-linux.md index 5b41eefc41b..9128c72f40f 100644 --- a/home/blog/2023-02-15-monitor-linux.md +++ b/home/blog/2023-02-15-monitor-linux.md @@ -30,32 +30,32 @@ Github: 1. 
Click Add Linux Monitoring -Path: Menu -> Operating System Monitoring -> Linux Operating System -> Add Linux Operating System Monitoring + Path: Menu -> Operating System Monitoring -> Linux Operating System -> Add Linux Operating System Monitoring -![hertzbeat](/img/blog/monitor-linux-1.png) + ![hertzbeat](/img/blog/monitor-linux-1.png) 2. Configure the parameters required for new monitoring Linux -Fill in the Linux **peer IP**, **SSH port** (default 22), **account password, etc.** on the monitoring page, and finally click OK to add. -For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document + Fill in the Linux **peer IP**, **SSH port** (default 22), **account password, etc.** on the monitoring page, and finally click OK to add. + For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document -![hertzbeat](/img/blog/monitor-linux-2.png) + ![hertzbeat](/img/blog/monitor-linux-2.png) 3. Complete ✅, now we have added the monitoring of Linux, check the monitoring list to see our added items. -![hertzbeat](/img/blog/monitor-linux-3.png) + ![hertzbeat](/img/blog/monitor-linux-3.png) 4. Click **Operation**->**Monitoring Details Icon** of the monitoring list item to browse the real-time monitoring metric data of Linux. -![hertzbeat](/img/blog/monitor-linux-4.png) + ![hertzbeat](/img/blog/monitor-linux-4.png) -![hertzbeat](/img/blog/monitor-linux-7.png) + ![hertzbeat](/img/blog/monitor-linux-7.png) 5. Click **Monitoring History Details TAB** to browse the historical monitoring metric data chart of Linux📈. -![hertzbeat](/img/blog/monitor-linux-5.png) + ![hertzbeat](/img/blog/monitor-linux-5.png) -![hertzbeat](/img/blog/monitor-linux-6.png) + ![hertzbeat](/img/blog/monitor-linux-6.png) **DONE! Done! To sum up, it only takes one step** @@ -72,35 +72,35 @@ Of course, just looking at it is definitely not perfect. Monitoring is often acc 1. 
Configure an alarm threshold for an important metric -Path: Menu -> Threshold Rules -> Add Threshold + Path: Menu -> Threshold Rules -> Add Threshold -- Select the configured metric object. Linux monitors mainly related metrics such as cpu, memory, disk, network performance, etc. For example, we set the threshold for the metric `CPU utilization` `cpu` -> `usage`. When the Linux cpu utilization is greater than 90% When a warning is issued. -- Here we configure to send an alarm when the `usage>90` of this metric `cpu`, the alarm level is **Warning Alarm**, which will be triggered after three times, as shown in the figure below. + - Select the configured metric object. Linux monitors mainly related metrics such as cpu, memory, disk, network performance, etc. For example, we set the threshold for the metric `CPU utilization` `cpu` -> `usage`. When the Linux cpu utilization is greater than 90% When a warning is issued. + - Here we configure to send an alarm when the `usage>90` of this metric `cpu`, the alarm level is **Warning Alarm**, which will be triggered after three times, as shown in the figure below. -![hertzbeat](/img/blog/monitor-linux-8.png) + ![hertzbeat](/img/blog/monitor-linux-8.png) -![hertzbeat](/img/blog/monitor-linux-9.png) + ![hertzbeat](/img/blog/monitor-linux-9.png) 2. Add message notification recipients -> Configure the receiver to let the alarm message know who to send and how to send it. + > Configure the receiver to let the alarm message know who to send and how to send it. -Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient + Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient -Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. + Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. 
-- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. -- Configure the receiver parameters in HertzBeat as follows. + - Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. + - Configure the receiver parameters in HertzBeat as follows. -【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 + 【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. Configure the associated alarm notification strategy ⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK] -> Configure the alarm notification policy to bind the alarm message with the receiver, so that you can decide which alarms to send to whom. + > Configure the alarm notification policy to bind the alarm message with the receiver, so that you can decide which alarms to send to whom. -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### Finished, now wait for the warning message to come. ding ding ding ding diff --git a/home/blog/2023-03-22-monitor-springboot2.md b/home/blog/2023-03-22-monitor-springboot2.md index 6c669bfc9fb..2772d358420 100644 --- a/home/blog/2023-03-22-monitor-springboot2.md +++ b/home/blog/2023-03-22-monitor-springboot2.md @@ -34,81 +34,81 @@ Github: 1. 
Open SpringBoot Actuator Endpoint to expose `metrics health env` metric interface -```yaml -management: - endpoints: - web: - exposure: - include: - - 'metrics' - - 'health' - - 'env' - enabled-by-default: on -``` + ```yaml + management: + endpoints: + web: + exposure: + include: + - 'metrics' + - 'health' + - 'env' + enabled-by-default: on + ``` 2. After restarting, test whether the access metric interface `ip:port/actuator` has response json data as follows: -```json -{ - "_links": { - "self": { - "href": "http://localhost:1157/actuator", - "templated": false - }, - "health-path": { - "href": "http://localhost:1157/actuator/health/{*path}", - "templated": true - }, - "health": { - "href": "http://localhost:1157/actuator/health", - "templated": false - }, - "env": { - "href": "http://localhost:1157/actuator/env", - "templated": false - }, - "env-toMatch": { - "href": "http://localhost:1157/actuator/env/{toMatch}", - "templated": true - }, - "metrics-requiredMetricName": { - "href": "http://localhost:1157/actuator/metrics/{requiredMetricName}", - "templated": true - }, - "metrics": { - "href": "http://localhost:1157/actuator/metrics", - "templated": false - } - } -} -``` + ```json + { + "_links": { + "self": { + "href": "http://localhost:1157/actuator", + "templated": false + }, + "health-path": { + "href": "http://localhost:1157/actuator/health/{*path}", + "templated": true + }, + "health": { + "href": "http://localhost:1157/actuator/health", + "templated": false + }, + "env": { + "href": "http://localhost:1157/actuator/env", + "templated": false + }, + "env-toMatch": { + "href": "http://localhost:1157/actuator/env/{toMatch}", + "templated": true + }, + "metrics-requiredMetricName": { + "href": "http://localhost:1157/actuator/metrics/{requiredMetricName}", + "templated": true + }, + "metrics": { + "href": "http://localhost:1157/actuator/metrics", + "templated": false + } + } + } + ``` #### Add SpringBoot2 application monitoring in the HertzBeat monitoring ui 1. 
Click to add SpringBoot2 monitoring -Path: Menu -> Application Service Monitoring -> SpringBoot2 -> Add SpringBoot2 Monitoring + Path: Menu -> Application Service Monitoring -> SpringBoot2 -> Add SpringBoot2 Monitoring -![hertzbeat](/img/blog/monitor-springboot2-1.png) + ![hertzbeat](/img/blog/monitor-springboot2-1.png) 2. Configure the parameters required for new monitoring SpringBoot2 -Fill in the SpringBoot2 application **peer IP**, **service port** (default 8080), **account password, etc.** on the monitoring page, and finally click OK to add. -For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document + Fill in the SpringBoot2 application **peer IP**, **service port** (default 8080), **account password, etc.** on the monitoring page, and finally click OK to add. + For other parameters such as **collection interval**, **timeout period**, etc., please refer to the help document -![hertzbeat](/img/blog/monitor-springboot2-2.png) + ![hertzbeat](/img/blog/monitor-springboot2-2.png) 3. Complete ✅, now we have added the monitoring of the SpringBoot2 application, check the monitoring list to see our additions. -![hertzbeat](/img/blog/monitor-springboot2-3.png) + ![hertzbeat](/img/blog/monitor-springboot2-3.png) 4. Click **Operation**->**Monitoring Details Icon** of the monitoring list item to browse the real-time monitoring metric data of the SpringBoot2 application. -![hertzbeat](/img/blog/monitor-springboot2-4.png) + ![hertzbeat](/img/blog/monitor-springboot2-4.png) 5. Click **Monitoring History Details TAB** to browse the historical monitoring metric data chart of the SpringBoot2 application📈. -![hertzbeat](/img/blog/monitor-springboot2-5.png) + ![hertzbeat](/img/blog/monitor-springboot2-5.png) **DONE! Done! It doesn't require us to deploy agents or various cumbersome operations, isn't it very simple** @@ -125,35 +125,35 @@ Of course, it is impossible to manually check the metrics in real time. Monitori 1. 
Configure an alarm threshold for an important metric

-Path: Menu -> Threshold Rules -> Add Threshold
+   Path: Menu -> Threshold Rules -> Add Threshold

-- Select the configured metric object. SpringBoot2 application monitoring mainly focuses on stack memory threads and other related metrics. For example, we set the threshold for the metric `threads` -> `threads`. When the number of threads in the `runnable` state is greater than At 300 an alert is issued.
-- Here we configure to send an alarm when `size`, `state` of `equals(state, "runnable"") && size>300` of this metric, the alarm level is **warning alarm**, which will be triggered three times, specifically As shown below.
+   - Select the configured metric object. SpringBoot2 application monitoring mainly focuses on stack memory threads and other related metrics. For example, we set the threshold for the metric `threads` -> `threads`. When the number of threads in the `runnable` state is greater than 300, an alert is issued.
+   - Here we configure to send an alarm when `size`, `state` of `equals(state, "runnable") && size>300` of this metric, the alarm level is **warning alarm**, which will be triggered three times, specifically as shown below.

-![hertzbeat](/img/blog/monitor-springboot2-6.png)
+   ![hertzbeat](/img/blog/monitor-springboot2-6.png)

-![hertzbeat](/img/blog/monitor-springboot2-7.png)
+   ![hertzbeat](/img/blog/monitor-springboot2-7.png)

2. Add message notification recipients

-> Configure the receiver to let the alarm message know who to send and how to send it.
+   > Configure the receiver to let the alarm message know who to send and how to send it.

-Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient
+   Path: Menu -> Alarm Notification -> Alarm Recipient -> Add New Recipient

-Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example.
+ Message notification methods support **email, DingTalk, WeChat Work, Feishu, WebHook, SMS**, etc. Here we take the commonly used DingTalk as an example. -- Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. -- Configure the receiver parameters in HertzBeat as follows. + - Refer to this [Help Documentation](https://hertzbeat.com/docs/help/alert_dingtalk) to configure the robot on DingTalk and set the security custom keyword `HertzBeat`, get the corresponding `access_token` value. + - Configure the receiver parameters in HertzBeat as follows. -【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 + 【Alarm Notification】->【New Recipient】->【Select DingTalk Robot Notification Method】->【Set DingTalk Robot ACCESS_TOKEN】->【OK】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. Configure the associated alarm notification strategy ⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK] -> Configure the alarm notification policy to bind the alarm message with the receiver, so that you can decide which alarms to send to whom. + > Configure the alarm notification policy to bind the alarm message with the receiver, so that you can decide which alarms to send to whom. -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### Finished, now wait for the warning message to come. ding ding ding ding diff --git a/home/blog/2023-05-11-greptimedb-store.md b/home/blog/2023-05-11-greptimedb-store.md index 210a41f5420..24e3bb07a83 100644 --- a/home/blog/2023-05-11-greptimedb-store.md +++ b/home/blog/2023-05-11-greptimedb-store.md @@ -43,16 +43,16 @@ You can refer to the [official documentation](https://docs.greptime.com/getting- 1. 
Docker installation of GreptimeDB

-```shell
-$ docker run -p 4000-4004:4000-4004 \
-    -p 4242:4242 -v "$(pwd)/greptimedb:/tmp/greptimedb" \
-    --name greptime \
-    greptime/greptimedb:0.2.0 standalone start \
-    --http-addr 0.0.0.0.0:4000 \
-    --rpc-addr 0.0.0.0:4001
-```
+   ```shell
+   $ docker run -p 4000-4004:4000-4004 \
+       -p 4242:4242 -v "$(pwd)/greptimedb:/tmp/greptimedb" \
+       --name greptime \
+       greptime/greptimedb:0.2.0 standalone start \
+       --http-addr 0.0.0.0:4000 \
+       --rpc-addr 0.0.0.0:4001
+   ```

-- `-v "$(pwd)/greptimedb:/tmp/greptimedb"` is the local persistent mount for the greptimeDB data directory, it is recommended to replace `$(pwd)/greptimedb` with the actual local directory you want to specify for storage.
+   - `-v "$(pwd)/greptimedb:/tmp/greptimedb"` is the local persistent mount for the greptimeDB data directory, it is recommended to replace `$(pwd)/greptimedb` with the actual local directory you want to specify for storage.

2. Use ``$ docker ps | grep greptime`` to see if GreptimeDB started successfully.

@@ -62,21 +62,21 @@ See the [official documentation](https://hertzbeat.com/zh-cn/docs/start/docker-d

1. Docker installs HertzBeat.
-```shell -$ docker run -d -p 1157:1157 \ - -e LANG=zh_CN.UTF-8 \ - -e TZ=Asia/Shanghai \ - -v /opt/data:/opt/hertzbeat/data \ - -v /opt/application.yml:/opt/hertzbeat/config/application.yml \ - --restart=always \ - --name hertzbeat apache/hertzbeat -``` + ```shell + $ docker run -d -p 1157:1157 \ + -e LANG=zh_CN.UTF-8 \ + -e TZ=Asia/Shanghai \ + -v /opt/data:/opt/hertzbeat/data \ + -v /opt/application.yml:/opt/hertzbeat/config/application.yml \ + --restart=always \ + --name hertzbeat apache/hertzbeat + ``` -- `-v /opt/data:/opt/hertzbeat/data` : (Optional, data persistence) Important ⚠️ Mount the H2 database files to the local host to ensure that the data will not be lost due to the creation and deletion of the container + - `-v /opt/data:/opt/hertzbeat/data` : (Optional, data persistence) Important ⚠️ Mount the H2 database files to the local host to ensure that the data will not be lost due to the creation and deletion of the container -- `-v /opt/application.yml:/opt/hertzbeat/config/application.yml` : Mount customized local configuration files to the container, i.e. use local configuration files to overwrite the container configuration files. + - `-v /opt/application.yml:/opt/hertzbeat/config/application.yml` : Mount customized local configuration files to the container, i.e. use local configuration files to overwrite the container configuration files. -Note that the ⚠️ local mount configuration file `application.yml` needs to exist in advance, and the full contents of the file can be found in the project repository [/script/application.yml]( application.yml) + Note that the ⚠️ local mount configuration file `application.yml` needs to exist in advance, and the full contents of the file can be found in the project repository [/script/application.yml]( application.yml) 2. Go to with the default account and password admin/hertzbeat to see if HertzBeat starts successfully. 
@@ -84,25 +84,25 @@ Note that the ⚠️ local mount configuration file `application.yml` needs to e 1. Modify the HertzBeat configuration file. -Modify the locally mounted HertzBeat configuration file [application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml), in package mode modify `hertzbeat/ config/application.yml + Modify the locally mounted HertzBeat configuration file [application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml), in package mode modify `hertzbeat/ config/application.yml -**Modify the `warehouse.store.jpa.enabled` parameter in there to `false`, configure the `warehouse.store.greptime` datasource parameter in there, the URL account password, and enable `enabled` to `true`**. + **Modify the `warehouse.store.jpa.enabled` parameter in there to `false`, configure the `warehouse.store.greptime` datasource parameter in there, the URL account password, and enable `enabled` to `true`**. -```yaml -warehouse: - store: - jpa: - enabled: false - greptime: - enabled: true - endpoint: localhost:4001 -``` + ```yaml + warehouse: + store: + jpa: + enabled: false + greptime: + enabled: true + endpoint: localhost:4001 + ``` 2. Restart HertzBeat. -```shell -docker restart hertzbeat -``` + ```shell + docker restart hertzbeat + ``` #### Observe the authentication effect diff --git a/home/blog/2023-08-14-hertzbeat-v1.4.0.md b/home/blog/2023-08-14-hertzbeat-v1.4.0.md index 3e3762b91b4..b769ec923b3 100644 --- a/home/blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/blog/2023-08-14-hertzbeat-v1.4.0.md @@ -75,23 +75,23 @@ As for open source commercialization, the premise of open source commercializati 1. 
Just one command to get started: -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```or use quay.io (if dockerhub network connect timeout)``` + ```or use quay.io (if dockerhub network connect timeout)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 3. Deploy collector clusters -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_IP=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ``` + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_IP=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -* `-e MANAGER_IP=127.0.0.1` : set the main hertzbeat server ip. -* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. + * `-e IDENTITY=custom-collector-name` : set the collector unique identity name. + * `-e MANAGER_IP=127.0.0.1` : set the main hertzbeat server ip. + * `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/blog/2023-09-26-hertzbeat-v1.4.1.md b/home/blog/2023-09-26-hertzbeat-v1.4.1.md index cbb1d74324d..e04b55bc71f 100644 --- a/home/blog/2023-09-26-hertzbeat-v1.4.1.md +++ b/home/blog/2023-09-26-hertzbeat-v1.4.1.md @@ -58,23 +58,23 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 1. 
Just one command to get started: -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```or use quay.io (if dockerhub network connect timeout)``` + ```or use quay.io (if dockerhub network connect timeout)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 3. Deploy collector clusters -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ``` + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. + * `-e IDENTITY=custom-collector-name` : set the collector unique identity name. + * `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. + * `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/blog/2023-11-12-hertzbeat-v1.4.2.md b/home/blog/2023-11-12-hertzbeat-v1.4.2.md index 76747ce3989..d59e5076d4c 100644 --- a/home/blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/blog/2023-11-12-hertzbeat-v1.4.2.md @@ -40,23 +40,23 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 1. 
Just one command to get started: -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```or use quay.io (if dockerhub network connect timeout)``` + ```or use quay.io (if dockerhub network connect timeout)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 3. Deploy collector clusters -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ``` + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. + * `-e IDENTITY=custom-collector-name` : set the collector unique identity name. + * `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. + * `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/blog/2023-12-11-hertzbeat-v1.4.3.md b/home/blog/2023-12-11-hertzbeat-v1.4.3.md index 14a01fb4153..966946b4f81 100644 --- a/home/blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/blog/2023-12-11-hertzbeat-v1.4.3.md @@ -46,23 +46,23 @@ Compatible with the Prometheus ecosystem, now we can monitor what Prometheus can 1. 
Just one command to get started: -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```or use quay.io (if dockerhub network connect timeout)``` + ```or use quay.io (if dockerhub network connect timeout)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 3. Deploy collector clusters -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ``` + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. + * `-e IDENTITY=custom-collector-name` : set the collector unique identity name. + * `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. + * `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/blog/2024-01-18-hertzbeat-v1.4.4.md b/home/blog/2024-01-18-hertzbeat-v1.4.4.md index a69c099723e..420d9e1457f 100644 --- a/home/blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/blog/2024-01-18-hertzbeat-v1.4.4.md @@ -47,23 +47,23 @@ keywords: [open source monitoring system, alerting system] 1. 
Just one command to get started: -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```or use quay.io (if dockerhub network connect timeout)``` + ```or use quay.io (if dockerhub network connect timeout)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 3. Deploy collector clusters -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ``` + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -* `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -* `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -* `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. + * `-e IDENTITY=custom-collector-name` : set the collector unique identity name. + * `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. + * `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/docs/community/code-style-and-quality-guide.md b/home/docs/community/code-style-and-quality-guide.md index 8be58aa2d84..7e65b0c0a92 100644 --- a/home/docs/community/code-style-and-quality-guide.md +++ b/home/docs/community/code-style-and-quality-guide.md @@ -206,9 +206,9 @@ Error code description: 2. Ensure code readability and intuitiveness -- The string in the `annotation` symbol doesn't need to be extracted as constant. 
+ - The string in the `annotation` symbol doesn't need to be extracted as constant. -- The referenced `package` or `resource` name doesn't need to be extracted as constant. + - The referenced `package` or `resource` name doesn't need to be extracted as constant. 3. Variables that have not been reassigned must also be declared as final types. @@ -250,17 +250,17 @@ Error code description: - returns Set if use `HashSet` 2. If there are multiple threads, the following declaration or returned types can be used: -```java -private CurrentHashMap map; -public CurrentHashMap funName(); -``` + ```java + private CurrentHashMap map; + public CurrentHashMap funName(); + ``` 3. Use `isEmpty()` instead of `length() == 0` or `size() == 0` - Negative demo: ```java if (pathPart.length() == 0) { - return; + return; } ``` @@ -268,7 +268,7 @@ public CurrentHashMap funName(); ```java if (pathPart.isEmpty()) { - return; + return; } ``` @@ -295,89 +295,89 @@ Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to conti to reduce code line depth and improve readability like follows: - Union or merge the logic into the next level calling -- Negative demo: + - Negative demo: -```java -if (isInsert) { -save(platform); -} else { -updateById(platform); -} -``` + ```java + if (isInsert) { + save(platform); + } else { + updateById(platform); + } + ``` -- Positive demo: + - Positive demo: -```java -saveOrUpdate(platform); -``` + ```java + saveOrUpdate(platform); + ``` - Merge the conditions -- Negative demo: + - Negative demo: -```java -if (expression1) { -if(expression2) { -...... -} -} - -``` + ```java + if (expression1) { + if (expression2) { + ...... + } + } + + ``` -- Positive demo: + - Positive demo: - ```java - if (expression1 && expression2) { - ...... - } - ``` + ```java + if (expression1 && expression2) { + ...... + } + ``` - Reverse the condition -- Negative demo: - - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... 
- if (condition1) { - ... - } else { - ... - } - } - ``` - -- Positive demo: - - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` + - Negative demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (condition1) { + // ... + } else { + // ... + } + } + ``` + + - Positive demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (!condition1) { + // ... + return; + } + // ... + } + ``` - Using a single variable or method to reduce the complex conditional expression -- Negative demo: + - Negative demo: - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + // ... + } + ``` -- Positive demo: + - Positive demo: - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // definition of the containsSqlServer - ``` + ```java + if (containsSqlServer(dbType)) { + // .... + } + //..... + // definition of the containsSqlServer + ``` > Using `sonarlint` and `better highlights` to check code depth looks like good in the future. @@ -385,20 +385,20 @@ if(expression2) { 1. Method lacks comments: -- `When`: When can the method be called -- `How`: How to use this method and how to pass parameters, etc. -- `What`: What functions does this method achieve -- `Note`: What should developers pay attention to when calling this method + - `When`: When can the method be called + - `How`: How to use this method and how to pass parameters, etc. + - `What`: What functions does this method achieve + - `Note`: What should developers pay attention to when calling this method 2. Missing necessary class header description comments. -Add `What`, `Note`, etc. like mentioned in the `1`. + Add `What`, `Note`, etc. like mentioned in the `1`. 3. 
The method declaration in the interface must be annotated. -- If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. + - If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. -- If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. + - If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. 4. The first word in the comment lines need to be capitalized, like `param` lines, `return` lines. If a special reference as a subject does not need to be capitalized, special symbols such as quotation marks need to be noted. @@ -408,31 +408,31 @@ Add `What`, `Note`, etc. like mentioned in the `1`. 1. Prefer `non-capturing` lambdas (lambdas that do not contain references to the outer scope). Capturing lambdas need to create a new object instance for every call. `Non-capturing` lambdas can use the same instance for each invocation. -- Negative demo: + - Negative demo: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` -- Positive demo: + - Positive demo: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. 
Consider method references instead of inline lambdas -- Negative demo: + - Negative demo: - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` -- Positive demo: + - Positive demo: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -450,127 +450,127 @@ Add `What`, `Note`, etc. like mentioned in the `1`. 1. Use `StringUtils.isBlank` instead of `StringUtils.isEmpty` -- Negative demo: + - Negative demo: - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. Use `StringUtils.isNotBlank` instead of `StringUtils.isNotEmpty` -- Negative demo: + - Negative demo: - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. Use `StringUtils.isAllBlank` instead of `StringUtils.isAllEmpty` -- Negative demo: + - Negative demo: - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` Class 1. 
Enumeration value comparison -- Negative demo: + - Negative demo: - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. Enumeration classes do not need to implement Serializable -- Negative demo: + - Negative demo: - ```java - public enum JobStatus implements Serializable { - ... - } - ``` + ```java + public enum JobStatus implements Serializable { + // ... + } + ``` -- Positive demo: + - Positive demo: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + // ... + } + ``` 3. Use `Enum.name()` instead of `Enum.toString()` -- Negative demo: + - Negative demo: - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` -- Positive demo: + - Positive demo: - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. Enumeration class names uniformly use the Enum suffix -- Negative demo: + - Negative demo: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + // ... + } + ``` -- Positive demo: + - Positive demo: - ```java - public enum JobStatusEnum { - ... - } - ``` + ```java + public enum JobStatusEnum { + // ... + } + ``` ### 3.13 `Deprecated` Annotation @@ -579,7 +579,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ```java @deprecated public void process(String input) { - ... + // ... } ``` @@ -588,7 +588,7 @@ public void process(String input) { ```java @Deprecated public void process(String input) { - ... + // ... } ``` @@ -596,43 +596,43 @@ public void process(String input) { 1. 
Use `placeholders` for log output: -- Negative demo + - Negative demo - ```java - log.info("Deploy cluster request " + deployRequest); - ``` + ```java + log.info("Deploy cluster request " + deployRequest); + ``` -- Positive demo + - Positive demo - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` 2. Pay attention to the selection of `log level` when printing logs -When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. + When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. -- Negative demo: + - Negative demo: - Assuming the current log level is `INFO`: + Assuming the current log level is `INFO`: - ```java - // ignored declaration lines. - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` + ```java + // ignored declaration lines. + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` -- Positive demo: + - Positive demo: - In this case, we should determine the log level in advance before making actual log calls as follows: + In this case, we should determine the log level in advance before making actual log calls as follows: - ```java - // ignored declaration lines. - List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + ```java + // ignored declaration lines. 
+ List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 Testing diff --git a/home/docs/community/contribution.md b/home/docs/community/contribution.md index 9a634ffffed..af7d20bfec7 100644 --- a/home/docs/community/contribution.md +++ b/home/docs/community/contribution.md @@ -88,33 +88,33 @@ Of course, if you have a good idea, you can also propose it directly on GitHub D 1. First you need to fork your target [hertzbeat repository](https://github.com/apache/hertzbeat). 2. Then download the code locally with git command: -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` + ```shell + git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended + ``` 3. After the download is complete, please refer to the getting started guide or README file of the target repository to initialize the project. 4. Then, you can refer to the following command to submit the code: -```shell -git checkout -b a-feature-branch #Recommended -``` + ```shell + git checkout -b a-feature-branch #Recommended + ``` 5. Submit the coed as a commit, the commit message format specification required: [module name or type name]feature or bugfix or doc: custom message. -```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` + ```shell + git add + git commit -m '[docs]feature: necessary instructions' #Recommended + ``` 6. Push to the remote repository -```shell -git push origin a-feature-branch -``` + ```shell + git push origin a-feature-branch + ``` 7. Then you can initiate a new PR (Pull Request) on GitHub. -Please note that the title of the PR needs to conform to our spec, and write the necessary description in the PR to facilitate code review by Committers and other contributors. 
+ Please note that the title of the PR needs to conform to our spec, and write the necessary description in the PR to facilitate code review by Committers and other contributors. ### Wait for the code to be merged diff --git a/home/docs/help/activemq.md b/home/docs/help/activemq.md index ef3cc911969..ee014e7ce8c 100644 --- a/home/docs/help/activemq.md +++ b/home/docs/help/activemq.md @@ -15,40 +15,40 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] 1. Modify the `conf/activemq.xml` file in the installation directory to enable JMX -> Add `userJmx="true"` attribute in `broker` tag + > Add `userJmx="true"` attribute in `broker` tag -```xml - - - -``` + ```xml + + + + ``` 2. Modify the `bin/env` file in the installation directory, configure the JMX port IP, etc. -The original configuration information will be as follows - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -Update to the following configuration, ⚠️ pay attention to modify `local external IP` - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START 
-Dcom.sun.management.jmxremote.authenticate=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` + The original configuration information will be as follows + + ```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` + + Update to the following configuration, ⚠️ pay attention to modify `local external IP` + + ```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` 3. Restart the ACTIVEMQ service, and add the corresponding ActiveMQ monitoring in HertzBeat. The parameters use the IP port configured by JMX. 
diff --git a/home/docs/help/alert_dingtalk.md b/home/docs/help/alert_dingtalk.md index 36e332d9b21..fdac2211c74 100644 --- a/home/docs/help/alert_dingtalk.md +++ b/home/docs/help/alert_dingtalk.md @@ -11,31 +11,31 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin 1. **【DingDing desktop client】-> 【Group settings】-> 【Intelligent group assistant】-> 【Add new robot-select custom】-> 【Set robot name and avatar】-> 【Note⚠️Set custom keywords: HertzBeat】 ->【Copy its webhook address after adding successfully】** -> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. + > Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. -![email](/img/docs/help/alert-notice-8.png) + ![email](/img/docs/help/alert-notice-8.png) 2. **【Save access_token value of the WebHook address of the robot】** -> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` -> Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【Alarm notification】->【Add new recipient】 ->【Select DingDing robot notification method】->【Set DingDing robot ACCESS_TOKEN】-> 【Confirm】** -![email](/img/docs/help/alert-notice-9.png) + ![email](/img/docs/help/alert-notice-9.png) 4. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### DingDing robot common issues 1. DingDing group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether DingDing robot is configured with security custom keywords :HertzBeat. -> Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether DingDing robot is configured with security custom keywords :HertzBeat. + > Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_discord.md b/home/docs/help/alert_discord.md index 68296148f22..6eb18982dba 100644 --- a/home/docs/help/alert_discord.md +++ b/home/docs/help/alert_discord.md @@ -13,56 +13,56 @@ keywords: [open source monitoring tool, open source alerter, open source Discord 1. 
Visit [https://discord.com/developers/applications](https://discord.com/developers/applications) to create an application -![bot](/img/docs/help/discord-bot-1.png) + ![bot](/img/docs/help/discord-bot-1.png) 2. Create a robot under the application and get the robot Token -![bot](/img/docs/help/discord-bot-2.png) + ![bot](/img/docs/help/discord-bot-2.png) -![bot](/img/docs/help/discord-bot-3.png) + ![bot](/img/docs/help/discord-bot-3.png) 3. Authorize the bot to the chat server -> Authorize the robot under the OAuth2 menu, select `bot` for `SCOPES`, `BOT PERMISSIONS` select `Send Messages` + > Authorize the robot under the OAuth2 menu, select `bot` for `SCOPES`, `BOT PERMISSIONS` select `Send Messages` -![bot](/img/docs/help/discord-bot-4.png) + ![bot](/img/docs/help/discord-bot-4.png) -> Obtain the URL generated at the bottom, and the browser accesses this URL to officially authorize the robot, that is, to set which chat server the robot will join. + > Obtain the URL generated at the bottom, and the browser accesses this URL to officially authorize the robot, that is, to set which chat server the robot will join. 4. Check if your chat server has joined robot members -![bot](/img/docs/help/discord-bot-5.png) + ![bot](/img/docs/help/discord-bot-5.png) ### Enable developer mode and get Channel ID 1. Personal Settings -> Advanced Settings -> Enable Developer Mode -![bot](/img/docs/help/discord-bot-6.png) + ![bot](/img/docs/help/discord-bot-6.png) 2. Get channel Channel ID -> Right-click the chat channel you want to send the robot message to, click the COPY ID button to get the Channel ID + > Right-click the chat channel you want to send the robot message to, click the COPY ID button to get the Channel ID -![bot](/img/docs/help/discord-bot-7.png) + ![bot](/img/docs/help/discord-bot-7.png) ### Add an alarm notification person in HertzBeat, the notification method is Discord Bot 1. 
**[Alarm notification] -> [Add recipient] -> [Select Discord robot notification method] -> [Set robot Token and ChannelId] -> [OK]** -![email](/img/docs/help/discord-bot-8.png) + ![email](/img/docs/help/discord-bot-8.png) -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** +2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. + > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Discord Bot Notification FAQ 1. Discord doesn't receive bot alert notifications -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured -> Please check whether the bot is properly authorized by the Discord chat server + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured + > Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/docs/help/alert_email.md b/home/docs/help/alert_email.md index 1fc45db80f4..fd12b4eb164 100644 --- a/home/docs/help/alert_email.md +++ b/home/docs/help/alert_email.md @@ -11,23 +11,23 @@ keywords: [open source monitoring tool, open source alerter, open source email n 1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** -![email](/img/docs/help/alert-notice-1.png) + ![email](/img/docs/help/alert-notice-1.png) 2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) + ![email](/img/docs/help/alert-notice-3.png) 3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### Email notification common issues 1. Hertzbeat deployed on its own intranet cannot receive email notifications -> Hertzbeat needs to configure its own mail server. Please confirm whether you have configured its own mail server in application.yml + > Hertzbeat needs to configure its own mail server. Please confirm whether you have configured its own mail server in application.yml Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/docs/help/alert_enterprise_wechat_app.md b/home/docs/help/alert_enterprise_wechat_app.md index 1d5d41a15bc..352981b7d85 100644 --- a/home/docs/help/alert_enterprise_wechat_app.md +++ b/home/docs/help/alert_enterprise_wechat_app.md @@ -11,24 +11,24 @@ keywords: [open source monitoring tool, open source alerter, open source Enterpr 1. **【Enterprise Wechat backstage】-> 【App Management】-> 【Create an app】-> 【Set App message】->【Copy AgentId and Secret adding successfully】** -![email](/img/docs/help/alert-wechat-1.jpg) + ![email](/img/docs/help/alert-wechat-1.jpg) 2. **【Alarm notification】->【Add new recipient】 ->【Select Enterprise WeChat App notification method】->【Set Enterprise WeChat ID,Enterprise App ID and Enterprise App Secret 】-> 【Confirm】** -![email](/img/docs/help/alert-wechat-2.jpg) + ![email](/img/docs/help/alert-wechat-2.jpg) 3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-wechat-3.jpg) + ![email](/img/docs/help/alert-wechat-3.jpg) ### Enterprise WeChat App common issues 1. Enterprise WeChat App did not receive the alarm notification. -> Please check if the user has application permissions. -> Please check if the enterprise application callback address settings are normal. -> Please check if the server IP is on the enterprise application whitelist. + > Please check if the user has application permissions. 
+ > Please check if the enterprise application callback address settings are normal. + > Please check if the server IP is on the enterprise application whitelist. Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_feishu.md b/home/docs/help/alert_feishu.md index 38f7c72cf03..80efffd0305 100644 --- a/home/docs/help/alert_feishu.md +++ b/home/docs/help/alert_feishu.md @@ -13,22 +13,22 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** 4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### FeiShu robot notification common issues 1. FeiShu group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. 
-> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_slack.md b/home/docs/help/alert_slack.md index 26bde4ed2e5..7b5c395c729 100644 --- a/home/docs/help/alert_slack.md +++ b/home/docs/help/alert_slack.md @@ -17,19 +17,19 @@ Refer to the official website document [Sending messages using Incoming Webhooks 1. **【Alarm Notification】->【Add Recipient】->【Select Slack Webhook Notification Method】->【Set Webhook URL】-> 【OK】** -![email](/img/docs/help/slack-bot-1.png) + ![email](/img/docs/help/slack-bot-1.png) 2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. + > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Slack Notification FAQ 1. 
Slack did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_smn.md b/home/docs/help/alert_smn.md index 53774315561..a49ff926801 100644 --- a/home/docs/help/alert_smn.md +++ b/home/docs/help/alert_smn.md @@ -11,33 +11,33 @@ keywords: [ open source monitoring tool, open source alerter, open source Huawei 1. **According to [Huawei Cloud SMN Official Document](https://support.huaweicloud.com/qs-smn/smn_json.html) activate the SMN service and configure SMN** -![alert-notice-10](/img/docs/help/alert-notice-10.png) + ![alert-notice-10](/img/docs/help/alert-notice-10.png) 2. **Save topic URN for SMN** -![alert-notice-11](/img/docs/help/alert-notice-11.png) + ![alert-notice-11](/img/docs/help/alert-notice-11.png) 3. **According to [Huawei Cloud Signature Document](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html) obtain AK, SK, and project ID** -![alert-notice-12](/img/docs/help/alert-notice-12.png) + ![alert-notice-12](/img/docs/help/alert-notice-12.png) -![alert-notice-13](/img/docs/help/alert-notice-13.png) + ![alert-notice-13](/img/docs/help/alert-notice-13.png) 4. **【Alarm Notification】->【Add Recipient】->【Select Slack Webhook Notification Method】->【Set Huawei Cloud SMN AK, SK and other configurations】-> 【OK】** -![alert-notice-14](/img/docs/help/alert-notice-14.png) + ![alert-notice-14](/img/docs/help/alert-notice-14.png) 5. 
**Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. + > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### Huawei Cloud SMN Notification FAQ 1. Huawei Cloud SMN did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_telegram.md b/home/docs/help/alert_telegram.md index 1689788f0f4..c33d2b0c72b 100644 --- a/home/docs/help/alert_telegram.md +++ b/home/docs/help/alert_telegram.md @@ -13,32 +13,32 @@ keywords: [open source monitoring tool, open source alerter, open source Telegra 1. Use [@BotFather](https://t.me/BotFather) to create your own bot and get an access token `Token` -![telegram-bot](/img/docs/help/telegram-bot-1.png) + ![telegram-bot](/img/docs/help/telegram-bot-1.png) 2. 
Get the `User ID` of the recipient -**Use the recipient account you want to notify to send a message to the newly created Bot account**, -Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token from the previous step to replace the `**, and respond to the first in the `Json` data A `result.message.from.id` value is the recipient's `User ID` - -```json -{ - "ok": true, - "result": [ - { - "update_id": 632299191, - "message": { - "from":{ - "id": "User ID" - }, - "chat":{ - }, - "date": 1673858065, - "text": "111" + **Use the recipient account you want to notify to send a message to the newly created Bot account**, + Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token from the previous step to replace the `**, and respond to the first in the `Json` data A `result.message.from.id` value is the recipient's `User ID` + + ```json + { + "ok": true, + "result": [ + { + "update_id": 632299191, + "message": { + "from":{ + "id": "User ID" + }, + "chat":{ + }, + "date": 1673858065, + "text": "111" + } } - } - ] -} -``` + ] + } + ``` 3. Record and save the `Token` and `User Id` we got @@ -46,20 +46,20 @@ Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token 1. **【Alarm Notification】->【Add Recipient】->【Select Telegram Robot Notification Method】->【Set Robot Token and UserId】-> 【OK】** -![email](/img/docs/help/telegram-bot-2.png) + ![email](/img/docs/help/telegram-bot-2.png) -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** +2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. 
+ > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Telegram Bot Notification FAQ 1. Telegram did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured -> UserId should be the UserId of the recipient of the message + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured + > UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_webhook.md b/home/docs/help/alert_webhook.md index d1741d71481..9d0db1f9ed3 100644 --- a/home/docs/help/alert_webhook.md +++ b/home/docs/help/alert_webhook.md @@ -11,13 +11,13 @@ keywords: [open source monitoring tool, open source alerter, open source webhook 1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** -![email](/img/docs/help/alert-notice-5.png) + ![email](/img/docs/help/alert-notice-5.png) 2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### WebHook callback POST body BODY content @@ -60,7 +60,7 @@ Content format:JSON 1. WebHook callback did not take effect -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the configured webhook callback address is correct. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether the configured webhook callback address is correct. Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/alert_wework.md b/home/docs/help/alert_wework.md index ce344200301..db0b0061757 100644 --- a/home/docs/help/alert_wework.md +++ b/home/docs/help/alert_wework.md @@ -11,20 +11,20 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -![email](/img/docs/help/alert-notice-6.jpg) + ![email](/img/docs/help/alert-notice-6.jpg) 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. 
**【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** -![email](/img/docs/help/alert-notice-7.png) + ![email](/img/docs/help/alert-notice-7.png) 4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** ![email](/img/docs/help/alert-notice-4.png) @@ -32,7 +32,7 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 1. The enterprise wechat group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/docs/help/dynamic_tp.md b/home/docs/help/dynamic_tp.md index 332767b2a39..72e6389fdde 100644 --- a/home/docs/help/dynamic_tp.md +++ b/home/docs/help/dynamic_tp.md @@ -11,53 +11,53 @@ keywords: [open source monitoring tool, open source dynamicTp monitoring tool, m 1. Integration Using `DynamicTp` -`DynamicTp` is a lightweight dynamic thread pool based on the configuration center of the Jvm language. 
It has built-in monitoring and alarm functions, which can be realized through SPI custom extensions. + `DynamicTp` is a lightweight dynamic thread pool based on the configuration center of the Jvm language. It has built-in monitoring and alarm functions, which can be realized through SPI custom extensions. -For integrated use, please refer to the document [Quick Start](https://dynamictp.cn/guide/use/quick-start.html) + For integrated use, please refer to the document [Quick Start](https://dynamictp.cn/guide/use/quick-start.html) 2. Open SpringBoot Actuator Endpoint to expose `DynamicTp` Metric interface -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` + ```yaml + management: + endpoints: + web: + exposure: + include: '*' + ``` -Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: + Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4GB", - "totalMemory": "444MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81GB" - } -] -``` + ```json + [ + { + "poolName": "commonExecutor", + "corePoolSize": 1, + "maximumPoolSize": 1, + "queueType": "LinkedBlockingQueue", + "queueCapacity": 2147483647, + "queueSize": 0, + "fair": false, + "queueRemainingCapacity": 2147483647, + "activeCount": 0, + "taskCount": 0, + "completedTaskCount": 0, + "largestPoolSize": 0, + "poolSize": 0, + "waitTaskCount": 0, + "rejectCount": 0, + "rejectHandlerName": 
null, + "dynamic": false, + "runTimeoutCount": 0, + "queueTimeoutCount": 0 + }, + { + "maxMemory": "4GB", + "totalMemory": "444MB", + "freeMemory": "250.34 MB", + "usableMemory": "3.81GB" + } + ] + ``` 3. Add DynamicTp monitoring under HertzBeat middleware monitoring diff --git a/home/docs/help/iotdb.md b/home/docs/help/iotdb.md index 011b9cbec12..c87f4fba7d6 100644 --- a/home/docs/help/iotdb.md +++ b/home/docs/help/iotdb.md @@ -17,29 +17,29 @@ The main steps are as follows: 1. The metric collection is disabled by default, you need to modify the parameters in `conf/iotdb-metric.yml` first, then restart the server -``` -# Whether to start the monitoring module, the default is false -enableMetric: true - -# Whether to enable operation delay statistics -enablePerformanceStat: false - -# Data provision method, externally provide metrics data through jmx and prometheus protocol, optional parameters: [JMX, PROMETHEUS, IOTDB], IOTDB is closed by default. -metricReporterList: - - JMX - - PROMETHEUS - -# The metric architecture used at the bottom layer, optional parameters: [MICROMETER, DROPWIZARD] -monitorType: MICROMETER - -# Initialize the level of the metric, optional parameters: [CORE, IMPORTANT, NORMAL, ALL] -metricLevel: IMPORTANT - -# Predefined metrics set, optional parameters: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] -predefinedMetrics: - - JVM - - FILE -``` + ``` + # Whether to start the monitoring module, the default is false + enableMetric: true + + # Whether to enable operation delay statistics + enablePerformanceStat: false + + # Data provision method, externally provide metrics data through jmx and prometheus protocol, optional parameters: [JMX, PROMETHEUS, IOTDB], IOTDB is closed by default. 
+ metricReporterList: + - JMX + - PROMETHEUS + + # The metric architecture used at the bottom layer, optional parameters: [MICROMETER, DROPWIZARD] + monitorType: MICROMETER + + # Initialize the level of the metric, optional parameters: [CORE, IMPORTANT, NORMAL, ALL] + metricLevel: IMPORTANT + + # Predefined metrics set, optional parameters: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] + predefinedMetrics: + - JVM + - FILE + ``` 2. Restart IoTDB, open a browser or use curl to access http://servier_ip:9091/metrics, and you can see the metric data. diff --git a/home/docs/help/issue.md b/home/docs/help/issue.md index 9904fab6551..baa62af2101 100644 --- a/home/docs/help/issue.md +++ b/home/docs/help/issue.md @@ -8,50 +8,49 @@ sidebar_label: Common issues 1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http + > As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http 2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** -> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) + > Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) 3. 
Ping connectivity monitoring exception when installing hertzbeat for package deployment. The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. + > When you install HertzBeat via Docker, Docker root is enabled by default. No such problem. + > See ### Docker Deployment common issues 1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2.
**According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> ->> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. + > two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. + > three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + > one:Check whether database account and password configured is correct, the database is created. + > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. ### Package Deployment common issues 1. 
**According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. -> three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. + > two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. + > three: Check whether the running log has errors in the `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + > one:Check whether database account and password configured is correct, the database is created. + > two:If you install TDengine 2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server.
diff --git a/home/docs/help/jetty.md b/home/docs/help/jetty.md index ccec65b5559..2a3f69c13e4 100644 --- a/home/docs/help/jetty.md +++ b/home/docs/help/jetty.md @@ -19,33 +19,33 @@ keywords: [open source monitoring tool, open source jetty web server monitoring 1. Start the JMX JMX-REMOTE module in Jetty -```shell -java -jar $JETTY_HOME/start.jar --add-module=jmx -java -jar $JETTY_HOME/start.jar --add-module=jmx-remote -``` + ```shell + java -jar $JETTY_HOME/start.jar --add-module=jmx + java -jar $JETTY_HOME/start.jar --add-module=jmx-remote + ``` -Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file + Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file 2. Edit the `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file to modify the JMX IP port and other parameters. -**`localhost` needs to be modified to expose the IP** - -```text -## The host/address to bind the RMI server to. -# jetty.jmxremote.rmiserverhost=localhost - -## The port the RMI server listens to (0 means a random port is chosen). -# jetty.jmxremote.rmiserverport=1099 - -## The host/address to bind the RMI registry to. -# jetty.jmxremote.rmiregistryhost=localhost - -## The port the RMI registry listens to. -# jetty.jmxremote.rmiregistryport=1099 - -## The host name exported in the RMI stub. --Djava.rmi.server.hostname=localhost -``` + **`localhost` needs to be modified to expose the IP** + + ```text + ## The host/address to bind the RMI server to. + # jetty.jmxremote.rmiserverhost=localhost + + ## The port the RMI server listens to (0 means a random port is chosen). + # jetty.jmxremote.rmiserverport=1099 + + ## The host/address to bind the RMI registry to. + # jetty.jmxremote.rmiregistryhost=localhost + + ## The port the RMI registry listens to. + # jetty.jmxremote.rmiregistryport=1099 + + ## The host name exported in the RMI stub. + -Djava.rmi.server.hostname=localhost + ``` 3. Restart Jetty Server. 
diff --git a/home/docs/help/kubernetes.md b/home/docs/help/kubernetes.md index d7e6b657ea6..836c84f3818 100644 --- a/home/docs/help/kubernetes.md +++ b/home/docs/help/kubernetes.md @@ -17,15 +17,15 @@ Refer to the steps to obtain token 1. Create a service account and bind the default cluster-admin administrator cluster role -```kubectl create serviceaccount dashboard-admin -n kube-system``` + ```kubectl create serviceaccount dashboard-admin -n kube-system``` 2. User Authorization -```shell -kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin -kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' -kubectl describe secret {secret} -n kube-system -``` + ```shell + kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin + kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' + kubectl describe secret {secret} -n kube-system + ``` ### method two diff --git a/home/docs/help/nacos.md b/home/docs/help/nacos.md index f7c8815521f..3eeafc9ac47 100644 --- a/home/docs/help/nacos.md +++ b/home/docs/help/nacos.md @@ -14,9 +14,9 @@ keywords: [open source monitoring tool, open source middleware monitoring tool, 1. Deploy the Nacos cluster according to [deployment document](https://nacos.io/en-us/docs/deployment.html). 2. Configure the application. properties file to expose metrics data. -``` -management.endpoints.web.exposure.include=* -``` + ``` + management.endpoints.web.exposure.include=* + ``` 3. Access ```{ip}:8848/nacos/actuator/prometheus``` to see if metrics data can be accessed. diff --git a/home/docs/help/nginx.md b/home/docs/help/nginx.md index a5662be985f..ce7ccf536a4 100644 --- a/home/docs/help/nginx.md +++ b/home/docs/help/nginx.md @@ -17,95 +17,97 @@ If you want to monitor information in 'Nginx' with this monitoring type, you nee 1. 
Check if `ngx_http_stub_status_module` has been added -```shell -nginx -V -``` + ```shell + nginx -V + ``` -View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. + View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. 2. Compile and install Nginx, add `ngx_http_stub_status_module` module -Download Nginx and unzip it, execute the following command in the directory + Download Nginx and unzip it, execute the following command in the directory -```shell - -./configure --prefix=/usr/local/nginx --with-http_stub_status_module - -make && make install -``` + ```shell + + ./configure --prefix=/usr/local/nginx --with-http_stub_status_module + + make && make install + ``` 3. Modify Nginx configure file -Modify the `nginx.conf` file and add the monitoring module exposed endpoint, as follows: + Modify the `nginx.conf` file and add the monitoring module exposed endpoint, as follows: -```shell -# modify nginx.conf -server { - listen 80; # port - server_name localhost; - location /nginx-status { - stub_status on; - access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } -} -``` + ```shell + # modify nginx.conf + server { + listen 80; # port + server_name localhost; + location /nginx-status { + stub_status on; + access_log on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } + } + ``` 4. Reload Nginx -```shell - -nginx -s reload -``` + ```shell + + nginx -s reload + ``` 5. Access `http://localhost/nginx-status` in the browser to view the Nginx monitoring status information. 
### Enable `ngx_http_reqstat_module` -```shell -# install `ngx_http_reqstat_module` -wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip - -unzip ngx_req_status.zip - -patch -p1 < ../ngx_req_status-master/write_filter.patch - -./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master - -make -j2 - -make install -``` +1. Install `ngx_http_reqstat_module` + + ```shell + # install `ngx_http_reqstat_module` + wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip + + unzip ngx_req_status.zip + + patch -p1 < ../ngx_req_status-master/write_filter.patch + + ./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master + + make -j2 + + make install + ``` 2. Modify Nginx configure file -update `nginx.conf` file, add status module exposed endpoint, as follows: - -```shell -# modify nginx.conf -http { - req_status_zone server_name $server_name 256k; - req_status_zone server_addr $server_addr 256k; - - req_status server_name server_addr; - - server { - location /req-status { - req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + update `nginx.conf` file, add status module exposed endpoint, as follows: + + ```shell + # modify nginx.conf + http { + req_status_zone server_name $server_name 256k; + req_status_zone server_addr $server_addr 256k; + + req_status server_name server_addr; + + server { + location /req-status { + req_status_show on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } } } -} -``` + ``` 3. Reload Nginx -```shell - -nginx -s reload -``` + ```shell + + nginx -s reload + ``` 4. Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. 
diff --git a/home/docs/help/rabbitmq.md b/home/docs/help/rabbitmq.md index e49d572ee72..91fad16ff1e 100644 --- a/home/docs/help/rabbitmq.md +++ b/home/docs/help/rabbitmq.md @@ -14,9 +14,9 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo 1. Open the Management plugin, or use the self-opening version -```shell -rabbitmq-plugins enable rabbitmq_management -``` + ```shell + rabbitmq-plugins enable rabbitmq_management + ``` 2. Access with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. diff --git a/home/docs/help/shenyu.md b/home/docs/help/shenyu.md index aa4a43a8d5c..7bc5f61bdc0 100644 --- a/home/docs/help/shenyu.md +++ b/home/docs/help/shenyu.md @@ -17,27 +17,27 @@ Two Steps Mainly: 1. add metrics plugin dependency in gateway's pom.xml. -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` + ```xml + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + + ``` 2. modify this config in shenyu gateway yaml. -```yaml -shenyu: - metrics: - enabled: false #false is close, true is open - name : prometheus - host: 127.0.0.1 - port: 8090 - jmxConfig: - props: - jvm_enabled: true #enable jvm monitoring -``` + ```yaml + shenyu: + metrics: + enabled: false #false is close, true is open + name : prometheus + host: 127.0.0.1 + port: 8090 + jmxConfig: + props: + jvm_enabled: true #enable jvm monitoring + ``` Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond to prometheus format data. diff --git a/home/docs/start/custom-config.md b/home/docs/start/custom-config.md index 7f45b5dd27d..b88a2ff4af4 100644 --- a/home/docs/start/custom-config.md +++ b/home/docs/start/custom-config.md @@ -16,44 +16,44 @@ The installation package can be decompressed and modified in `hertzbeat/config/a 1. 
Configure the SMS sending server -> Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. + > Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. -Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) + Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) -```yaml -common: - sms: - tencent: - secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY - secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA - app-id: 1435441637 - sign-name: XX Technology - template-id: 1343434 -``` + ```yaml + common: + sms: + tencent: + secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY + secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA + app-id: 1435441637 + sign-name: XX Technology + template-id: 1343434 + ``` 2. Configure alarm custom parameters -```yaml -alerter: - # Custom console address - console-url: https://console.tancloud.io -``` + ```yaml + alerter: + # Custom console address + console-url: https://console.tancloud.io + ``` 3. Use external redis instead of memory to store real-time metric data -> By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. - -Note ⚠️ `memory.enabled: false, redis.enabled: true` - -```yaml -warehouse: - store: - memory: - enabled: false - init-size: 1024 - redis: - enabled: true - host: 127.0.0.1 - port: 6379 - password: 123456 -``` + > By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. 
+ + Note ⚠️ `memory.enabled: false, redis.enabled: true` + + ```yaml + warehouse: + store: + memory: + enabled: false + init-size: 1024 + redis: + enabled: true + host: 127.0.0.1 + port: 6379 + password: 123456 + ``` diff --git a/home/docs/start/docker-compose-deploy.md b/home/docs/start/docker-compose-deploy.md index f365eff86a2..9caefdbf626 100644 --- a/home/docs/start/docker-compose-deploy.md +++ b/home/docs/start/docker-compose-deploy.md @@ -15,47 +15,47 @@ Run the `docker compose version` command to check if you have a Docker Compose e 1. Download the startup script package -Download the installation script package `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` from the [download](https://github.com/apache/hertzbeat/releases/download/v1.6.0/apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz) + Download the installation script package `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` from the [download](https://github.com/apache/hertzbeat/releases/download/v1.6.0/apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz) 2. Choose to use the HertzBeat + PostgreSQL + VictoriaMetrics solution -:::tip + :::tip -- `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` contains multiple deployment solutions after decompression. Here we recommend choosing the `hertzbeat-postgresql-victoria-metrics` solution. -- Other deployment methods, please read the README.md file of each deployment solution in detail. The MySQL solution requires you to prepare the MySQL driver package yourself. + - `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` contains multiple deployment solutions after decompression. Here we recommend choosing the `hertzbeat-postgresql-victoria-metrics` solution. + - Other deployment methods, please read the README.md file of each deployment solution in detail. The MySQL solution requires you to prepare the MySQL driver package yourself. 
-::: + ::: -- Unzip the script package + - Unzip the script package -``` -tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz -``` + ``` + tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz + ``` -- Enter the decompression directory and select `HertzBeat + PostgreSQL + VictoriaMetrics` for one-click deployment + - Enter the decompression directory and select `HertzBeat + PostgreSQL + VictoriaMetrics` for one-click deployment -``` -cd apache-hertzbeat-1.6.0-incubating-docker-compose -cd hertzbeat-postgresql-victoria-metrics -``` + ``` + cd apache-hertzbeat-1.6.0-incubating-docker-compose + cd hertzbeat-postgresql-victoria-metrics + ``` -- One-click start + - One-click start -> Run script in `hertzbeat-postgresql-victoria-metrics` directory + > Run script in `hertzbeat-postgresql-victoria-metrics` directory -``` -docker-compose up -d -``` + ``` + docker-compose up -d + ``` -- View service status + - View service status -> View the running status of each container, up is the normal running status + > View the running status of each container, up is the normal running status -``` -docker-compose ps -``` + ``` + docker-compose ps + ``` -4. Start exploring HertzBeat +3. Start exploring HertzBeat Access in the browser to start exploring and using it. The default account password is admin/hertzbeat. **HAVE FUN** diff --git a/home/docs/start/docker-deploy.md b/home/docs/start/docker-deploy.md index cc4670c16f6..dbc6cd35f85 100644 --- a/home/docs/start/docker-deploy.md +++ b/home/docs/start/docker-deploy.md @@ -17,39 +17,39 @@ It is necessary to have Docker environment in your environment. If not installed 1. 
Execute the following command -```shell -$ docker run -d -p 1157:1157 -p 1158:1158 \ - -v $(pwd)/data:/opt/hertzbeat/data \ - -v $(pwd)/logs:/opt/hertzbeat/logs \ - -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ - -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ - --restart=always \ - --name hertzbeat apache/hertzbeat -``` - -> Command parameter explanation - -- `docker run -d` : Run a container in the background via Docker -- `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. -- `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important, Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. -- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional) Mount the log file to the local host to facilitate viewing. -- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional) Mount the configuration file to the container (please ensure that the file exists locally). [Download](https://github.com/apache/hertzbeat/raw/master/script/application.yml) -- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional) Mount the account configuration file to the container (please ensure that the file exists locally). [Download](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) -- `-v $(pwd)/ext-lib:/opt/hertzbeat/ext-lib` : (optional) Mount external third-party JAR package [mysql-jdbc](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip) [oracle-jdbc](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) [oracle-i18n](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) -- `--name hertzbeat` : (optional) Naming container name hertzbeat -- `--restart=always` : (optional) Configure the container to restart automatically. 
-- `apache/hertzbeat` : Use the [official application mirror](https://hub.docker.com/r/apache/hertzbeat) to start the container, if the network times out, use `quay.io/tancloud/hertzbeat` instead. -- `--network host` : (optional) Use the host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` - -:::tip - -- Marked as optional parameters, non-mandatory items, if not needed, delete them. -- This maps the 1157,1158 ports of the container to the 1157,1158 ports of the host. If the port on the host is already occupied, you need to modify the host mapping port. -- When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. -- You can execute `docker update --restart=always hertzbeat` to configure the container to restart automatically. -- If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` - -::: + ```shell + $ docker run -d -p 1157:1157 -p 1158:1158 \ + -v $(pwd)/data:/opt/hertzbeat/data \ + -v $(pwd)/logs:/opt/hertzbeat/logs \ + -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ + -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ + --restart=always \ + --name hertzbeat apache/hertzbeat + ``` + + > Command parameter explanation + + - `docker run -d` : Run a container in the background via Docker + - `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. + - `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important, Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. + - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional) Mount the log file to the local host to facilitate viewing. 
+ - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional) Mount the configuration file to the container (please ensure that the file exists locally). [Download](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional) Mount the account configuration file to the container (please ensure that the file exists locally). [Download](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) + - `-v $(pwd)/ext-lib:/opt/hertzbeat/ext-lib` : (optional) Mount external third-party JAR package [mysql-jdbc](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip) [oracle-jdbc](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) [oracle-i18n](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) + - `--name hertzbeat` : (optional) Naming container name hertzbeat + - `--restart=always` : (optional) Configure the container to restart automatically. + - `apache/hertzbeat` : Use the [official application mirror](https://hub.docker.com/r/apache/hertzbeat) to start the container, if the network times out, use `quay.io/tancloud/hertzbeat` instead. + - `--network host` : (optional) Use the host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` + + :::tip + + - Marked as optional parameters, non-mandatory items, if not needed, delete them. + - This maps the 1157,1158 ports of the container to the 1157,1158 ports of the host. If the port on the host is already occupied, you need to modify the host mapping port. + - When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. + - You can execute `docker update --restart=always hertzbeat` to configure the container to restart automatically. 
+ - If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` + + ::: 2. Start to explore HertzBeat Access using browser. You can explore HertzBeat with default account `admin/hertzbeat` now! @@ -65,36 +65,36 @@ By deploying multiple HertzBeat Collectors, high availability, load balancing, a 1. Execute the following command -```shell -$ docker run -d \ - -e IDENTITY=custom-collector-name \ - -e MODE=public \ - -e MANAGER_HOST=127.0.0.1 \ - -e MANAGER_PORT=1158 \ - --name hertzbeat-collector apache/hertzbeat-collector -``` - -> Command parameter explanation - -- `docker run -d` : Run a container in the background via Docker -- `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. -- `-e MODE=public` : set the running mode(public or private), public cluster or private -- `-e MANAGER_HOST=127.0.0.1` : Important, Set the main hertzbeat server ip host, must use the server host instead of 127.0.0.1. -- `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. -- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host to facilitate viewing. -- `--name hertzbeat-collector` : Naming container name hertzbeat-collector -- `apache/hertzbeat-collector` : Use the [official application mirror](https://hub.docker.com/r/apache/hertzbeat-collector) to start the container, if the network times out, use `quay.io/tancloud/hertzbeat-collector` instead. -- `--network host` : (optional) Use the host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` - -:::tip - -- Marked as optional parameters, non-mandatory items, if not needed, delete them. -- The `127.0.0.1` in `MANAGER_HOST` needs to be replaced with the external IP address of the HertzBeat Server. 
-- When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. -- You can execute `docker update --restart=always hertzbeat-collector` to configure the container to restart automatically. -- If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` - -::: + ```shell + $ docker run -d \ + -e IDENTITY=custom-collector-name \ + -e MODE=public \ + -e MANAGER_HOST=127.0.0.1 \ + -e MANAGER_PORT=1158 \ + --name hertzbeat-collector apache/hertzbeat-collector + ``` + + > Command parameter explanation + + - `docker run -d` : Run a container in the background via Docker + - `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. + - `-e MODE=public` : set the running mode(public or private), public cluster or private + - `-e MANAGER_HOST=127.0.0.1` : Important, Set the main hertzbeat server ip host, must use the server host instead of 127.0.0.1. + - `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. + - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host to facilitate viewing. + - `--name hertzbeat-collector` : Naming container name hertzbeat-collector + - `apache/hertzbeat-collector` : Use the [official application mirror](https://hub.docker.com/r/apache/hertzbeat-collector) to start the container, if the network times out, use `quay.io/tancloud/hertzbeat-collector` instead. + - `--network host` : (optional) Use the host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` + + :::tip + + - Marked as optional parameters, non-mandatory items, if not needed, delete them. 
+ - The `127.0.0.1` in `MANAGER_HOST` needs to be replaced with the external IP address of the HertzBeat Server. + - When mounting files, the first parameter is your custom local file address, and the second parameter is the container file address. Make sure you have this file locally when mounting. + - You can execute `docker update --restart=always hertzbeat-collector` to configure the container to restart automatically. + - If you want to use the host network mode to start Docker, you can use `docker run -d --network host .....` + + ::: 2. Access `http://localhost:1157` and you will see the registered new collector in dashboard. @@ -109,49 +109,49 @@ $ docker run -d \ 1. MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. + > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. According to the process deploy,visit no interface Please refer to the following points to troubleshoot issues: -> 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. 
-> 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. + > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. + > 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. 3. Historical monitoring charts have been missing data for a long time -> 1:Check whether you configure victoria-metrics or Tdengine or IoTDB. No configuration means no historical chart data. -> 2: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. + > 1:Check whether you configure victoria-metrics or Tdengine or IoTDB. No configuration means no historical chart data. + > 2: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. 4. If the history chart on the monitoring page is not displayed,popup [please configure time series database] -> As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service database. -> Installation and initialization this database, please refer to [Using victoria-metrics to store metrics data](victoria-metrics-init) + > As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service database. + > Installation and initialization this database, please refer to [Using victoria-metrics to store metrics data](victoria-metrics-init) 5. 
The time series database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure dependent time series database] -> Please check if the configuration parameters are correct -> Is time-series database enable set to true -> Note⚠️If both hertzbeat and time-series database are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory + > Please check if the configuration parameters are correct + > Is time-series database enable set to true + > Note⚠️If both hertzbeat and time-series database are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed + > You can check the startup logs according to the logs directory 6. What is the purpose of application.yml -> This file is the configuration file of HertzBeat, used to configure various parameters of HertzBeat, such as database connection information, time series database configuration, etc. -> Download `application.yml` file to the host directory, for example: $(pwd)/application.yml -> Download source [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) -> You can modify the configuration yml file according to your needs. 
-> -> - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` -> - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) -> - **Recommended** If you need to use the time series database victoria-metrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using victoria-metrics to store metrics data](victoria-metrics-init) + > This file is the configuration file of HertzBeat, used to configure various parameters of HertzBeat, such as database connection information, time series database configuration, etc. + > Download `application.yml` file to the host directory, for example: $(pwd)/application.yml + > Download source [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + > You can modify the configuration yml file according to your needs. + > + > - If you need to use email to send alarms, you need to replace the email server parameters `spring.mail` in `application.yml` + > - **Recommended** If you need to use an external Mysql database to replace the built-in H2 database, you need to replace the `spring.datasource` parameter in `application.yml` For specific steps, see [Using Mysql to replace H2 database](mysql-change) + > - **Recommended** If you need to use the time series database victoria-metrics to store metric data, you need to replace the `warehouse.store.victoria-metrics` parameter in `application.yml` for specific steps, see [Using victoria-metrics to store metrics data](victoria-metrics-init) 7. 
What is the purpose of sureness.yml -> This file is the user configuration file of HertzBeat, used to configure user information of HertzBeat, such as account password, etc. -> HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` -> If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. -> Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` -> Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) -> For detail steps, please refer to [Configure Account Password](account-modify) + > This file is the user configuration file of HertzBeat, used to configure user information of HertzBeat, such as account password, etc. + > HertzBeat default built-in three user accounts, respectively `admin/hertzbeat tom/hertzbeat guest/hertzbeat` + > If you need update account or password, configure `sureness.yml`. Ignore this step without this demand. + > Download and config `sureness.yml` in the host directory,eg:`$(pwd)/sureness.yml` + > Download from [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) + > For detail steps, please refer to [Configure Account Password](account-modify) diff --git a/home/docs/start/greptime-init.md b/home/docs/start/greptime-init.md index 881e6faa0e2..57b4e9504b8 100644 --- a/home/docs/start/greptime-init.md +++ b/home/docs/start/greptime-init.md @@ -17,28 +17,28 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### Install GreptimeDB via Docker > Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). 
-> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Install GreptimeDB with Docker - -```shell -$ docker run -p 127.0.0.1:4000-4003:4000-4003 \ - -v "$(pwd)/greptimedb:/tmp/greptimedb" \ - --name greptime --rm \ - greptime/greptimedb:latest standalone start \ - --http-addr 0.0.0.0:4000 \ - --rpc-addr 0.0.0.0:4001 \ - --mysql-addr 0.0.0.0:4002 \ - --postgres-addr 0.0.0.0:4003 -``` + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Install GreptimeDB with Docker + + ```shell + $ docker run -p 127.0.0.1:4000-4003:4000-4003 \ + -v "$(pwd)/greptimedb:/tmp/greptimedb" \ + --name greptime --rm \ + greptime/greptimedb:latest standalone start \ + --http-addr 0.0.0.0:4000 \ + --rpc-addr 0.0.0.0:4001 \ + --mysql-addr 0.0.0.0:4002 \ + --postgres-addr 0.0.0.0:4003 + ``` `-v "$(pwd)/greptimedb:/tmp/greptimedb"` is local persistent mount of greptimedb data directory. `$(pwd)/greptimedb` should be replaced with the actual local directory, default is the `greptimedb` directory under the current directory. use```$ docker ps``` to check if the database started successfully @@ -50,24 +50,24 @@ use```$ docker ps``` to check if the database started successfully Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.greptime` data source parameters, URL account and password. 
-```yaml -warehouse: - store: - # disable jpa - jpa: - enabled: false - # enable greptime - greptime: - enabled: true - grpc-endpoints: localhost:4001 - url: jdbc:mysql://localhost:4002/hertzbeat?connectionTimeZone=Asia/Shanghai&forceConnectionTimeZoneToSession=true - driver-class-name: com.mysql.cj.jdbc.Driver - username: greptime - password: greptime - expire-time: 30d -``` - -The default database is `hertzbeat` in the `url`, and it will be created automatically. The `expire-time` specifies the TTL(time-to-live) of the auto-created database, it's 30 days by default. + ```yaml + warehouse: + store: + # disable jpa + jpa: + enabled: false + # enable greptime + greptime: + enabled: true + grpc-endpoints: localhost:4001 + url: jdbc:mysql://localhost:4002/hertzbeat?connectionTimeZone=Asia/Shanghai&forceConnectionTimeZoneToSession=true + driver-class-name: com.mysql.cj.jdbc.Driver + username: greptime + password: greptime + expire-time: 30d + ``` + + The default database is `hertzbeat` in the `url`, and it will be created automatically. The `expire-time` specifies the TTL(time-to-live) of the auto-created database, it's 30 days by default. 2. Restart HertzBeat @@ -75,4 +75,4 @@ The default database is `hertzbeat` in the `url`, and it will be created automat 1. Do both the time series databases Greptime, IoTDB or TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 
diff --git a/home/docs/start/influxdb-init.md b/home/docs/start/influxdb-init.md index 50b86344279..8f11e9c4c91 100644 --- a/home/docs/start/influxdb-init.md +++ b/home/docs/start/influxdb-init.md @@ -23,26 +23,26 @@ Note⚠️ Need InfluxDB 1.x Version. ### 2. Install TDengine via Docker > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Install InfluxDB with Docker -> -> ``` -> $ docker run -p 8086:8086 \ -> -v /opt/influxdb:/var/lib/influxdb \ -> influxdb:1.8 -> ``` -> -> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. -> use```$ docker ps``` to check if the database started successfully + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Install InfluxDB with Docker + + ```shell + $ docker run -p 8086:8086 \ + -v /opt/influxdb:/var/lib/influxdb \ + influxdb:1.8 + ``` + + `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. + use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file @@ -51,21 +51,21 @@ Note⚠️ Need InfluxDB 1.x Version. 
Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.influxdb` data source parameters, URL account and password. -```yaml -warehouse: - store: - # disable jpa - jpa: - enabled: false - # enable influxdb - influxdb: - enabled: true - server-url: http://localhost:8086 - username: root - password: root - expire-time: '30d' - replication: 1 -``` + ```yaml + warehouse: + store: + # disable jpa + jpa: + enabled: false + # enable influxdb + influxdb: + enabled: true + server-url: http://localhost:8086 + username: root + password: root + expire-time: '30d' + replication: 1 + ``` 2. Restart HertzBeat @@ -73,4 +73,4 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. diff --git a/home/docs/start/iotdb-init.md b/home/docs/start/iotdb-init.md index d2838a83a6d..db3a6233f0e 100644 --- a/home/docs/start/iotdb-init.md +++ b/home/docs/start/iotdb-init.md @@ -24,22 +24,22 @@ Apache IoTDB is a software system that integrates the collection, storage, manag Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). After the installation you can check if the Docker version normally output at the terminal. - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. 
Install IoTDB via Docker -```shell -$ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ - -v /opt/iotdb/data:/iotdb/data \ - --name iotdb \ - apache/iotdb:1.2.2-standalone -``` + ```shell + $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ + -v /opt/iotdb/data:/iotdb/data \ + --name iotdb \ + apache/iotdb:1.2.2-standalone + ``` -`-v /opt/iotdb/data:/iotdb/data` is local persistent mount of IotDB data directory.`/iotdb/data` should be replaced with the actual local directory. -use```$ docker ps``` to check if the database started successfully + `-v /opt/iotdb/data:/iotdb/data` is local persistent mount of IotDB data directory.`/iotdb/data` should be replaced with the actual local directory. + use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file @@ -47,61 +47,61 @@ use```$ docker ps``` to check if the database started successfully Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.iot-db` data source parameters, HOST account and password. 
-``` -warehouse: - store: - # disable JPA - jpa: - enabled: false - # enable iot-db - iot-db: - enabled: true - host: 127.0.0.1 - rpc-port: 6667 - username: root - password: root - query-timeout-in-ms: -1 - # default '7776000000'(90days,unit:ms,-1:no-expire) - expire-time: '7776000000' -``` - -**IoTDB Cluster Configuration** - -If you are using IoTDB for clustering, please refer to the configuration below: - -```yaml -warehouse: - store: - # Disable default JPA - jpa: - enabled: false - # Enable IoTDB - iot-db: - enabled: true - node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667'] - username: root - password: root - # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 - query-timeout-in-ms: -1 - # Data storage time: default '7776000000' (90 days, in milliseconds, -1 means never expire) - expire-time: '7776000000' -``` - -Configuration parameters: - -| Parameter Name | Description | -|---------------------|-------------------------------------------------------------------------------------------| -| enabled | Whether to enable | -| host | IoTDB database address | -| rpc-port | IoTDB database port | -| node-urls | IoTDB cluster addresses | -| username | IoTDB database account | -| password | IoTDB database password | -| version | deprecated | -| query-timeout-in-ms | Query timeout | -| expire-time | Data storage time, default '7776000000' (90 days, in milliseconds, -1 means never expire) | - -> If both cluster configuration `node-urls` and standalone configuration are set simultaneously, the cluster `node-urls` configuration takes precedence. 
+ ```yml + warehouse: + store: + # disable JPA + jpa: + enabled: false + # enable iot-db + iot-db: + enabled: true + host: 127.0.0.1 + rpc-port: 6667 + username: root + password: root + query-timeout-in-ms: -1 + # default '7776000000'(90days,unit:ms,-1:no-expire) + expire-time: '7776000000' + ``` + + **IoTDB Cluster Configuration** + + If you are using IoTDB for clustering, please refer to the configuration below: + + ```yaml + warehouse: + store: + # Disable default JPA + jpa: + enabled: false + # Enable IoTDB + iot-db: + enabled: true + node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667'] + username: root + password: root + # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 + query-timeout-in-ms: -1 + # Data storage time: default '7776000000' (90 days, in milliseconds, -1 means never expire) + expire-time: '7776000000' + ``` + + Configuration parameters: + + | Parameter Name | Description | + |---------------------|-------------------------------------------------------------------------------------------| + | enabled | Whether to enable | + | host | IoTDB database address | + | rpc-port | IoTDB database port | + | node-urls | IoTDB cluster addresses | + | username | IoTDB database account | + | password | IoTDB database password | + | version | deprecated | + | query-timeout-in-ms | Query timeout | + | expire-time | Data storage time, default '7776000000' (90 days, in milliseconds, -1 means never expire) | + + > If both cluster configuration `node-urls` and standalone configuration are set simultaneously, the cluster `node-urls` configuration takes precedence. 4. Restart HertzBeat @@ -109,15 +109,15 @@ Configuration parameters: 1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. 
You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] -> As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database 3. The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] -> Please check if the configuration parameters are correct -> Is td-engine enable set to true -> Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory + > Please check if the configuration parameters are correct + > Is td-engine enable set to true + > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed + > You can check the startup logs according to the logs directory diff --git a/home/docs/start/package-deploy.md b/home/docs/start/package-deploy.md index 6280b634a59..56a7313ed91 100644 --- a/home/docs/start/package-deploy.md +++ b/home/docs/start/package-deploy.md @@ -24,15 +24,15 @@ Deployment via package 
relies on Java runtime environment, ensure you have Java1 tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz ``` -:::tip -The configuration file is located in `config/application.yml`, you can modify the configuration file according to your needs to configure external dependent services, such as databases, time series databases, etc. -HertzBeat defaults to using internal services when started, but it is recommended to switch to external database services in production environments. -::: + :::tip + The configuration file is located in `config/application.yml`, you can modify the configuration file according to your needs to configure external dependent services, such as databases, time series databases, etc. + HertzBeat defaults to using internal services when started, but it is recommended to switch to external database services in production environments. + ::: -It is recommended to use [PostgreSQL](postgresql-change) for metadata storage and [VictoriaMetrics](victoria-metrics-init) for metric data storage. Specific steps are as follows + It is recommended to use [PostgreSQL](postgresql-change) for metadata storage and [VictoriaMetrics](victoria-metrics-init) for metric data storage. Specific steps are as follows -- [Switch built-in H2 database to PostgreSQL](postgresql-change) -- [Using VictoriaMetrics to store metric data](victoria-metrics-init) + - [Switch built-in H2 database to PostgreSQL](postgresql-change) + - [Using VictoriaMetrics to store metric data](victoria-metrics-init) 3. Configure the account file(optional) @@ -68,32 +68,32 @@ Deploying multiple HertzBeat Collectors can achieve high availability, load bala 2. 
Configure the collector configuration file -Unzip the installation package to the host eg: /opt/hertzbeat-collector - -``` -tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz -``` + Unzip the installation package to the host eg: /opt/hertzbeat-collector -Configure the collector configuration yml file `config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` + ``` + tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz + ``` -```yaml -collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} -``` + Configure the collector configuration yml file `config/application.yml`: unique `identity` name, running `mode` (public or private), hertzbeat `manager-host`, hertzbeat `manager-port` + + ```yaml + collector: + dispatch: + entrance: + netty: + enabled: true + identity: ${IDENTITY:} + mode: ${MODE:public} + manager-host: ${MANAGER_HOST:127.0.0.1} + manager-port: ${MANAGER_PORT:1158} + ``` -> Parameters detailed explanation + > Parameters detailed explanation -- `identity` : (optional) Set the unique identifier name of the collector. Note that the name of the collector must be unique when there are multiple collectors. -- `mode` : Configure the running mode (public or private), public cluster mode or private cloud-edge mode. -- `manager-host` : Important, configure the address of the connected HertzBeat Server, -- `manager-port` : (optional) Configure the port of the connected HertzBeat Server, default 1158. + - `identity` : (optional) Set the unique identifier name of the collector. Note that the name of the collector must be unique when there are multiple collectors. + - `mode` : Configure the running mode (public or private), public cluster mode or private cloud-edge mode. 
+ - `manager-host` : Important, configure the address of the connected HertzBeat Server, + - `manager-port` : (optional) Configure the port of the connected HertzBeat Server, default 1158. 3. Start the service @@ -127,6 +127,6 @@ collector: 2. According to the process deploy,visit no interface Please refer to the following points to troubleshoot issues: -> 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. -> 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. + > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. + > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. diff --git a/home/docs/start/quickstart.md b/home/docs/start/quickstart.md index 0911ef7a32c..830fc8f9d64 100644 --- a/home/docs/start/quickstart.md +++ b/home/docs/start/quickstart.md @@ -16,20 +16,20 @@ sidebar_label: Quick Start 1. Just one command to get started: -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` 2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 3. 
Deploy collector clusters(Optional) -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ``` + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. -- `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. + - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. + - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. + - `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache.org/docs/start/docker-deploy) diff --git a/home/docs/start/sslcert-practice.md b/home/docs/start/sslcert-practice.md index ecd1b8bbe12..d02a4da679c 100644 --- a/home/docs/start/sslcert-practice.md +++ b/home/docs/start/sslcert-practice.md @@ -18,7 +18,7 @@ github: 1. The `docker` environment can be installed with just one command -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` + `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` 2. After the installation is successful, the browser can access `localhost:1157` to start, the default account password is `admin/hertzbeat` @@ -26,62 +26,62 @@ github: 1. Click Add SSL Certificate Monitor -> System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate + > System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate -![](/img/docs/start/ssl_1.png) + ![](/img/docs/start/ssl_1.png) 2. 
Configure the monitoring website -> Here we take the example of monitoring Baidu website, configure monitoring host domain name, name, collection interval, etc. -> Click OK Note ⚠️Before adding, it will test the connectivity of the website by default, and the connection will be successful before adding. Of course, you can also gray out the **Test or not** button. + > Here we take the example of monitoring Baidu website, configure monitoring host domain name, name, collection interval, etc. + > Click OK Note ⚠️Before adding, it will test the connectivity of the website by default, and the connection will be successful before adding. Of course, you can also gray out the **Test or not** button. -![](/img/docs/start/ssl_2.png) + ![](/img/docs/start/ssl_2.png) 3. View the detection index data -> In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. + > In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. -![](/img/docs/start/ssl_3.png) + ![](/img/docs/start/ssl_3.png) -![](/img/docs/start/ssl_11.png) + ![](/img/docs/start/ssl_11.png) 4. Set the threshold (triggered when the certificate expires) -> System Page -> Alarms -> Alarm Thresholds -> New Thresholds + > System Page -> Alarms -> Alarm Thresholds -> New Thresholds -![](/img/docs/start/ssl_4.png) + ![](/img/docs/start/ssl_4.png) -> Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. + > Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. 
-![](/img/docs/start/ssl_5.png) + ![](/img/docs/start/ssl_5.png) -> Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. + > Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. -![](/img/docs/start/ssl_6.png) + ![](/img/docs/start/ssl_6.png) 5. Set the threshold (triggered one week before the certificate expires) -> In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. + > In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. -![](/img/docs/start/ssl_7.png) + ![](/img/docs/start/ssl_7.png) -> Finally, you can see the triggered alarm in the alarm center. + > Finally, you can see the triggered alarm in the alarm center. -![](/img/docs/start/ssl_8.png) + ![](/img/docs/start/ssl_8.png) 6. Alarm notification (in time notification via Dingding WeChat Feishu, etc.) 
-> Monitoring Tool -> Alarm Notification -> New Receiver + > Monitoring Tool -> Alarm Notification -> New Receiver -![](/img/docs/start/ssl_10.png) + ![](/img/docs/start/ssl_10.png) -For token configuration such as Dingding WeChat Feishu, please refer to the help document + For token configuration such as Dingding WeChat Feishu, please refer to the help document - + -> Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured + > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured -![](/img/docs/start/ssl_11.png) + ![](/img/docs/start/ssl_11.png) 7. OK When the threshold is triggered, we can receive the corresponding alarm message. If there is no notification, you can also view the alarm information in the alarm center. diff --git a/home/docs/start/tdengine-init.md b/home/docs/start/tdengine-init.md index 29ea511c6d3..82f269cb0f1 100644 --- a/home/docs/start/tdengine-init.md +++ b/home/docs/start/tdengine-init.md @@ -19,28 +19,28 @@ Note⚠️ Need TDengine 3.x Version. ### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Install TDengine with Docker -> -> ```shell -> $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ -> -v /opt/taosdata:/var/lib/taos \ -> --name tdengine -e TZ=Asia/Shanghai \ -> tdengine/tdengine:3.0.4.0 -> ``` -> -> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. 
-> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. -> use```$ docker ps``` to check if the database started successfully + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Install TDengine with Docker + + ```shell + $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ + -v /opt/taosdata:/var/lib/taos \ + --name tdengine -e TZ=Asia/Shanghai \ + tdengine/tdengine:3.0.4.0 + ``` + + `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. + `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. + use```$ docker ps``` to check if the database started successfully ### Create database instance @@ -88,20 +88,20 @@ Note⚠️ Need TDengine 3.x Version. Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.td-engine` data source parameters, URL account and password. -```yaml -warehouse: - store: - # disable jpa - jpa: - enabled: false - # enable td-engine - td-engine: - enabled: true - driver-class-name: com.taosdata.jdbc.rs.RestfulDriver - url: jdbc:TAOS-RS://localhost:6041/hertzbeat - username: root - password: taosdata -``` + ```yaml + warehouse: + store: + # disable jpa + jpa: + enabled: false + # enable td-engine + td-engine: + enabled: true + driver-class-name: com.taosdata.jdbc.rs.RestfulDriver + url: jdbc:TAOS-RS://localhost:6041/hertzbeat + username: root + password: taosdata + ``` 2. Restart HertzBeat @@ -109,19 +109,19 @@ warehouse: 1. 
Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] -> As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database 3. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. + > Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. 4. 
The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] -> Please check if the configuration parameters are correct -> Is td-engine enable set to true -> Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory + > Please check if the configuration parameters are correct + > Is td-engine enable set to true + > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed + > You can check the startup logs according to the logs directory diff --git a/home/docs/start/victoria-metrics-init.md b/home/docs/start/victoria-metrics-init.md index f9ca1c86216..b89b26a8fa3 100644 --- a/home/docs/start/victoria-metrics-init.md +++ b/home/docs/start/victoria-metrics-init.md @@ -18,27 +18,27 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t ### Install VictoriaMetrics via Docker > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. 
+ + ``` + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` 2. Install VictoriaMetrics via Docker -```shell -$ docker run -d -p 8428:8428 \ - -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ - --name victoria-metrics \ - victoriametrics/victoria-metrics:v1.95.1 -``` + ```shell + $ docker run -d -p 8428:8428 \ + -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ + --name victoria-metrics \ + victoriametrics/victoria-metrics:v1.95.1 + ``` -`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory -use```$ docker ps``` to check if the database started successfully + `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory + use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file @@ -46,19 +46,19 @@ use```$ docker ps``` to check if the database started successfully Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.victoria-metrics` data source parameters, HOST account and password. -```yaml -warehouse: - store: - # disable JPA - jpa: - enabled: false - # enable victoria-metrics - victoria-metrics: - enabled: true - url: http://localhost:8428 - username: root - password: root -``` + ```yaml + warehouse: + store: + # disable JPA + jpa: + enabled: false + # enable victoria-metrics + victoria-metrics: + enabled: true + url: http://localhost:8428 + username: root + password: root + ``` 4. Restart HertzBeat @@ -66,4 +66,4 @@ warehouse: 1. Do both the time series databases need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. 
Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md index 9acd22c2e76..2569c5bd33e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-09-10-ssl-practice.md @@ -24,11 +24,11 @@ gitee: #### 安装 HertzBeat -1.如果不想安装可以直接使用云服务 [TanCloud探云 console.tancloud.cn](https://console.tancloud.cn) +1. 如果不想安装可以直接使用云服务 [TanCloud探云 console.tancloud.cn](https://console.tancloud.cn) 2. `docker` 环境仅需一条命令即可安装 -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` + `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` 3. 安装成功浏览器访问 `localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` @@ -36,63 +36,63 @@ gitee: 1. 点击新增SSL证书监控 -> 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 + > 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/bd53f343a5b54feab62e71458d076441~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/bd53f343a5b54feab62e71458d076441~tplv-k3u1fbpfcp-zoom-1.image) 2. 配置监控网站 -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 -> 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 + > 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 + > 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ad1154670648413bb82c8bdeb5b13609~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ad1154670648413bb82c8bdeb5b13609~tplv-k3u1fbpfcp-zoom-1.image) 3. 
查看检测指标数据 -> 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 + > 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/f874b45e909c4bb0acdd28b3fb034a61~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/f874b45e909c4bb0acdd28b3fb034a61~tplv-k3u1fbpfcp-zoom-1.image) -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ef5d7443f8c04818ae5aa28d421203be~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ef5d7443f8c04818ae5aa28d421203be~tplv-k3u1fbpfcp-zoom-1.image) 4. 设置阈值(证书过期时触发) -> 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 + > 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8d6205172d43463aa34e534477f132f1~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8d6205172d43463aa34e534477f132f1~tplv-k3u1fbpfcp-zoom-1.image) -> 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 + > 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/83d17b381d994f26a6240e01915b2001~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/83d17b381d994f26a6240e01915b2001~tplv-k3u1fbpfcp-zoom-1.image) -> 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 + > 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9b9063d7bcf9454387be0491fc382bd1~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9b9063d7bcf9454387be0491fc382bd1~tplv-k3u1fbpfcp-zoom-1.image) 5. 
设置阈值(证书过期前一周触发) -> 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 + > 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/0d6f837f57c247e09f668f60eff4a0ff~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/0d6f837f57c247e09f668f60eff4a0ff~tplv-k3u1fbpfcp-zoom-1.image) -> 最终可以在告警中心看到已触发的告警。 + > 最终可以在告警中心看到已触发的告警。 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/5a61b23127524976b2c209ce0ca6a339~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/5a61b23127524976b2c209ce0ca6a339~tplv-k3u1fbpfcp-zoom-1.image) 6. 告警通知(通过钉钉微信飞书等及时通知) -> 监控系统 -> 告警通知 -> 新增接收人 + > 监控系统 -> 告警通知 -> 新增接收人 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7f36956060ef410a82bbecafcbb2957f~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7f36956060ef410a82bbecafcbb2957f~tplv-k3u1fbpfcp-zoom-1.image) -钉钉微信飞书等token配置可以参考帮助文档 + 钉钉微信飞书等token配置可以参考帮助文档 - - + + -> 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 + > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 -![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/d976343e81f843138344a039f3aff8a3~tplv-k3u1fbpfcp-zoom-1.image) + ![](https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/d976343e81f843138344a039f3aff8a3~tplv-k3u1fbpfcp-zoom-1.image) 7. OK 当阈值触发后我们就可以收到对应告警消息啦,如果没有配通知,也可以在告警中心查看告警信息。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md index 54f307891f3..519b35f1287 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md @@ -32,15 +32,15 @@ keywords: [开源监控系统, 开源数据库监控, IotDB数据库监控] 1. 
metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server -``` -# 是否启动监控模块,默认为false -enableMetric: true - -# 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 -metricReporterList: - - JMX - - PROMETHEUS -``` + ``` + # 是否启动监控模块,默认为false + enableMetric: true + + # 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 + metricReporterList: + - JMX + - PROMETHEUS + ``` 2. 重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 @@ -48,28 +48,28 @@ metricReporterList: 1. 点击新增IoTDB监控 -路径:菜单 -> 数据库监控 -> IoTDB监控 -> 新增IoTDB监控 + 路径:菜单 -> 数据库监控 -> IoTDB监控 -> 新增IoTDB监控 -![hertzbeat](/img/blog/monitor-iotdb-1.png) + ![hertzbeat](/img/blog/monitor-iotdb-1.png) 2. 配置监控IoTDB所需参数 -在监控页面填写 IoTDB **服务IP**,**监控端口**(默认9091),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/iotdb/) + 在监控页面填写 IoTDB **服务IP**,**监控端口**(默认9091),最后点击确定添加即可。 + 其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/iotdb/) -![hertzbeat](/img/blog/monitor-iotdb-2.png) + ![hertzbeat](/img/blog/monitor-iotdb-2.png) 3. 完成✅,现在我们已经添加好对 IoTDB 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-iotdb-3.png) + ![hertzbeat](/img/blog/monitor-iotdb-3.png) 4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 IoTDB的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-iotdb-4.png) + ![hertzbeat](/img/blog/monitor-iotdb-4.png) 5. 点击**监控历史详情TAB** 即可浏览 IoTDB的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-iotdb-5.png) + ![hertzbeat](/img/blog/monitor-iotdb-5.png) **完成DONE!通过上面几步,总结起来其实也就是两步** @@ -84,29 +84,29 @@ metricReporterList: 1. 
对某个重要指标配置阈值告警 -路径:菜单 -> 告警阈值 -> 新增阈值 + 路径:菜单 -> 告警阈值 -> 新增阈值 -- 选择配置的指标对象,IotDB监控有非常多的指标,其中有个指标关系到节点的状态 `cluster_node_status` -> `status` (节点状态,1=online 2=offline)。 -- 这里我们就配置当此指标 `status==2` 时发出告警,告警级别为**紧急告警**,一次即触发,具体如下图。 + - 选择配置的指标对象,IotDB监控有非常多的指标,其中有个指标关系到节点的状态 `cluster_node_status` -> `status` (节点状态,1=online 2=offline)。 + - 这里我们就配置当此指标 `status==2` 时发出告警,告警级别为**紧急告警**,一次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-iotdb-6.png) + ![hertzbeat](/img/blog/monitor-iotdb-6.png) 2. 新增消息通知接收人 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 + 路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 + 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 + - 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 + - 在 HertzBeat 配置接收人参数如下。 -【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 + 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### 完毕,现在坐等告警消息过来了。叮叮叮叮 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md index bf6a47a7e8e..74e87d11991 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md @@ -42,27 +42,27 @@ tags: [opensource, practice] 1. 在网关的 `pom.xml` 文件中添加 `metrics插件` 的依赖。 -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` + ```xml + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + + ``` 2. 
`metric`插件 采集默认是关闭的, 在网关的配置`yaml`文件中编辑如下内容: -```yaml -shenyu: - metrics: - enabled: true #设置为 true 表示开启 - name : prometheus - host: 127.0.0.1 #暴露的ip - port: 8090 #暴露的端口 - jmxConfig: #jmx配置 - props: - jvm_enabled: true #开启jvm的监控指标 -``` + ```yaml + shenyu: + metrics: + enabled: true #设置为 true 表示开启 + name : prometheus + host: 127.0.0.1 #暴露的ip + port: 8090 #暴露的端口 + jmxConfig: #jmx配置 + props: + jvm_enabled: true #开启jvm的监控指标 + ``` 3. 重启 ShenYu网关, 打开浏览器或者用curl 访问 `http://ip:8090`, 就能看到metric数据了。 @@ -70,30 +70,30 @@ shenyu: 1. 点击新增 ShenYu 监控 -路径:菜单 -> 中间件监控 -> ShenYu监控 -> 新增ShenYu监控 + 路径:菜单 -> 中间件监控 -> ShenYu监控 -> 新增ShenYu监控 -![hertzbeat](/img/blog/monitor-shenyu-1.png) + ![hertzbeat](/img/blog/monitor-shenyu-1.png) 2. 配置监控 ShenYu 所需参数 -在监控页面填写 ShenYu **服务IP**,**监控端口**(默认8090),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/shenyu/) + 在监控页面填写 ShenYu **服务IP**,**监控端口**(默认8090),最后点击确定添加即可。 + 其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/shenyu/) -![hertzbeat](/img/blog/monitor-shenyu-1.png) + ![hertzbeat](/img/blog/monitor-shenyu-1.png) 3. 完成✅,现在我们已经添加好对 ShenYu 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-shenyu-3.png) + ![hertzbeat](/img/blog/monitor-shenyu-3.png) 4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 ShenYu 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-shenyu-4.png) + ![hertzbeat](/img/blog/monitor-shenyu-4.png) 5. 点击**监控历史详情TAB** 即可浏览 ShenYu 的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-shenyu-5.png) + ![hertzbeat](/img/blog/monitor-shenyu-5.png) -![hertzbeat](/img/blog/monitor-shenyu-6.png) + ![hertzbeat](/img/blog/monitor-shenyu-6.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用两步** @@ -111,33 +111,33 @@ shenyu: 1. 
对某个重要指标配置告警阈值 -路径:菜单 -> 告警阈值 -> 新增阈值 + 路径:菜单 -> 告警阈值 -> 新增阈值 -- 选择配置的指标对象,ShenYu 监控有非常多的指标,我们举例对 `打开的文件描述符的数量` `process_open_fds` -> `value` 这个指标进行阈值设置, 当服务端打开文件描述符数量大于3000时发出告警。 -- 这里我们就配置当此指标`process_open_fds` 的 `value>3000` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 + - 选择配置的指标对象,ShenYu 监控有非常多的指标,我们举例对 `打开的文件描述符的数量` `process_open_fds` -> `value` 这个指标进行阈值设置, 当服务端打开文件描述符数量大于3000时发出告警。 + - 这里我们就配置当此指标`process_open_fds` 的 `value>3000` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-shenyu-7.png) + ![hertzbeat](/img/blog/monitor-shenyu-7.png) 2. 新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 + > 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 + 路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 + 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 + - 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 + - 在 HertzBeat 配置接收人参数如下。 -【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 + 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 -> 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 + > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md index 576ace519e3..6fcb608a759 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md @@ -36,76 +36,76 @@ tags: [opensource, practice] 1. 
开启 SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` + ```yaml + management: + endpoints: + web: + exposure: + include: '*' + ``` 2. 重启后测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4 GB", - "totalMemory": "444 MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81 GB" - } -] -``` + ```json + [ + { + "poolName": "commonExecutor", + "corePoolSize": 1, + "maximumPoolSize": 1, + "queueType": "LinkedBlockingQueue", + "queueCapacity": 2147483647, + "queueSize": 0, + "fair": false, + "queueRemainingCapacity": 2147483647, + "activeCount": 0, + "taskCount": 0, + "completedTaskCount": 0, + "largestPoolSize": 0, + "poolSize": 0, + "waitTaskCount": 0, + "rejectCount": 0, + "rejectHandlerName": null, + "dynamic": false, + "runTimeoutCount": 0, + "queueTimeoutCount": 0 + }, + { + "maxMemory": "4 GB", + "totalMemory": "444 MB", + "freeMemory": "250.34 MB", + "usableMemory": "3.81 GB" + } + ] + ``` #### 二. 在 HertzBeat 监控页面添加 DynamicTp 线程池监控 1. 点击新增 DynamicTp 监控 -路径:菜单 -> 中间件监控 -> DynamicTp监控 -> 新增DynamicTp监控 + 路径:菜单 -> 中间件监控 -> DynamicTp监控 -> 新增DynamicTp监控 -![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) 2. 
配置监控 DynamicTp 所需参数 -在监控页面填写 DynamicTp **服务IP**,**监控端口**(默认8080),最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/dynamic_tp/) + 在监控页面填写 DynamicTp **服务IP**,**监控端口**(默认8080),最后点击确定添加即可。 + 其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/dynamic_tp/) -![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-2.png) 3. 完成✅,现在我们已经添加好对 DynamicTp 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-1.png) 4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 DynamicTp线程池 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-dynamic-tp-3.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-3.png) 5. 点击**监控历史详情TAB** 即可浏览 DynamicTp线程池 的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-dynamic-tp-4.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-4.png) -![hertzbeat](/img/blog/monitor-dynamic-tp-5.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-5.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用两步** @@ -123,33 +123,33 @@ management: 1. 对某个重要指标配置告警阈值 -路径:菜单 -> 告警阈值 -> 新增阈值 + 路径:菜单 -> 告警阈值 -> 新增阈值 -- 选择配置的指标对象,DynamicTp监控主要是一些线程池相关指标,我们举例对 `运行超时线程数量` `thread_pool_running` -> `run_timeout_count` 这个指标进行阈值设置, 当线程运行超时数量大于1时发出告警。 -- 这里我们就配置当此指标`thread_pool_running` 的 `run_timeout_count>1` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 + - 选择配置的指标对象,DynamicTp监控主要是一些线程池相关指标,我们举例对 `运行超时线程数量` `thread_pool_running` -> `run_timeout_count` 这个指标进行阈值设置, 当线程运行超时数量大于1时发出告警。 + - 这里我们就配置当此指标`thread_pool_running` 的 `run_timeout_count>1` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-dynamic-tp-6.png) + ![hertzbeat](/img/blog/monitor-dynamic-tp-6.png) 2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 + > 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 + 路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 + 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 + - 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 + - 在 HertzBeat 配置接收人参数如下。 -【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 + 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 -> 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 + > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md index 748a3f09d53..4ec31b3926e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md @@ -33,28 +33,28 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] 1. 点击新增 Mysql 监控 -路径:菜单 -> 数据库监控 -> Mysql数据库 -> 新增Mysql数据库监控 + 路径:菜单 -> 数据库监控 -> Mysql数据库 -> 新增Mysql数据库监控 -![hertzbeat](/img/blog/monitor-mysql-1.png) + ![hertzbeat](/img/blog/monitor-mysql-1.png) 2. 
配置新增监控 Mysql 数据库所需参数 -在监控页面填写 Mysql **服务IP**,**监控端口**(默认3306),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/mysql/) + 在监控页面填写 Mysql **服务IP**,**监控端口**(默认3306),**账户密码等**,最后点击确定添加即可。 + 其他参数如**采集间隔**,**超时时间**等可以参考[帮助文档](https://hertzbeat.com/docs/help/mysql/) -![hertzbeat](/img/blog/monitor-mysql-2.png) + ![hertzbeat](/img/blog/monitor-mysql-2.png) 3. 完成✅,现在我们已经添加好对 Mysql数据库 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-mysql-1.png) + ![hertzbeat](/img/blog/monitor-mysql-1.png) 4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 Mysql数据库 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-mysql-3.png) + ![hertzbeat](/img/blog/monitor-mysql-3.png) 5. 点击**监控历史详情TAB** 即可浏览 Mysql数据库 的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-mysql-4.png) + ![hertzbeat](/img/blog/monitor-mysql-4.png) **DONE!完成啦!通过上面几步,总结起来其实也就只用一步即可** @@ -71,35 +71,35 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] 1. 对某个重要指标配置告警阈值 -路径:菜单 -> 阈值规则 -> 新增阈值 + 路径:菜单 -> 阈值规则 -> 新增阈值 -- 选择配置的指标对象,Mysql 数据库监控主要是数据库性能等相关指标,我们举例对 `查询缓存命中率` `cache` -> `query_cache_hit_rate` 这个指标进行阈值设置, 当Mysql的查询缓存命中率很低小于30%时发出告警。 -- 这里我们就配置当此指标`cache` 的 `query_cache_hit_rate<30` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 + - 选择配置的指标对象,Mysql 数据库监控主要是数据库性能等相关指标,我们举例对 `查询缓存命中率` `cache` -> `query_cache_hit_rate` 这个指标进行阈值设置, 当Mysql的查询缓存命中率很低小于30%时发出告警。 + - 这里我们就配置当此指标`cache` 的 `query_cache_hit_rate<30` 时发出告警,告警级别为**严重告警**,三次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-mysql-5.png) + ![hertzbeat](/img/blog/monitor-mysql-5.png) -![hertzbeat](/img/blog/monitor-mysql-6.png) + ![hertzbeat](/img/blog/monitor-mysql-6.png) 2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 + > 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 + 路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 + 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 + - 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 + - 在 HertzBeat 配置接收人参数如下。 -【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 + 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 -> 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 + > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md index 8df57dee476..2734281bbf1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md @@ -30,32 +30,32 @@ Github: 1. 点击新增 Linux 监控 -路径:菜单 -> 操作系统监控 -> Linux操作系统 -> 新增Linux操作系统监控 + 路径:菜单 -> 操作系统监控 -> Linux操作系统 -> 新增Linux操作系统监控 -![hertzbeat](/img/blog/monitor-linux-1.png) + ![hertzbeat](/img/blog/monitor-linux-1.png) 2. 配置新增监控 Linux 所需参数 -在监控页面填写 Linux **对端IP**,**SSH端口**(默认22),**账户密码等**,最后点击确定添加即可。 -其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 + 在监控页面填写 Linux **对端IP**,**SSH端口**(默认22),**账户密码等**,最后点击确定添加即可。 + 其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 -![hertzbeat](/img/blog/monitor-linux-2.png) + ![hertzbeat](/img/blog/monitor-linux-2.png) 3. 
完成✅,现在我们已经添加好对 Linux 的监控了,查看监控列表即可看到我们的添加项。 -![hertzbeat](/img/blog/monitor-linux-3.png) + ![hertzbeat](/img/blog/monitor-linux-3.png) 4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 Linux 的实时监控指标数据。 -![hertzbeat](/img/blog/monitor-linux-4.png) + ![hertzbeat](/img/blog/monitor-linux-4.png) -![hertzbeat](/img/blog/monitor-linux-7.png) + ![hertzbeat](/img/blog/monitor-linux-7.png) 5. 点击**监控历史详情TAB** 即可浏览 Linux 的历史监控指标数据图表📈。 -![hertzbeat](/img/blog/monitor-linux-5.png) + ![hertzbeat](/img/blog/monitor-linux-5.png) -![hertzbeat](/img/blog/monitor-linux-6.png) + ![hertzbeat](/img/blog/monitor-linux-6.png) **DONE!完成啦!不需要我们去部署agent或者各种繁琐操作,是不是很简单** @@ -132,35 +132,35 @@ Github: 1. 对某个重要指标配置告警阈值 -路径:菜单 -> 阈值规则 -> 新增阈值 + 路径:菜单 -> 阈值规则 -> 新增阈值 -- 选择配置的指标对象,Linux 监控主要是cpu 内存 磁盘 网络性能等相关指标,我们举例对 `CPU利用率` `cpu` -> `usage` 这个指标进行阈值设置, 当Linux cpu利用率大于90%时发出告警。 -- 这里我们就配置当此指标`cpu` 的 `usage>90` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 + - 选择配置的指标对象,Linux 监控主要是cpu 内存 磁盘 网络性能等相关指标,我们举例对 `CPU利用率` `cpu` -> `usage` 这个指标进行阈值设置, 当Linux cpu利用率大于90%时发出告警。 + - 这里我们就配置当此指标`cpu` 的 `usage>90` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 -![hertzbeat](/img/blog/monitor-linux-8.png) + ![hertzbeat](/img/blog/monitor-linux-8.png) -![hertzbeat](/img/blog/monitor-linux-9.png) + ![hertzbeat](/img/blog/monitor-linux-9.png) 2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 + > 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 + 路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 + 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 + - 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 + - 在 HertzBeat 配置接收人参数如下。 -【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 + 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 -> 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 + > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md index b03359eb1ec..413e9a7d019 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md @@ -30,81 +30,81 @@ Github: 1. 开启 SpringBoot Actuator Endpoint 暴露出`metrics health env`指标接口 -```yaml -management: - endpoints: - web: - exposure: - include: - - 'metrics' - - 'health' - - 'env' - enabled-by-default: on -``` + ```yaml + management: + endpoints: + web: + exposure: + include: + - 'metrics' + - 'health' + - 'env' + enabled-by-default: on + ``` 2. 
重启后测试访问指标接口 `ip:port/actuator` 是否有响应json数据如下: -```json -{ - "_links": { - "self": { - "href": "http://localhost:1157/actuator", - "templated": false - }, - "health-path": { - "href": "http://localhost:1157/actuator/health/{*path}", - "templated": true - }, - "health": { - "href": "http://localhost:1157/actuator/health", - "templated": false - }, - "env": { - "href": "http://localhost:1157/actuator/env", - "templated": false - }, - "env-toMatch": { - "href": "http://localhost:1157/actuator/env/{toMatch}", - "templated": true - }, - "metrics-requiredMetricName": { - "href": "http://localhost:1157/actuator/metrics/{requiredMetricName}", - "templated": true - }, - "metrics": { - "href": "http://localhost:1157/actuator/metrics", - "templated": false + ```json + { + "_links": { + "self": { + "href": "http://localhost:1157/actuator", + "templated": false + }, + "health-path": { + "href": "http://localhost:1157/actuator/health/{*path}", + "templated": true + }, + "health": { + "href": "http://localhost:1157/actuator/health", + "templated": false + }, + "env": { + "href": "http://localhost:1157/actuator/env", + "templated": false + }, + "env-toMatch": { + "href": "http://localhost:1157/actuator/env/{toMatch}", + "templated": true + }, + "metrics-requiredMetricName": { + "href": "http://localhost:1157/actuator/metrics/{requiredMetricName}", + "templated": true + }, + "metrics": { + "href": "http://localhost:1157/actuator/metrics", + "templated": false + } + } } - } -} -``` + ``` #### 在开源监控系统 HertzBeat 监控页面添加对 SpringBoot2 应用监控 1. 点击新增 SpringBoot2 监控 -路径:菜单 -> 应用服务监控 -> SpringBoot2 -> 新增SpringBoot2监控 + 路径:菜单 -> 应用服务监控 -> SpringBoot2 -> 新增SpringBoot2监控 -![hertzbeat](/img/blog/monitor-springboot2-1.png) + ![hertzbeat](/img/blog/monitor-springboot2-1.png) 2. 
配置新增监控 SpringBoot2 所需参数

-在监控页面填写 SpringBoot2应用 **对端IP**,**服务端口**(默认8080),**账户密码等**,最后点击确定添加即可。
-其他参数如**采集间隔**,**超时时间**等可以参考帮助文档 + 在监控页面填写 SpringBoot2应用 **对端IP**,**服务端口**(默认8080),**账户密码等**,最后点击确定添加即可。
+ 其他参数如**采集间隔**,**超时时间**等可以参考帮助文档

-![hertzbeat](/img/blog/monitor-springboot2-2.png) + ![hertzbeat](/img/blog/monitor-springboot2-2.png)

3. 完成✅,现在我们已经添加好对 SpringBoot2应用 的监控了,查看监控列表即可看到我们的添加项。

-![hertzbeat](/img/blog/monitor-springboot2-3.png) + ![hertzbeat](/img/blog/monitor-springboot2-3.png)

4. 点击监控列表项的**操作**->**监控详情图标** 即可浏览 SpringBoot2应用 的实时监控指标数据。

-![hertzbeat](/img/blog/monitor-springboot2-4.png) + ![hertzbeat](/img/blog/monitor-springboot2-4.png)

5. 点击**监控历史详情TAB** 即可浏览 SpringBoot2应用 的历史监控指标数据图表📈。

-![hertzbeat](/img/blog/monitor-springboot2-5.png) + ![hertzbeat](/img/blog/monitor-springboot2-5.png)

**DONE!完成啦!不需要我们去部署agent或者各种繁琐操作,是不是很简单**

@@ -121,35 +121,35 @@

1. 对某个重要指标配置告警阈值

-路径:菜单 -> 阈值规则 -> 新增阈值 + 路径:菜单 -> 阈值规则 -> 新增阈值

-- 选择配置的指标对象,SpringBoot2应用 监控主要是 堆栈内存 线程等相关指标,我们举例对 `状态线程数` `threads` -> `threads` 这个指标进行阈值设置, 当`runnable`状态的线程数量大于300时发出告警。
-- 这里我们就配置当此指标`size`,`state` 的 `equals(state,"runnable"") && size>300` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。 + - 选择配置的指标对象,SpringBoot2应用 监控主要是 堆栈内存 线程等相关指标,我们举例对 `状态线程数` `threads` -> `threads` 这个指标进行阈值设置, 当`runnable`状态的线程数量大于300时发出告警。
+ - 这里我们就配置当此指标`size`,`state` 的 `equals(state,"runnable") && size>300` 时发出告警,告警级别为**警告告警**,三次即触发,具体如下图。

-![hertzbeat](/img/blog/monitor-springboot2-6.png) + ![hertzbeat](/img/blog/monitor-springboot2-6.png)

-![hertzbeat](/img/blog/monitor-springboot2-7.png) + ![hertzbeat](/img/blog/monitor-springboot2-7.png)

2. 
新增消息通知接收人 -> 配置接收人,让告警消息知道要发给谁,用什么方式发。 + > 配置接收人,让告警消息知道要发给谁,用什么方式发。 -路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 + 路径:菜单 -> 告警通知 -> 告警接收人 -> 新增接收人 -消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 + 消息通知方式支持 **邮件,钉钉,企业微信,飞书,WebHook,短信**等,我们这里以常用的钉钉为例。 -- 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 -- 在 HertzBeat 配置接收人参数如下。 + - 参照此[帮助文档](https://hertzbeat.com/docs/help/alert_dingtalk) 在钉钉端配置机器人,设置安全自定义关键词`HertzBeat`,获取对应`access_token`值。 + - 在 HertzBeat 配置接收人参数如下。 -【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 + 【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】 -![hertzbeat](/img/blog/alert-notice-1.png) + ![hertzbeat](/img/blog/alert-notice-1.png) 3. 配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】 -> 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 + > 配置告警通知策略,让告警消息与接收人绑定,这样就能决定哪些告警发给哪个人。 -![hertzbeat](/img/blog/alert-notice-2.png) + ![hertzbeat](/img/blog/alert-notice-2.png) ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md index 5a6170fa4a0..40f5348910d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-11-greptimedb-store.md @@ -43,16 +43,16 @@ Cloud: **[TanCloud](https://console.tancloud.cn/)** 1. 
Docker 安装 GreptimeDB -```shell -$ docker run -p 4000-4004:4000-4004 \ - -p 4242:4242 -v "$(pwd)/greptimedb:/tmp/greptimedb" \ - --name greptime \ - greptime/greptimedb:0.2.0 standalone start \ - --http-addr 0.0.0.0:4000 \ - --rpc-addr 0.0.0.0:4001 -``` + ```shell + $ docker run -p 4000-4004:4000-4004 \ + -p 4242:4242 -v "$(pwd)/greptimedb:/tmp/greptimedb" \ + --name greptime \ + greptime/greptimedb:0.2.0 standalone start \ + --http-addr 0.0.0.0:4000 \ + --rpc-addr 0.0.0.0:4001 + ``` -- `-v "$(pwd)/greptimedb:/tmp/greptimedb"` 为 greptimeDB 数据目录本地持久化挂载,建议将`$(pwd)/greptimedb`替换为您想指定存放的实际本地目录 + - `-v "$(pwd)/greptimedb:/tmp/greptimedb"` 为 greptimeDB 数据目录本地持久化挂载,建议将`$(pwd)/greptimedb`替换为您想指定存放的实际本地目录 2. 使用```$ docker ps | grep greptime```查看 GreptimeDB 是否启动成功 @@ -62,21 +62,21 @@ $ docker run -p 4000-4004:4000-4004 \ 1. Docker 安装 HertzBeat -```shell -$ docker run -d -p 1157:1157 \ - -e LANG=zh_CN.UTF-8 \ - -e TZ=Asia/Shanghai \ - -v /opt/data:/opt/hertzbeat/data \ - -v /opt/application.yml:/opt/hertzbeat/config/application.yml \ - --restart=always \ - --name hertzbeat apache/hertzbeat -``` + ```shell + $ docker run -d -p 1157:1157 \ + -e LANG=zh_CN.UTF-8 \ + -e TZ=Asia/Shanghai \ + -v /opt/data:/opt/hertzbeat/data \ + -v /opt/application.yml:/opt/hertzbeat/config/application.yml \ + --restart=always \ + --name hertzbeat apache/hertzbeat + ``` -- `-v /opt/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 + - `-v /opt/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 -- `-v /opt/application.yml:/opt/hertzbeat/config/application.yml` : 挂载自定义本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。 + - `-v /opt/application.yml:/opt/hertzbeat/config/application.yml` : 挂载自定义本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。 -注意⚠️ 本地挂载配置文件 `application.yml` 需提前存在,文件完整内容见项目仓库[/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + 注意⚠️ 本地挂载配置文件 `application.yml` 
需提前存在,文件完整内容见项目仓库[/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 2. 浏览器访问 默认账户密码 admin/hertzbeat,查看 HertzBeat 是否启动成功。 @@ -84,26 +84,26 @@ $ docker run -d -p 1157:1157 \ 1. 修改 HertzBeat 端配置文件 -修改挂载到本地的 HertzBeat 配置文件 [application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml), 安装包模式下修改 `hertzbeat/config/application.yml` + 修改挂载到本地的 HertzBeat 配置文件 [application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml), 安装包模式下修改 `hertzbeat/config/application.yml` -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - greptime: - enabled: true - endpoint: localhost:4001 -``` + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + greptime: + enabled: true + endpoint: localhost:4001 + ``` 2. 重启 HertzBeat -```shell -docker restart hertzbeat -``` + ```shell + docker restart hertzbeat + ``` #### 观察验证效果 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md index bd69544d028..b937eff35ca 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-08-14-hertzbeat-v1.4.0.md @@ -77,23 +77,23 @@ HertzBeat 提供云边协同能力,可以在多个隔离网络部署边缘采 1. 
`docker` 环境仅需一条命令即可开始

-```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat```

-```或者使用 quay.io (若 dockerhub 网络链接超时)``` + ```或者使用 quay.io (若 dockerhub 网络链接超时)```

-```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat```

2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat`

3. 部署采集器集群

-```
-docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_IP=1158 --name hertzbeat-collector apache/hertzbeat-collector
-```
+ ```shell
+ docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector
+ ```

-- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。
-- `-e MANAGER_IP=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。
-- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。
+ - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。
+ - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。
+ - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。

更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy)

diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md
index a0f73af7921..054f7fa9975 100644
--- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md
+++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-09-26-hertzbeat-v1.4.1.md
@@ -70,23 +70,23 @@ keywords: [open source monitoring system, alerting system, Linux monitoring]

1. 
`docker` 环境仅需一条命令即可开始 -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```或者使用 quay.io (若 dockerhub 网络链接超时)``` + ```或者使用 quay.io (若 dockerhub 网络链接超时)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 3. 部署采集器集群 -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 -- `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 -- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 + - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 + - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md index db282b4feee..2ffa09c2f1a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-11-12-hertzbeat-v1.4.2.md @@ -47,23 +47,23 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 1. 
`docker` 环境仅需一条命令即可开始 -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```或者使用 quay.io (若 dockerhub 网络链接超时)``` + ```或者使用 quay.io (若 dockerhub 网络链接超时)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 3. 部署采集器集群 -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 -- `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 -- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 + - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 + - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md index a5f0cdd27c6..3a07c44a9a3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-12-11-hertzbeat-v1.4.3.md @@ -42,23 +42,23 @@ keywords: [open source monitoring system, alerting system] 1. 
`docker` 环境仅需一条命令即可开始 -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```或者使用 quay.io (若 dockerhub 网络链接超时)``` + ```或者使用 quay.io (若 dockerhub 网络链接超时)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 3. 部署采集器集群 -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 -- `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 -- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 + - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 + - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md index 1067f96fe51..f233e3dc7c4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-01-18-hertzbeat-v1.4.4.md @@ -43,23 +43,23 @@ keywords: [open source monitoring system, alerting system] 1. 
`docker` 环境仅需一条命令即可开始 -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```或者使用 quay.io (若 dockerhub 网络链接超时)``` + ```或者使用 quay.io (若 dockerhub 网络链接超时)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 3. 部署采集器集群 -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 -- `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 -- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 + - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 + - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](https://hertzbeat.com/docs/start/docker-deploy) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md index 5d64d7933a9..cb7c0af9d48 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-05-09-hertzbeat-ospp-subject-introduction.md @@ -81,10 +81,7 @@ **要求:** 1. 使⽤ Java 基于 UDP 协议实现原⽣的 IPMI2 协议(查询部分),不依赖任何第三⽅包。 -2. 利⽤实现的 IPMI2 协议查询开启 IPMI 的服务器的各项指标信息,包括主板信息、⽹卡信息、电源信息、⻛扇信 - -息、温度传感器信息和时钟信息。 - +2. 
利⽤实现的 IPMI2 协议查询开启 IPMI 的服务器的各项指标信息,包括主板信息、⽹卡信息、电源信息、⻛扇信息、温度传感器信息和时钟信息。 3. 对查询到的指标信息进⾏抽象和规范化处理,实现配置化管理(可选)。 4. 输出详细的项⽬⽂档,包括设计思路、实现细节、使⽤说明等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md index b75648284ff..708d6d7b9e3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2024-06-11-hertzbeat-v1.6.0-update.md @@ -8,184 +8,184 @@ 1. 升级Java环境 -由于1.6.0版本使用Java17,且安装包不再提供内置jdk的版本,参考以下情况使用新版Hertzbeat。 + 由于1.6.0版本使用Java17,且安装包不再提供内置jdk的版本,参考以下情况使用新版Hertzbeat。 -- 当你的服务器中默认环境变量为Java17时,这一步你无需任何操作。 -- 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**没有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并在搜索引擎搜索如何设置新的环境变量指向新的Java17。 -- 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并将解压后的文件夹重命名为java,复制到Hertzbeat的解压目录下。 + - 当你的服务器中默认环境变量为Java17时,这一步你无需任何操作。 + - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**没有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并在搜索引擎搜索如何设置新的环境变量指向新的Java17。 + - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并将解压后的文件夹重命名为java,复制到Hertzbeat的解压目录下。 2. 
升级数据库 -打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), -选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件执行升级sql。 + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), + 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件执行升级sql。 3. 升级配置文件 -由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接使用新的yml配置文件,然后在自己的需求基础上进行修改。 - -- `application.yml`一般需要修改以下部分 - - 默认为: - -```yaml - datasource: - driver-class-name: org.h2.Driver - username: sa - password: 123456 - url: jdbc:h2:./data/hertzbeat;MODE=MYSQL - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: h2 - properties: - eclipselink: - logging: - level: SEVERE -``` - -如若修改为mysql数据库,给出一个示例: - -```yaml - datasource: - driver-class-name: com.mysql.cj.jdbc.Driver - username: root - password: root - url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: mysql - properties: - eclipselink: - logging: - level: SEVERE -``` - -- `sureness.yml`修改是可选的,一般在你需要修改账号密码时 - -```yaml -# account info config -# eg: admin has role [admin,user], password is hertzbeat -# eg: tom has role [user], password is hertzbeat -# eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] - - appId: lili - # credential = MD5(password + salt) - # plain password: hertzbeat - 
# attention: digest authentication does not support salted encrypted password accounts - credential: 94C6B34E7A199A9F9D4E1F208093B489 - salt: 123 - role: [user] -``` + 由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接使用新的yml配置文件,然后在自己的需求基础上进行修改。 + + - `application.yml`一般需要修改以下部分 + + 默认为: + + ```yaml + datasource: + driver-class-name: org.h2.Driver + username: sa + password: 123456 + url: jdbc:h2:./data/hertzbeat;MODE=MYSQL + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: h2 + properties: + eclipselink: + logging: + level: SEVERE + ``` + + 如若修改为mysql数据库,给出一个示例: + + ```yaml + datasource: + driver-class-name: com.mysql.cj.jdbc.Driver + username: root + password: root + url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: mysql + properties: + eclipselink: + logging: + level: SEVERE + ``` + + - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + + ```yaml + # account info config + # eg: admin has role [admin,user], password is hertzbeat + # eg: tom has role [user], password is hertzbeat + # eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 + account: + - appId: admin + credential: hertzbeat + role: [admin] + - appId: tom + credential: hertzbeat + role: [user] + - appId: guest + credential: hertzbeat + role: [guest] + - appId: lili + # credential = MD5(password + salt) + # plain password: hertzbeat + # attention: digest authentication does not support salted encrypted password accounts + credential: 94C6B34E7A199A9F9D4E1F208093B489 + salt: 123 + role: [user] + ``` 4. 
添加相应的数据库驱动 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动,复制到安装目录下`ext-lib`中。 -mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) -oracle(如果你要监控oracle,这两个驱动是必须的) -[https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) -[https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) -接下来,像之前那样运行启动脚本,即可体验最新的HertzBeat1.6.0! + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) + oracle(如果你要监控oracle,这两个驱动是必须的): + [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) + 接下来,像之前那样运行启动脚本,即可体验最新的HertzBeat1.6.0! ### Docker 方式升级 - Mysql数据库 1. 关闭 HertzBeat 容器 -``` -docker stop hertzbeat -``` + ```shell + docker stop hertzbeat + ``` 2. 升级数据库脚本 -打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), -选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 Mysql 执行升级sql。 + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), + 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 Mysql 执行升级sql。 3. 
升级配置文件 -由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 - -- `application.yml`一般需要修改以下部分 - - 默认为: - -```yaml - datasource: - driver-class-name: com.mysql.cj.jdbc.Driver - username: root - password: root - url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: mysql - properties: - eclipselink: - logging: - level: SEVERE -``` - -- `sureness.yml`修改是可选的,一般在你需要修改账号密码时 - -```yaml -# account info config -# eg: admin has role [admin,user], password is hertzbeat -# eg: tom has role [user], password is hertzbeat -# eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] - - appId: lili - # credential = MD5(password + salt) - # plain password: hertzbeat - # attention: digest authentication does not support salted encrypted password accounts - credential: 94C6B34E7A199A9F9D4E1F208093B489 - salt: 123 - role: [user] -``` + 由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 + + - `application.yml`一般需要修改以下部分 + + 默认为: + + ```yaml + datasource: + driver-class-name: com.mysql.cj.jdbc.Driver + username: root + password: root + url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: mysql + properties: + eclipselink: + logging: + level: SEVERE + ``` + + - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + + ```yaml + # account info config + # eg: admin has role [admin,user], 
password is hertzbeat + # eg: tom has role [user], password is hertzbeat + # eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 + account: + - appId: admin + credential: hertzbeat + role: [admin] + - appId: tom + credential: hertzbeat + role: [user] + - appId: guest + credential: hertzbeat + role: [guest] + - appId: lili + # credential = MD5(password + salt) + # plain password: hertzbeat + # attention: digest authentication does not support salted encrypted password accounts + credential: 94C6B34E7A199A9F9D4E1F208093B489 + salt: 123 + role: [user] + ``` 4. 添加相应的数据库驱动 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 -mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) -oracle(如果你要监控oracle,这两个驱动是必须的) -[https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) -[https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) -接下来,像之前那样 Docker 运行启动 HertzBeat,即可体验最新的HertzBeat1.6.0! 
+ mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) + oracle(如果你要监控oracle,这两个驱动是必须的): + [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) + 接下来,像之前那样 Docker 运行启动 HertzBeat,即可体验最新的HertzBeat1.6.0! ### Docker安装升级 - H2内置数据库(生产环境不推荐使用H2) 1. 关闭 HertzBeat 容器 -``` -docker stop hertzbeat -``` + ```shell + docker stop hertzbeat + ``` 2. 编辑H2数据库文件 @@ -193,75 +193,75 @@ docker stop hertzbeat 下载 h2 驱动 jar [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220) 使用 h2 驱动 jar 本地启动数据库 -``` -java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 -``` + ```shell + java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 + ``` -打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), -选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 H2 执行升级sql。 + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), + 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 H2 执行升级sql。 3. 
升级配置文件 -由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 - -- `application.yml`一般需要修改以下部分 - - 默认为: - -```yaml - datasource: - driver-class-name: org.h2.Driver - username: sa - password: 123456 - url: jdbc:h2:./data/hertzbeat;MODE=MYSQL - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: h2 - properties: - eclipselink: - logging: - level: SEVERE -``` - -- `sureness.yml`修改是可选的,一般在你需要修改账号密码时 - -```yaml -# account info config -# eg: admin has role [admin,user], password is hertzbeat -# eg: tom has role [user], password is hertzbeat -# eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] - - appId: lili - # credential = MD5(password + salt) - # plain password: hertzbeat - # attention: digest authentication does not support salted encrypted password accounts - credential: 94C6B34E7A199A9F9D4E1F208093B489 - salt: 123 - role: [user] -``` + 由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 + + - `application.yml`一般需要修改以下部分 + + 默认为: + + ```yaml + datasource: + driver-class-name: org.h2.Driver + username: sa + password: 123456 + url: jdbc:h2:./data/hertzbeat;MODE=MYSQL + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: h2 + properties: + eclipselink: + logging: + level: SEVERE + ``` + + - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + + ```yaml + # account info config + # eg: admin has role [admin,user], password is hertzbeat + # eg: tom has role [user], password is hertzbeat + # eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 + 
account: + - appId: admin + credential: hertzbeat + role: [admin] + - appId: tom + credential: hertzbeat + role: [user] + - appId: guest + credential: hertzbeat + role: [guest] + - appId: lili + # credential = MD5(password + salt) + # plain password: hertzbeat + # attention: digest authentication does not support salted encrypted password accounts + credential: 94C6B34E7A199A9F9D4E1F208093B489 + salt: 123 + role: [user] + ``` 4. 添加相应的数据库驱动 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 -mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) -oracle(如果你要监控oracle,这两个驱动是必须的) -[https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) -[https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) -接下来,像之前那样 Docker 运行启动,即可体验最新的HertzBeat1.6.0! + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) + oracle(如果你要监控oracle,这两个驱动是必须的): + [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) + 接下来,像之前那样 Docker 运行启动,即可体验最新的HertzBeat1.6.0! 
### 通过导出导入升级 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md index 361b4103011..eb1d4e1bd46 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md @@ -208,9 +208,9 @@ limitations under the License. 2. 确保代码的可读性和直观性 -- `annotation` 符号中的字符串不需要提取为常量。 + - `annotation` 符号中的字符串不需要提取为常量。 -- 被引用的 `package` 或 `resource` 名称不需要提取为常量。 + - 被引用的 `package` 或 `resource` 名称不需要提取为常量。 3. 未被重新分配的变量也必须声明为 final 类型。 @@ -252,17 +252,17 @@ limitations under the License. - 如果使用 `HashSet`,则返回 Set 2. 如果存在多线程,可以使用以下声明或返回类型: -```java -private CurrentHashMap map; -public CurrentHashMap funName(); -``` + ```java + private CurrentHashMap map; + public CurrentHashMap funName(); + ``` 3. 使用 `isEmpty()` 而不是 `length() == 0` 或者 `size() == 0` - 负面示例: ```java if (pathPart.length() == 0) { - return; + return; } ``` @@ -270,7 +270,7 @@ public CurrentHashMap funName(); ```java if (pathPart.isEmpty()) { - return; + return; } ``` @@ -297,89 +297,88 @@ public CurrentHashMap funName(); 以减少代码行深度并提高可读性,例如: - 联合或将逻辑合并到下一级调用中 -- 负面示例: + - 负面示例: -```java -if (isInsert) { -save(platform); -} else { -updateById(platform); -} -``` + ```java + if (isInsert) { + save(platform); + } else { + updateById(platform); + } + ``` -- 正面示例: + - 正面示例: -```java -saveOrUpdate(platform); -``` + ```java + saveOrUpdate(platform); + ``` - 合并条件 -- 负面示例: - -```java -if (expression1) { -if(expression2) { -...... -} -} + - 负面示例: -``` + ```java + if (expression1) { + if(expression2) { + // ...... + } + } + ``` -- 正面示例: + - 正面示例: - ```java - if (expression1 && expression2) { - ...... - } - ``` + ```java + if (expression1 && expression2) { + // ...... 
+ } + ``` - 反转条件 -- 负面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (condition1) { - ... - } else { - ... - } - } - ``` - -- 正面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` + - 负面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (condition1) { + // ... + } else { + // ... + } + } + ``` + + - 正面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (!condition1) { + // ... + return; + } + // ... + } + ``` - 使用单一变量或方法减少复杂的条件表达式 -- 负面示例: + - 负面示例: - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + // ... + } + ``` -- 正面示例: + - 正面示例: - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // containsSqlServer的定义 - ``` + ```java + if (containsSqlServer(dbType)) { + // .... + } + //..... + // containsSqlServer的定义 + ``` > 在未来,使用 `sonarlint` 和 `better highlights` 检查代码深度看起来是个不错的选择。 @@ -387,20 +386,20 @@ if(expression2) { 1. 方法缺少注释: -- `When`:该方法何时可以被调用 -- `How`:如何使用此方法以及如何传递参数等 -- `What`:此方法实现了哪些功能 -- `Note`:在调用此方法时开发人员应注意什么 + - `When`:该方法何时可以被调用 + - `How`:如何使用此方法以及如何传递参数等 + - `What`:此方法实现了哪些功能 + - `Note`:在调用此方法时开发人员应注意什么 2. 缺少必要的类头部描述注释。 -添加 `What`,`Note` 等,如上述 `1` 中提到的。 + 添加 `What`,`Note` 等,如上述 `1` 中提到的。 3. 在接口中的方法声明必须被注释。 -- 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 + - 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 -- 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 + - 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 4. 在注释行中的第一个词需要大写,如 `param` 行,`return` 行。 如果特殊引用作为主题不需要大写,需要注意特殊符号,例如引号。 @@ -410,31 +409,31 @@ if(expression2) { 1. 
更倾向于使用 `non-capturing` lambda(不包含对外部范围的引用的lambda)。 Capturing lambda 在每次调用时都需要创建一个新的对象实例。`Non-capturing` lambda 可以为每次调用使用相同的实例。 -- 负面示例: + - 负面示例: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` -- 正面示例: + - 正面示例: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. 考虑使用方法引用而不是内联lambda -- 负面示例: + - 负面示例: - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` -- 正面示例: + - 正面示例: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -452,127 +451,127 @@ if(expression2) { 1. 使用 `StringUtils.isBlank` 而不是 `StringUtils.isEmpty` -- 负面示例: + - 负面示例: - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. 使用 `StringUtils.isNotBlank` 而不是 `StringUtils.isNotEmpty` -- 负面示例: + - 负面示例: - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. 使用 `StringUtils.isAllBlank` 而不是 `StringUtils.isAllEmpty` -- 负面示例: + - 负面示例: - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` 类 1. 
枚举值比较 -- 负面示例: + - 负面示例: - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. 枚举类不需要实现 Serializable -- 负面示例: + - 负面示例: - ```java - public enum JobStatus implements Serializable { - ... - } - ``` + ```java + public enum JobStatus implements Serializable { + // ... + } + ``` -- 正面示例: + - 正面示例: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + // ... + } + ``` 3. 使用 `Enum.name()` 而不是 `Enum.toString()` -- 负面示例: + - 负面示例: - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` -- 正面示例: + - 正面示例: - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. 枚举类名称统一使用 Enum 后缀 -- 负面示例: + - 负面示例: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + // ... + } + ``` -- 正面示例: + - 正面示例: - ```java - public enum JobStatusEnum { - ... - } - ``` + ```java + public enum JobStatusEnum { + // ... + } + ``` ### 3.13 `Deprecated` 注解 @@ -581,7 +580,7 @@ if(expression2) { ```java @deprecated public void process(String input) { - ... + // ... } ``` @@ -590,7 +589,7 @@ public void process(String input) { ```java @Deprecated public void process(String input) { - ... + // ... } ``` @@ -598,43 +597,43 @@ public void process(String input) { 1. 使用 `占位符` 进行日志输出: -- 负面示例 + - 负面示例 - ```java - log.info("Deploy cluster request " + deployRequest); - ``` + ```java + log.info("Deploy cluster request " + deployRequest); + ``` -- 正面示例 + - 正面示例 - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` 2. 
打印日志时,注意选择 `日志级别` -当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 + 当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 -- 负面示例: + - 负面示例: - 假设当前日志级别为 `INFO`: + 假设当前日志级别为 `INFO`: - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` -- 正面示例: + - 正面示例: - 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: + 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 测试 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md index 7fe9bc2e194..776fcb96a5d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/contribution.md @@ -88,33 +88,33 @@ limitations under the License. 1. 首先您需要 Fork 目标仓库 [hertzbeat repository](https://github.com/apache/hertzbeat). 2. 然后 用 git 命令 将代码下载到本地: -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` + ```shell + git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended + ``` 3. 下载完成后,请参考目标仓库的入门指南或者 README 文件对项目进行初始化。 4. 接着,您可以参考如下命令进行代码的提交, 切换新的分支, 进行开发: -```shell -git checkout -b a-feature-branch #Recommended -``` + ```shell + git checkout -b a-feature-branch #Recommended + ``` 5. 提交 commit, commit 描述信息需要符合约定格式: [module name or type name]feature or bugfix or doc: custom message. 
-```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` + ```shell + git add + git commit -m '[docs]feature: necessary instructions' #Recommended + ``` 6. 推送到远程仓库 -```shell -git push origin a-feature-branch -``` + ```shell + git push origin a-feature-branch + ``` 7. 然后您就可以在 GitHub 上发起新的 PR (Pull Request)。 -请注意 PR 的标题需要符合我们的规范,并且在 PR 中写上必要的说明,来方便 Committer 和其他贡献者进行代码审查。 + 请注意 PR 的标题需要符合我们的规范,并且在 PR 中写上必要的说明,来方便 Committer 和其他贡献者进行代码审查。 ### 等待PR代码被合并 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md index 3e79960877c..f931dc556b8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md @@ -348,73 +348,73 @@ hi, i accept. Thanks for invitaion. 1. 下载iCLA申请表 -打开访问:[https://www.apache.org/licenses/#clas](https://www.apache.org/licenses/#clas) + 打开访问:[https://www.apache.org/licenses/#clas](https://www.apache.org/licenses/#clas) -找到`CLAs`点击进入页面 + 找到`CLAs`点击进入页面 -![](/img/docs/community/clas-web.png) + ![](/img/docs/community/clas-web.png) -找到`ICLA(个人贡献者许可协议)`,点击[Individual Contributor License Agreement](https://www.apache.org/licenses/icla.pdf) + 找到`ICLA(个人贡献者许可协议)`,点击[Individual Contributor License Agreement](https://www.apache.org/licenses/icla.pdf) -![](/img/docs/community/icla-web.png) + ![](/img/docs/community/icla-web.png) -跳转后,会显示需要签署的协议,这里可以在网页上直接填写,也可以下载后填写。 + 跳转后,会显示需要签署的协议,这里可以在网页上直接填写,也可以下载后填写。 -![](/img/docs/community/icla-pdf.png) + ![](/img/docs/community/icla-pdf.png) 2. 
填写iCLA -> tips: 签名时需要手写签名,其他的可以直接填写后打印。 + > tips: 签名时需要手写签名,其他的可以直接填写后打印。 -需要填写的内容: + 需要填写的内容: -![](/img/docs/community/icla-content-1.png) -![](/img/docs/community/icla-content-2.png) + ![](/img/docs/community/icla-content-1.png) + ![](/img/docs/community/icla-content-2.png) -在PDF中需要填写的字段: + 在PDF中需要填写的字段: -- `Full name` -- `Public name` -- `Postal Address` -- `Country` -- `E-Mail` -- `(optional) preferred Apache id(s)` -- `(optional) notify project` -- `Date` + - `Full name` + - `Public name` + - `Postal Address` + - `Country` + - `E-Mail` + - `(optional) preferred Apache id(s)` + - `(optional) notify project` + - `Date` -> 填写样例 -> ![](/img/docs/community/icla-content-3.png) -> ![](/img/docs/community/icla-content-4.png) + > 填写样例 + > ![](/img/docs/community/icla-content-3.png) + > ![](/img/docs/community/icla-content-4.png) 3. 打印并签署 -![](/img/docs/community/icla-content-5.png) + ![](/img/docs/community/icla-content-5.png) -`Please sign`字段需要将文件打印出来然后手写签名,内容为自己姓名的全拼。 + `Please sign`字段需要将文件打印出来然后手写签名,内容为自己姓名的全拼。 4. 将签名后的文件拍照或扫描转为PDF,并重命名为`姓名拼音-icla.pdf` 5. 发送邮件到指定邮箱 -邮件发送到`secretary@apache.org`,抄送到`private@hertzbeat.apache.org`。 + 邮件发送到`secretary@apache.org`,抄送到`private@hertzbeat.apache.org`。 -注意⚠️此邮件内容需要附加上之前的 committer 邀请邮件信息, 建议在之前的往来邮件点击回复然后修改标题和收件人等。 + 注意⚠️此邮件内容需要附加上之前的 committer 邀请邮件信息, 建议在之前的往来邮件点击回复然后修改标题和收件人等。 -**发送模板** + **发送模板** -主题:`Accept to become a Hertzbeat(incubating) committer` + 主题:`Accept to become a Hertzbeat(incubating) committer` -正文: + 正文: -``` -Hello Apache, - I am willing contribute to the ASF. The attachment is my ICLA information. My Github account is : https://github.com/xxxx. -Thanks ! -``` + ``` + Hello Apache, + I am willing contribute to the ASF. The attachment is my ICLA information. My Github account is : https://github.com/xxxx. + Thanks ! + ``` -大概1-5个工作日你就会收到一封来自`root@apache.org`的邮件。 + 大概1-5个工作日你就会收到一封来自`root@apache.org`的邮件。 -当您收到邮件标题为:`Welcome to the Apache Software Foundation`的邮件,恭喜你,你已经获取到Apache ID了! 
+ 当您收到邮件标题为:`Welcome to the Apache Software Foundation`的邮件,恭喜你,你已经获取到Apache ID了! ### 设置Apache密码 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md index 94e2ad54899..a6c741ba2e7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/activemq.md @@ -15,40 +15,40 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 1. 修改安装目录下的 `conf/activemq.xml` 文件,开启JMX -> 在 `broker` 标签中添加 `userJmx="true"` 属性 + > 在 `broker` 标签中添加 `userJmx="true"` 属性 -```xml - - - -``` + ```xml + + + + ``` 2. 修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 -将如下原配置信息 - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -更新为如下配置,⚠️注意修改`本机对外IP` - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` + 将如下原配置信息 + + 
```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` + + 更新为如下配置,⚠️注意修改`本机对外IP` + + ```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` 3. 重启 ACTIVEMQ 服务,在 HertzBeat 添加对应 ActiveMQ 监控即可,参数使用 JMX 配置的 IP 端口。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md index ba6b49bc58a..44e78f392da 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_dingtalk.md @@ -11,24 +11,24 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 1. 
**【钉钉桌面客户端】-> 【群设置】-> 【智能群助手】-> 【添加新建机器人-选自定义】-> 【设置机器人名称头像】-> 【注意⚠️设置自定义关键字: HertzBeat】 ->【添加成功后复制其WebHook地址】** -> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 + > 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 -![email](/img/docs/help/alert-notice-8.png) + ![email](/img/docs/help/alert-notice-8.png) 2. **【保存机器人的WebHook地址access_token值】** -> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` -> 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】** -![email](/img/docs/help/alert-notice-9.png) + ![email](/img/docs/help/alert-notice-9.png) 4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 钉钉机器人通知常见问题 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md index bb3c6287cd4..469b9596cbe 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_discord.md @@ -15,31 +15,31 @@ keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告 1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 -![bot](/img/docs/help/discord-bot-1.png) + ![bot](/img/docs/help/discord-bot-1.png) 2. 
在应用下创建机器人,获取机器人 Token -![bot](/img/docs/help/discord-bot-2.png) + ![bot](/img/docs/help/discord-bot-2.png) -![bot](/img/docs/help/discord-bot-3.png) + ![bot](/img/docs/help/discord-bot-3.png) 3. 授权机器人到聊天服务器 -> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` + > 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` -![bot](/img/docs/help/discord-bot-4.png) + ![bot](/img/docs/help/discord-bot-4.png) -> 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 + > 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 4. 查看您的聊天服务器是否已经加入机器人成员 -![bot](/img/docs/help/discord-bot-5.png) + ![bot](/img/docs/help/discord-bot-5.png) ### 开启开发者模式,获取频道 Channel ID 1. 个人设置 -> 高级设置 -> 开启开发者模式 -![bot](/img/docs/help/discord-bot-6.png) + ![bot](/img/docs/help/discord-bot-6.png) 2. 获取频道 Channel ID @@ -51,13 +51,13 @@ keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告 1. **【告警通知】->【新增接收人】 ->【选择 Discord 机器人通知方式】->【设置机器人Token和ChannelId】-> 【确定】** -![email](/img/docs/help/discord-bot-8.png) + ![email](/img/docs/help/discord-bot-8.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Discord 机器人通知常见问题 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md index 0f53b58e71d..7e4f59e7900 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_email.md @@ -11,29 +11,29 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] 1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** -![email](/img/docs/help/alert-notice-1.png) + ![email](/img/docs/help/alert-notice-1.png) 2. 
**【获取验证码】-> 【输入邮箱验证码】-> 【确定】** ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) + ![email](/img/docs/help/alert-notice-3.png) 3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 邮件通知常见问题 1. 自己内网部署的HertzBeat无法接收到邮件通知 -> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 + > HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 2. 云环境TanCloud无法接收到邮件通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确邮箱,是否已配置告警策略关联 -> 请查询邮箱的垃圾箱里是否把告警邮件拦截 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否配置正确邮箱,是否已配置告警策略关联 + > 请查询邮箱的垃圾箱里是否把告警邮件拦截 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md index b70c8b10c40..cc146c4f257 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_enterprise_wechat_app.md @@ -11,17 +11,17 @@ keywords: [开源告警系统, 开源监控告警系统, 企业微信应用告 1. **【企业微信后台管理】-> 【App管理】-> 【创建一个新的应用】-> 【设置应用信息】->【添加成功后复制应用的AgentId和Secret】** -![email](/img/docs/help/alert-wechat-1.jpg) + ![email](/img/docs/help/alert-wechat-1.jpg) 2. **【告警通知】->【新增接收人】 ->【选择企业微信应用通知方式】->【设置企业ID,企业应用id和应用的secret 】-> 【确定】** -![email](/img/docs/help/alert-wechat-2.jpg) + ![email](/img/docs/help/alert-wechat-2.jpg) 3. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** -![email](/img/docs/help/alert-wechat-3.jpg) + ![email](/img/docs/help/alert-wechat-3.jpg) ### 企业微信应用通知常见问题 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md index 5a6e95d7067..bc6f3672df4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_feishu.md @@ -13,16 +13,17 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > + > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** 4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 飞书机器人通知常见问题 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md index 5c5c38c56be..f4ef1913fcd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_slack.md @@ -19,19 +19,19 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 1. **【告警通知】->【新增接收人】 ->【选择 Slack Webhook 通知方式】->【设置 Webhook URL】-> 【确定】** -![email](/img/docs/help/slack-bot-1.png) + ![email](/img/docs/help/slack-bot-1.png) 2. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Slack 机器人通知常见问题 1. Slack 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md index d6bca9843a4..bcfe5d71a79 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_smn.md @@ -11,33 +11,33 @@ keywords: [ 告警华为云SMN通知, 开源告警系统, 开源监控告警系 1. **按照[华为云SMN官方文档](https://support.huaweicloud.com/qs-smn/smn_json.html)开通SMN服务并配置SMN** -![alert-notice-10](/img/docs/help/alert-notice-10.png) + ![alert-notice-10](/img/docs/help/alert-notice-10.png) 2. **保存SMN的主题URN** -![alert-notice-11](/img/docs/help/alert-notice-11.png) + ![alert-notice-11](/img/docs/help/alert-notice-11.png) 3. **按照[华为云签名文档](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html)获取AK、SK和项目ID** -![alert-notice-12](/img/docs/help/alert-notice-12.png) + ![alert-notice-12](/img/docs/help/alert-notice-12.png) -![alert-notice-13](/img/docs/help/alert-notice-13.png) + ![alert-notice-13](/img/docs/help/alert-notice-13.png) 4. **【告警通知】->【新增接收人】 ->【选择华为云SMN通知方式】->【设置华为云SMN AK、SK等配置】-> 【确定】** -![alert-notice-14](/img/docs/help/alert-notice-14.png) + ![alert-notice-14](/img/docs/help/alert-notice-14.png) 5. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 华为云SMN通知常见问题 1. 
华为云SMN群未收到告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否正确配置华为云SMN AK、SK等配置,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md index dfb1aa48d8a..9760660bdff 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_telegram.md @@ -15,32 +15,32 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] 1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` -![telegram-bot](/img/docs/help/telegram-bot-1.png) + ![telegram-bot](/img/docs/help/telegram-bot-1.png) 2. 获取接收人的 `User ID` -**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, -访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` - -```json -{ - "ok":true, - "result":[ - { - "update_id":632299191, - "message":{ - "from":{ - "id": "User ID" - }, - "chat":{ - }, - "date":1673858065, - "text":"111" + **使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, + 访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` + + ```json + { + "ok":true, + "result":[ + { + "update_id":632299191, + "message":{ + "from":{ + "id": "User ID" + }, + "chat":{ + }, + "date":1673858065, + "text":"111" + } } - } - ] -} -``` + ] + } + ``` 3. 记录保存我们获得的 `Token` 和 `User Id` @@ -48,20 +48,20 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] 1. **【告警通知】->【新增接收人】 ->【选择 Telegram 机器人通知方式】->【设置机器人Token和UserId】-> 【确定】** -![email](/img/docs/help/telegram-bot-2.png) + ![email](/img/docs/help/telegram-bot-2.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Telegram 机器人通知常见问题 1. Telegram 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 +> 请排查在告警中心是否已有触发的告警信息 +> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 > UserId 应为消息接收对象的UserId 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md index 272c59cfd4c..e07a3e61cce 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_webhook.md @@ -11,13 +11,13 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 1. **【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** -![email](/img/docs/help/alert-notice-5.png) + ![email](/img/docs/help/alert-notice-5.png) 2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### WebHook回调POST请求体BODY内容 @@ -60,7 +60,7 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 1. WebHook回调未生效 -> 请查看告警中心是否已经产生此条告警信息 +> 请查看告警中心是否已经产生此条告警信息 > 请排查配置的WebHook回调地址是否正确 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md index 5c73ffee2a6..2fa9ae6ea0e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/alert_wework.md @@ -11,28 +11,29 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 1. 
**【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -![email](/img/docs/help/alert-notice-6.jpg) + ![email](/img/docs/help/alert-notice-6.jpg) 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > + > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** -![email](/img/docs/help/alert-notice-7.png) + ![email](/img/docs/help/alert-notice-7.png) 4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 企业微信机器人通知常见问题 1. 企业微信群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 +> 请排查在告警中心是否已有触发的告警信息 > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md index 1abcb732289..ba5aecde81b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/dynamic_tp.md @@ -11,53 +11,53 @@ keywords: [开源监控系统, 开源中间件监控, DynamicTp线程池监控] 1. 集成使用 `DynamicTp` -`DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 + `DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 -集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) + 集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) 2. 
开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` + ```yaml + management: + endpoints: + web: + exposure: + include: '*' + ``` -测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: + 测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4 GB", - "totalMemory": "444 MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81 GB" - } -] -``` + ```json + [ + { + "poolName": "commonExecutor", + "corePoolSize": 1, + "maximumPoolSize": 1, + "queueType": "LinkedBlockingQueue", + "queueCapacity": 2147483647, + "queueSize": 0, + "fair": false, + "queueRemainingCapacity": 2147483647, + "activeCount": 0, + "taskCount": 0, + "completedTaskCount": 0, + "largestPoolSize": 0, + "poolSize": 0, + "waitTaskCount": 0, + "rejectCount": 0, + "rejectHandlerName": null, + "dynamic": false, + "runTimeoutCount": 0, + "queueTimeoutCount": 0 + }, + { + "maxMemory": "4 GB", + "totalMemory": "444 MB", + "freeMemory": "250.34 MB", + "usableMemory": "3.81 GB" + } + ] + ``` 3. 在HertzBeat中间件监控下添加DynamicTp监控即可 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md index 8bb3bbb25e0..e266fa96feb 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md @@ -17,29 +17,29 @@ keywords: [开源监控系统, 开源数据库监控, IoTDB数据库监控] 1. 
metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server -``` -# 是否启动监控模块,默认为false -enableMetric: true - -# 是否启用操作延迟统计 -enablePerformanceStat: false - -# 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 -metricReporterList: - - JMX - - PROMETHEUS - -# 底层使用的metric架构,可选参数:[MICROMETER, DROPWIZARD] -monitorType: MICROMETER - -# 初始化metric的级别,可选参数: [CORE, IMPORTANT, NORMAL, ALL] -metricLevel: IMPORTANT - -# 预定义的指标集, 可选参数: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] -predefinedMetrics: - - JVM - - FILE -``` + ``` + # 是否启动监控模块,默认为false + enableMetric: true + + # 是否启用操作延迟统计 + enablePerformanceStat: false + + # 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 + metricReporterList: + - JMX + - PROMETHEUS + + # 底层使用的metric架构,可选参数:[MICROMETER, DROPWIZARD] + monitorType: MICROMETER + + # 初始化metric的级别,可选参数: [CORE, IMPORTANT, NORMAL, ALL] + metricLevel: IMPORTANT + + # 预定义的指标集, 可选参数: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] + predefinedMetrics: + - JVM + - FILE + ``` 2. 重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md index f17a60a9b9f..27e9c63b3ba 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/issue.md @@ -8,25 +8,25 @@ sidebar_label: 常见问题 1. **页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名** -> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http + > 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http 2. **网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK** -> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) + > 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 
安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 + > docker安装默认启用无此问题 + > 详见 4. 配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 请参考下面几点排查问题: -> 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 -> 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 -> 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine + > 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 + > 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 + > 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine 5. 配置http api监控,用于进行业务接口探测,确保业务可以用,另外接口有进行token鉴权校验,"Authorization:Bearer eyJhbGciOiJIUzI1....",配置后测试,提示“StatusCode 401”。服务端应用收到的token为"Authorization:Bearer%20eyJhbGciOiJIUzI1....",hertzbeat对空格进行转义为“%20”,服务器没有转义导致鉴权失败,建议转义功能作为可选项。 @@ -35,31 +35,31 @@ sidebar_label: 常见问题 1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP + > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. 
**按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 + > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 + > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 1. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 + > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 + > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md index 31e297703fc..5e5603783cc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/jetty.md @@ -19,33 +19,33 @@ keywords: [开源监控系统, 开源中间件监控, Jetty应用服务器监控 1. 
在 Jetty 启动 JMX JMX-REMOTE 模块 -```shell -java -jar $JETTY_HOME/start.jar --add-module=jmx -java -jar $JETTY_HOME/start.jar --add-module=jmx-remote -``` + ```shell + java -jar $JETTY_HOME/start.jar --add-module=jmx + java -jar $JETTY_HOME/start.jar --add-module=jmx-remote + ``` -命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 + 命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 2. 编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 -**`localhost` 需修改为对外暴露 IP** - -```text -## The host/address to bind the RMI server to. -# jetty.jmxremote.rmiserverhost=localhost - -## The port the RMI server listens to (0 means a random port is chosen). -# jetty.jmxremote.rmiserverport=1099 - -## The host/address to bind the RMI registry to. -# jetty.jmxremote.rmiregistryhost=localhost - -## The port the RMI registry listens to. -# jetty.jmxremote.rmiregistryport=1099 - -## The host name exported in the RMI stub. --Djava.rmi.server.hostname=localhost -``` + **`localhost` 需修改为对外暴露 IP** + + ```text + ## The host/address to bind the RMI server to. + # jetty.jmxremote.rmiserverhost=localhost + + ## The port the RMI server listens to (0 means a random port is chosen). + # jetty.jmxremote.rmiserverport=1099 + + ## The host/address to bind the RMI registry to. + # jetty.jmxremote.rmiregistryhost=localhost + + ## The port the RMI registry listens to. + # jetty.jmxremote.rmiregistryport=1099 + + ## The host name exported in the RMI stub. + -Djava.rmi.server.hostname=localhost + ``` 3. 重启 Jetty Server 即可。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md index a79bb0e91c2..8663360a9ad 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kafka.md @@ -17,16 +17,16 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 
修改 Kafka 启动脚本 -修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` -在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 - -```shell -export JMX_PORT=9999; -export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management.jmxremote.rmi.port=9999 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"; - -# 这是最后一行本来就存在的 -# exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" -``` + 修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` + 在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 + + ```shell + export JMX_PORT=9999; + export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management.jmxremote.rmi.port=9999 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"; + + # 这是最后一行本来就存在的 + # exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" + ``` 3. 重启 Kafka 服务 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md index cc4c7254afe..162262ab8cd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md @@ -17,15 +17,15 @@ keywords: [开源监控系统, 开源Kubernetes监控] 1. 创建service account并绑定默认cluster-admin管理员集群角色 -```kubectl create serviceaccount dashboard-admin -n kube-system``` + ```kubectl create serviceaccount dashboard-admin -n kube-system``` 2. 
用户授权 -```shell -kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin -kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' -kubectl describe secret {secret} -n kube-system -``` + ```shell + kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin + kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' + kubectl describe secret {secret} -n kube-system + ``` ### 方式二 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md index 0b9b96b6099..0b3cf704589 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md @@ -14,9 +14,9 @@ keywords: [开源监控系统, 中间件监控, Nacos分布式监控] 1. 按照[部署文档](https://nacos.io/zh-cn/docs/deployment.html)搭建好Nacos集群。 2. 配置application.properties文件,暴露metrics数据。 -``` -management.endpoints.web.exposure.include=* -``` + ``` + management.endpoints.web.exposure.include=* + ``` 3. 访问```{ip}:8848/nacos/actuator/prometheus```,查看是否能访问到metrics数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md index 8c81c5a82c2..50837f79394 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nginx.md @@ -17,45 +17,45 @@ keywords: [开源监控工具, 开源Java监控工具, 监控Nginx指标] 1. 检查是否已添加 `ngx_http_stub_status_module` -```shell -nginx -V -``` + ```shell + nginx -V + ``` -查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 + 查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 2. 
编译安装 Nginx, 添加 `ngx_http_stub_status_module` 模块 -下载 Nginx 并解压,在目录下执行 + 下载 Nginx 并解压,在目录下执行 -```shell -./configure --prefix=/usr/local/nginx --with-http_stub_status_module - -make && make install -``` + ```shell + ./configure --prefix=/usr/local/nginx --with-http_stub_status_module + + make && make install + ``` 3. 修改 Nginx 配置文件 -修改 `nginx.conf` 文件,添加监控模块暴露端点,如下配置: + 修改 `nginx.conf` 文件,添加监控模块暴露端点,如下配置: -```shell -# modify nginx.conf -server { - listen 80; # port - server_name localhost; - location /nginx-status { - stub_status on; - access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } -} -``` + ```shell + # modify nginx.conf + server { + listen 80; # port + server_name localhost; + location /nginx-status { + stub_status on; + access_log on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } + } + ``` 4. 重新加载 Nginx -```shell -nginx -s reload -``` + ```shell + nginx -s reload + ``` 5. 在浏览器访问 `http://localhost/nginx-status` 即可查看 Nginx 监控状态信息。 @@ -63,48 +63,48 @@ nginx -s reload 1. 安装 `ngx_http_reqstat_module` 模块 -```shell -# install `ngx_http_reqstat_module` -wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip - -unzip ngx_req_status.zip - -patch -p1 < ../ngx_req_status-master/write_filter.patch - -./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master - -make -j2 - -make install -``` + ```shell + # install `ngx_http_reqstat_module` + wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip + + unzip ngx_req_status.zip + + patch -p1 < ../ngx_req_status-master/write_filter.patch + + ./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master + + make -j2 + + make install + ``` 2. 
修改 Nginx 配置文件 -修改 `nginx.conf` 文件,添加状态模块暴露端点,如下配置: - -```shell -# modify nginx.conf -http { - req_status_zone server_name $server_name 256k; - req_status_zone server_addr $server_addr 256k; - - req_status server_name server_addr; - - server { - location /req-status { - req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + 修改 `nginx.conf` 文件,添加状态模块暴露端点,如下配置: + + ```shell + # modify nginx.conf + http { + req_status_zone server_name $server_name 256k; + req_status_zone server_addr $server_addr 256k; + + req_status server_name server_addr; + + server { + location /req-status { + req_status_show on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } } } -} -``` + ``` 3. 重新加载 Nginx -```shell -nginx -s reload -``` + ```shell + nginx -s reload + ``` 4. 在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md index 2210a2452e0..7e4ff6383d7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/rabbitmq.md @@ -14,9 +14,9 @@ keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间 1. 开启 Management 插件,或使用自开启版本 -```shell -rabbitmq-plugins enable rabbitmq_management -``` + ```shell + rabbitmq-plugins enable rabbitmq_management + ``` 2. 浏览器访问 ,默认账户密码 `guest/guest`. 成功登录即开启成功。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md index 87bb81b7800..40710bb3986 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/shenyu.md @@ -17,27 +17,27 @@ keywords: [开源监控系统, 开源消息中间件监控, ShenYu网关监控 1. 
在网关的 pom.xml 文件中添加 metrics 的依赖。 -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` + ```xml + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + + ``` 2. 在网关的配置yaml文件中编辑如下内容: -```yaml -shenyu: - metrics: - enabled: true #设置为 true 表示开启 - name : prometheus - host: 127.0.0.1 #暴露的ip - port: 8090 #暴露的端口 - jmxConfig: #jmx配置 - props: - jvm_enabled: true #开启jvm的监控指标 -``` + ```yaml + shenyu: + metrics: + enabled: true #设置为 true 表示开启 + name : prometheus + host: 127.0.0.1 #暴露的ip + port: 8090 #暴露的端口 + jmxConfig: #jmx配置 + props: + jvm_enabled: true #开启jvm的监控指标 + ``` 最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md index db6043f104a..9b8e3427f6b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/zookeeper.md @@ -14,23 +14,23 @@ keywords: [开源监控系统, Zookeeper监控监控] 1. 加白名单步骤 -> 1.找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` -> -> 2.配置文件中加入以下命令 + 1. 找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` -```shell -# 将需要的命令添加到白名单中 -4lw.commands.whitelist=stat, ruok, conf, isro + 2. 配置文件中加入以下命令 -# 将所有命令添加到白名单中 -4lw.commands.whitelist=* -``` + ```shell + # 将需要的命令添加到白名单中 + 4lw.commands.whitelist=stat, ruok, conf, isro + + # 将所有命令添加到白名单中 + 4lw.commands.whitelist=* + ``` -> 3.重启服务 + 3. 重启服务 -```shell -zkServer.sh restart -``` + ```shell + zkServer.sh restart + ``` 2. 
netcat 协议 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md index 95bedddc350..6c30086e1b8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md @@ -16,61 +16,61 @@ sidebar_label: 常见参数配置 1. 配置短信发送服务器 -> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 + > 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 -在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) + 在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) -```yaml -common: - sms: - tencent: - secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY - secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA - app-id: 1435441637 - sign-name: 赫兹跳动 - template-id: 1343434 -``` + ```yaml + common: + sms: + tencent: + secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY + secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA + app-id: 1435441637 + sign-name: 赫兹跳动 + template-id: 1343434 + ``` -1.1 腾讯云短信创建签名(sign-name) -![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) + 1.1 腾讯云短信创建签名(sign-name) + ![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) -1.2 腾讯云短信创建正文模板(template-id) + 1.2 腾讯云短信创建正文模板(template-id) -``` -监控:{1},告警级别:{2}。内容:{3} -``` + ``` + 监控:{1},告警级别:{2}。内容:{3} + ``` -![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) + ![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) -1.3 腾讯云短信创建应用(app-id) -![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) + 1.3 腾讯云短信创建应用(app-id) + ![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) -1.4 腾讯云访问管理(secret-id、secret-key) 
-![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) + 1.4 腾讯云访问管理(secret-id、secret-key) + ![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) 2. 配置告警自定义参数 -```yaml -alerter: - # 自定义控制台地址 - console-url: https://console.tancloud.io -``` + ```yaml + alerter: + # 自定义控制台地址 + console-url: https://console.tancloud.io + ``` 3. 使用外置redis代替内存存储实时指标数据 -> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 - -注意⚠️ `memory.enabled: false, redis.enabled: true` - -```yaml -warehouse: - store: - memory: - enabled: false - init-size: 1024 - redis: - enabled: true - host: 127.0.0.1 - port: 6379 - password: 123456 -``` + > 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 + + 注意⚠️ `memory.enabled: false, redis.enabled: true` + + ```yaml + warehouse: + store: + memory: + enabled: false + init-size: 1024 + redis: + enabled: true + host: 127.0.0.1 + port: 6379 + password: 123456 + ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md index 06ae9bc2f29..ff2f22d3adc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md @@ -15,47 +15,47 @@ sidebar_label: Docker Compose方式安装 1. 下载启动脚本包 -从 [下载地址](https://github.com/apache/hertzbeat/releases/download/v1.6.0/apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz) 下载安装脚本包 `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` + 从 [下载地址](https://github.com/apache/hertzbeat/releases/download/v1.6.0/apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz) 下载安装脚本包 `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` 2. 
选择使用 HertzBeat + PostgreSQL + VictoriaMetrics 方案 -:::tip + :::tip -- `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` 解压后包含多个部署方案,这里我们推荐选择 `hertzbeat-postgresql-victoria-metrics` 方案。 -- 其它部署方式请详细阅读各个部署方案的 README.md 文件, MySQL 方案需要自行准备 MySQL 驱动包。 + - `apache-hertzbeat-xxx-incubating-docker-compose.tar.gz` 解压后包含多个部署方案,这里我们推荐选择 `hertzbeat-postgresql-victoria-metrics` 方案。 + - 其它部署方式请详细阅读各个部署方案的 README.md 文件, MySQL 方案需要自行准备 MySQL 驱动包。 -::: + ::: -- 解压脚本包 + - 解压脚本包 -``` -tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz -``` + ``` + tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz + ``` -- 进入解压目录, 选择 `HertzBeat + PostgreSQL + VictoriaMetrics` 一键部署 + - 进入解压目录, 选择 `HertzBeat + PostgreSQL + VictoriaMetrics` 一键部署 -``` -cd apache-hertzbeat-1.6.0-incubating-docker-compose -cd hertzbeat-postgresql-victoria-metrics -``` + ``` + cd apache-hertzbeat-1.6.0-incubating-docker-compose + cd hertzbeat-postgresql-victoria-metrics + ``` -- 一键启动 + - 一键启动 -> 在 `hertzbeat-postgresql-victoria-metrics` 目录下执行以下命令 + > 在 `hertzbeat-postgresql-victoria-metrics` 目录下执行以下命令 -``` -docker-compose up -d -``` + ``` + docker-compose up -d + ``` -- 查看服务状态 + - 查看服务状态 -> 查看各个容器的运行状态,up 为正常运行状态 + > 查看各个容器的运行状态,up 为正常运行状态 -``` -docker-compose ps -``` + ``` + docker-compose ps + ``` -4. 开始探索 HertzBeat +3. 开始探索 HertzBeat 浏览器访问 即可开始探索使用,默认账户密码 admin/hertzbeat。 **HAVE FUN** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md index caf412441d7..894b8b79684 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-deploy.md @@ -17,37 +17,37 @@ sidebar_label: Docker方式安装 1. 
执行以下命令 -```shell -$ docker run -d -p 1157:1157 -p 1158:1158 \ - -v $(pwd)/data:/opt/hertzbeat/data \ - -v $(pwd)/logs:/opt/hertzbeat/logs \ - -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ - -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ - --restart=always \ - --name hertzbeat apache/hertzbeat -``` - -> 命令参数详解 - -- `docker run -d` : 通过 Docker 后台运行容器 -- `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口(前面是宿主机的端口号,后面是容器的端口号)。1157是页面端口,1158是集群端口。 -- `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化) 重要,挂载数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 -- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选) 挂载日志文件到本地主机方便查看 -- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选) 挂载配置文件到容器中(请确保本地已有此文件)。[下载源](https://github.com/apache/hertzbeat/raw/master/script/application.yml) -- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选) 挂载账户配置文件到容器中(请确保本地已有此文件)。[下载源](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) -- `-v $(pwd)/ext-lib:/opt/hertzbeat/ext-lib` : (可选) 挂载外部的第三方 JAR 包 [mysql-jdbc](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip) [oracle-jdbc](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) [oracle-i18n](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) -- `--name hertzbeat` : (可选) 命名容器名称为 hertzbeat -- `--restart=always` : (可选) 配置容器自动重启。 -- `apache/hertzbeat` : 使用[官方应用镜像](https://hub.docker.com/r/apache/hertzbeat)来启动容器, 若网络超时可用`quay.io/tancloud/hertzbeat`代替。 - -:::tip - -- 标记为可选的参数,非必填项,若不需要则删除。 -- 此将容器的 1157,1158 端口映射到宿主机的 1157,1158 端口上。若宿主机该端口已被占用,则需修改主机映射端口。 -- 挂载文件时,前面参数为你自定义本地文件地址,后面参数为容器内文件地址。挂载时请确保你本地已有此文件。 -- 可执行```docker update --restart=always hertzbeat```配置容器自动重启。 - -::: + ```shell + $ docker run -d -p 1157:1157 -p 1158:1158 \ + -v $(pwd)/data:/opt/hertzbeat/data \ + -v $(pwd)/logs:/opt/hertzbeat/logs \ + -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ + -v 
$(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ + --restart=always \ + --name hertzbeat apache/hertzbeat + ``` + + > 命令参数详解 + + - `docker run -d` : 通过 Docker 后台运行容器 + - `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口(前面是宿主机的端口号,后面是容器的端口号)。1157是页面端口,1158是集群端口。 + - `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化) 重要,挂载数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 + - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选) 挂载日志文件到本地主机方便查看 + - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选) 挂载配置文件到容器中(请确保本地已有此文件)。[下载源](https://github.com/apache/hertzbeat/raw/master/script/application.yml) + - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选) 挂载账户配置文件到容器中(请确保本地已有此文件)。[下载源](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) + - `-v $(pwd)/ext-lib:/opt/hertzbeat/ext-lib` : (可选) 挂载外部的第三方 JAR 包 [mysql-jdbc](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip) [oracle-jdbc](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) [oracle-i18n](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar) + - `--name hertzbeat` : (可选) 命名容器名称为 hertzbeat + - `--restart=always` : (可选) 配置容器自动重启。 + - `apache/hertzbeat` : 使用[官方应用镜像](https://hub.docker.com/r/apache/hertzbeat)来启动容器, 若网络超时可用`quay.io/tancloud/hertzbeat`代替。 + + :::tip + + - 标记为可选的参数,非必填项,若不需要则删除。 + - 此将容器的 1157,1158 端口映射到宿主机的 1157,1158 端口上。若宿主机该端口已被占用,则需修改主机映射端口。 + - 挂载文件时,前面参数为你自定义本地文件地址,后面参数为容器内文件地址。挂载时请确保你本地已有此文件。 + - 可执行```docker update --restart=always hertzbeat```配置容器自动重启。 + + ::: 2. 开始探索 HertzBeat 浏览器访问 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 @@ -63,34 +63,34 @@ HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数 1. 
执行以下命令 -```shell -$ docker run -d \ - -e IDENTITY=custom-collector-name \ - -e MODE=public \ - -e MANAGER_HOST=127.0.0.1 \ - -e MANAGER_PORT=1158 \ - --name hertzbeat-collector apache/hertzbeat-collector -``` - -> 命令参数详解 - -- `docker run -d` : 通过 Docker 后台运行容器 -- `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。注意多采集器时名称需保证唯一性。 -- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 -- `-e MANAGER_HOST=127.0.0.1` : 重要, 配置连接的 HertzBeat Server 地址,127.0.0.1 需替换为 HertzBeat Server 对外 IP 地址。 -- `-e MANAGER_PORT=1158` : (可选) 配置连接的 HertzBeat Server 端口,默认 1158. -- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选)挂载日志文件到本地主机方便查看 -- `--name hertzbeat-collector` : 命名容器名称为 hertzbeat-collector -- `apache/hertzbeat-collector` : 使用[官方应用镜像](https://hub.docker.com/r/apache/hertzbeat-collector)来启动容器, 若网络超时可用`quay.io/tancloud/hertzbeat-collector`代替。 + ```shell + $ docker run -d \ + -e IDENTITY=custom-collector-name \ + -e MODE=public \ + -e MANAGER_HOST=127.0.0.1 \ + -e MANAGER_PORT=1158 \ + --name hertzbeat-collector apache/hertzbeat-collector + ``` -:::tip + > 命令参数详解 -- `MANAGER_HOST=127.0.0.1` 中的 `127.0.0.1` 需被替换为 HertzBeat Server 对外 IP 地址。 -- 标记为可选的参数,非必填项,若不需要则删除。 -- 挂载文件时,前面参数为你自定义本地文件地址,后面参数为容器内文件地址。挂载时请确保你本地已有此文件。 -- 可执行```docker update --restart=always hertzbeat-collector```配置容器自动重启。 + - `docker run -d` : 通过 Docker 后台运行容器 + - `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。注意多采集器时名称需保证唯一性。 + - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 + - `-e MANAGER_HOST=127.0.0.1` : 重要, 配置连接的 HertzBeat Server 地址,127.0.0.1 需替换为 HertzBeat Server 对外 IP 地址。 + - `-e MANAGER_PORT=1158` : (可选) 配置连接的 HertzBeat Server 端口,默认 1158. 
+ - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选)挂载日志文件到本地主机方便查看 + - `--name hertzbeat-collector` : 命名容器名称为 hertzbeat-collector + - `apache/hertzbeat-collector` : 使用[官方应用镜像](https://hub.docker.com/r/apache/hertzbeat-collector)来启动容器, 若网络超时可用`quay.io/tancloud/hertzbeat-collector`代替。 -::: + :::tip + + - `MANAGER_HOST=127.0.0.1` 中的 `127.0.0.1` 需被替换为 HertzBeat Server 对外 IP 地址。 + - 标记为可选的参数,非必填项,若不需要则删除。 + - 挂载文件时,前面参数为你自定义本地文件地址,后面参数为容器内文件地址。挂载时请确保你本地已有此文件。 + - 可执行```docker update --restart=always hertzbeat-collector```配置容器自动重启。 + + ::: 2. 开始探索 HertzBeat Collector 浏览器访问 即可开始探索使用,默认账户密码 admin/hertzbeat。 @@ -106,45 +106,45 @@ $ docker run -d \ 1. MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP + > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. 按照流程部署,访问 无界面 请参考下面几点排查问题: -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 + > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - -> 安装初始化此时序数据库 + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - + > 安装初始化此时序数据库 4. 安装配置了时序数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 请检查配置的时许数据库参数是否正确 -> 时序数据库对应的 enable 是否设置为true -> 注意⚠️若hertzbeat和外置数据库都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 + > 请检查配置的时许数据库参数是否正确 + > 时序数据库对应的 enable 是否设置为true + > 注意⚠️若hertzbeat和外置数据库都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP + > 可根据logs目录下启动日志排查 5. 
application.yml 是干什么用的
-> 此文件是HertzBeat的配置文件,用于配置HertzBeat的各种参数,如数据库连接信息,时序数据库配置等。
+ > 此文件是HertzBeat的配置文件,用于配置HertzBeat的各种参数,如数据库连接信息,时序数据库配置等。
-下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml
-下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml)
+ 下载 `application.yml` 文件到主机目录下,例如: $(pwd)/application.yml
+ 下载源 [github/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml)
-- 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数
-- 若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change))
-- 若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.victoria-metrics`参数 具体步骤参见 [使用victoria-metrics存储指标数据](victoria-metrics-init)
+ - 若需使用邮件发送告警,需替换 `application.yml` 里面的邮件服务器参数
+ - 若需使用外置Mysql数据库替换内置H2数据库,需替换`application.yml`里面的`spring.datasource`参数 具体步骤参见 [H2数据库切换为MYSQL](mysql-change)
+ - 若需使用时序数据库TDengine来存储指标数据,需替换`application.yml`里面的`warehouse.store.victoria-metrics`参数 具体步骤参见 [使用victoria-metrics存储指标数据](victoria-metrics-init)
 6. 
sureness.yml 是干什么用的 -> 此文件是HertzBeat的用户配置文件,用于配置HertzBeat的用户信息,如账户密码等。 + > 此文件是HertzBeat的用户配置文件,用于配置HertzBeat的用户信息,如账户密码等。 -HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat -若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 -下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml -下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) -具体修改步骤参考 [配置修改账户密码](account-modify) + HertzBeat默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过配置 `sureness.yml` 实现,若无此需求可忽略此步骤 + 下载 `sureness.yml` 文件到主机目录下,例如: $(pwd)/sureness.yml + 下载源 [github/script/sureness.yml](https://github.com/apache/hertzbeat/raw/master/script/sureness.yml) + 具体修改步骤参考 [配置修改账户密码](account-modify) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md index 5928c7b826a..1f24f70f3e5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md @@ -16,32 +16,32 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### 通过Docker方式安装GreptimeDB > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. 
Docker安装GreptimeDB - -```shell -$ docker run -p 127.0.0.1:4000-4003:4000-4003 \ - -v "$(pwd)/greptimedb:/tmp/greptimedb" \ - --name greptime --rm \ - greptime/greptimedb:latest standalone start \ - --http-addr 0.0.0.0:4000 \ - --rpc-addr 0.0.0.0:4001 \ - --mysql-addr 0.0.0.0:4002 \ - --postgres-addr 0.0.0.0:4003 -``` - -`-v "$(pwd)/greptimedb:/tmp/greptimedb` 为 greptimedb 数据目录本地持久化挂载,需将 `$(pwd)/greptimedb` 替换为实际本地存在的目录,默认使用执行命令的当前目录下的 `greptimedb` 目录作为数据目录。 - -使用```$ docker ps```查看数据库是否启动成功 + +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Docker安装GreptimeDB + + ```shell + $ docker run -p 127.0.0.1:4000-4003:4000-4003 \ + -v "$(pwd)/greptimedb:/tmp/greptimedb" \ + --name greptime --rm \ + greptime/greptimedb:latest standalone start \ + --http-addr 0.0.0.0:4000 \ + --rpc-addr 0.0.0.0:4001 \ + --mysql-addr 0.0.0.0:4002 \ + --postgres-addr 0.0.0.0:4003 + ``` + + `-v "$(pwd)/greptimedb:/tmp/greptimedb` 为 greptimedb 数据目录本地持久化挂载,需将 `$(pwd)/greptimedb` 替换为实际本地存在的目录,默认使用执行命令的当前目录下的 `greptimedb` 目录作为数据目录。 + + 使用```$ docker ps```查看数据库是否启动成功 ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 @@ -49,25 +49,25 @@ $ docker run -p 127.0.0.1:4000-4003:4000-4003 \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - greptime: - enabled: true - grpc-endpoints: localhost:4001 - url: jdbc:mysql://localhost:4002/hertzbeat?connectionTimeZone=Asia/Shanghai&forceConnectionTimeZoneToSession=true - driver-class-name: com.mysql.cj.jdbc.Driver - username: greptime - password: greptime - 
expire-time: 30d -``` - -默认数据库是 URL 中配置的 `hertzbeat` ,将自动创建。 `expire-time` 是自动创建的数据库的 TTL (数据过期)时间,默认为 30 天。 + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + greptime: + enabled: true + grpc-endpoints: localhost:4001 + url: jdbc:mysql://localhost:4002/hertzbeat?connectionTimeZone=Asia/Shanghai&forceConnectionTimeZoneToSession=true + driver-class-name: com.mysql.cj.jdbc.Driver + username: greptime + password: greptime + expire-time: 30d + ``` + + 默认数据库是 URL 中配置的 `hertzbeat` ,将自动创建。 `expire-time` 是自动创建的数据库的 TTL (数据过期)时间,默认为 30 天。 2. 重启 HertzBeat @@ -75,4 +75,4 @@ warehouse: 1. 时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md index c21d02e9e6b..1c97c2ccfb9 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md @@ -24,26 +24,26 @@ InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海 ### 2. 通过Docker方式安装InfluxDB > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装InfluxDB 1.x -```shell -$ docker run -p 8086:8086 \ - -v /opt/influxdb:/var/lib/influxdb \ - influxdb:1.8 -``` +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. 
Docker安装InfluxDB 1.x + + ```shell + $ docker run -p 8086:8086 \ + -v /opt/influxdb:/var/lib/influxdb \ + influxdb:1.8 + ``` -`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 + `-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 + 使用```$ docker ps```查看数据库是否启动成功 ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 @@ -51,22 +51,22 @@ $ docker run -p 8086:8086 \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - influxdb: - enabled: true - server-url: http://localhost:8086 - username: root - password: root - expire-time: '30d' - replication: 1 -``` + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + influxdb: + enabled: true + server-url: http://localhost:8086 + username: root + password: root + expire-time: '30d' + replication: 1 + ``` 2. 重启 HertzBeat @@ -74,4 +74,4 @@ warehouse: 1. 时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md index 2132e24b010..f2ce6087dc5 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md @@ -31,15 +31,15 @@ Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据 2. 
Docker安装IoTDB -```shell -$ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ - -v /opt/iotdb/data:/iotdb/data \ - --name iotdb \ - apache/iotdb:1.2.2-standalone -``` + ```shell + $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ + -v /opt/iotdb/data:/iotdb/data \ + --name iotdb \ + apache/iotdb:1.2.2-standalone + ``` -`-v /opt/iotdb/data:/iotdb/data` 为IoTDB数据目录本地持久化挂载,需将`/iotdb/data`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 + `-v /opt/iotdb/data:/iotdb/data` 为IoTDB数据目录本地持久化挂载,需将`/iotdb/data`替换为实际本地存在的目录 + 使用```$ docker ps```查看数据库是否启动成功 3. 在hertzbeat的`application.yml`配置文件配置IoTDB数据库连接 @@ -47,63 +47,63 @@ $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用IotDB - iot-db: - enabled: true - host: 127.0.0.1 - rpc-port: 6667 - username: root - password: root - # use default queryTimeoutInMs = -1 - query-timeout-in-ms: -1 - # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) - expire-time: '7776000000' -``` - -**IoTDB集群版配置** -如果您使用IoTDB为集群请参考下面配置 - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用IotDB - iot-db: - enabled: true - node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667''] - username: root - password: root - # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 - query-timeout-in-ms: -1 - # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) - expire-time: '7776000000' -``` - -参数说明: - -| 参数名称 | 参数说明 | -|---------------------|-------------------------------------------| -| enabled | 是否启用 | -| host | IoTDB数据库地址 | -| rpc-port | IoTDB数据库端口 | -| node-urls | IoTDB集群地址 | -| username | IoTDB数据库账户 | -| password | IoTDB数据库密码 | -| version | IoTDB数据库版本,已废弃,仅支持V1.* | -| 
query-timeout-in-ms | 查询超时时间 |
-| expire-time | 数据存储时间,默认'7776000000'(90天,单位为毫秒,-1代表永不过期) |
-
-> 如果集群配置`node-urls`和单机配置同时设置,以集群`node-urls`配置稳准
+ **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`**
+
+ ```yaml
+ warehouse:
+   store:
+     # 关闭默认JPA
+     jpa:
+       enabled: false
+     # 启用IotDB
+     iot-db:
+       enabled: true
+       host: 127.0.0.1
+       rpc-port: 6667
+       username: root
+       password: root
+       # use default queryTimeoutInMs = -1
+       query-timeout-in-ms: -1
+       # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期)
+       expire-time: '7776000000'
+ ```
+
+ **IoTDB集群版配置**
+ 如果您使用IoTDB为集群请参考下面配置
+
+ ```yaml
+ warehouse:
+   store:
+     # 关闭默认JPA
+     jpa:
+       enabled: false
+     # 启用IotDB
+     iot-db:
+       enabled: true
+       node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667']
+       username: root
+       password: root
+       # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0
+       query-timeout-in-ms: -1
+       # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期)
+       expire-time: '7776000000'
+ ```
+
+ 参数说明:
+
+ | 参数名称 | 参数说明 |
+ |---------------------|-------------------------------------------|
+ | enabled | 是否启用 |
+ | host | IoTDB数据库地址 |
+ | rpc-port | IoTDB数据库端口 |
+ | node-urls | IoTDB集群地址 |
+ | username | IoTDB数据库账户 |
+ | password | IoTDB数据库密码 |
+ | version | IoTDB数据库版本,已废弃,仅支持V1.* |
+ | query-timeout-in-ms | 查询超时时间 |
+ | expire-time | 数据存储时间,默认'7776000000'(90天,单位为毫秒,-1代表永不过期) |
+
+ > 如果集群配置`node-urls`和单机配置同时设置,以集群`node-urls`配置为准
 4. 重启 HertzBeat
### 常见问题
 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用
-> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。
+ > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。
 2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库]
-> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库
+ > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库
 3. 
安装配置了IotDB数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 请检查配置参数是否正确 -> iot-db enable是否设置为true -> 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 + > 请检查配置参数是否正确 + > iot-db enable是否设置为true + > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP + > 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md index ea90a2ed9d5..bd758bc1502 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md @@ -13,40 +13,40 @@ Apache HertzBeat (incubating) 支持在Linux Windows Mac系统安装运行,CPU 1. 下载安装包 -从 [下载页面](/docs/download) 下载您系统环境对应的安装包版本 `apache-hertzbeat-xxx-incubating-bin.tar.gz` + 从 [下载页面](/docs/download) 下载您系统环境对应的安装包版本 `apache-hertzbeat-xxx-incubating-bin.tar.gz` 2. 设置配置文件(可选) -解压安装包到主机 eg: /opt/hertzbeat + 解压安装包到主机 eg: /opt/hertzbeat -``` -tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz -``` + ```shell + tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz + ``` -:::tip -位于 `config/application.yml` 的配置文件,您可以根据需求修改配置文件来配置外部依赖的服务,如数据库,时序数据库等参数。 -HertzBeat 启动时默认全使用内部服务,但生产环境建议切换为外部数据库服务。 -::: + :::tip + 位于 `config/application.yml` 的配置文件,您可以根据需求修改配置文件来配置外部依赖的服务,如数据库,时序数据库等参数。 + HertzBeat 启动时默认全使用内部服务,但生产环境建议切换为外部数据库服务。 + ::: -建议元数据存储使用 [PostgreSQL](postgresql-change), 指标数据存储使用 [VictoriaMetrics](victoria-metrics-init), 具体步骤参见 + 建议元数据存储使用 [PostgreSQL](postgresql-change), 指标数据存储使用 [VictoriaMetrics](victoria-metrics-init), 具体步骤参见 -- [内置 H2 数据库切换为 PostgreSQL](postgresql-change) -- [使用 VictoriaMetrics 存储指标数据](victoria-metrics-init) + - [内置 H2 数据库切换为 PostgreSQL](postgresql-change) + - [使用 VictoriaMetrics 存储指标数据](victoria-metrics-init) 3. 
配置账户文件(可选) -HertzBeat 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat -若需要新增删除修改账户或密码,可以通过修改位于 `config/sureness.yml` 的配置文件实现,具体参考 + HertzBeat 默认内置三个用户账户,分别为 admin/hertzbeat tom/hertzbeat guest/hertzbeat + 若需要新增删除修改账户或密码,可以通过修改位于 `config/sureness.yml` 的配置文件实现,具体参考 -- [配置修改账户密码](account-modify) + - [配置修改账户密码](account-modify) 4. 启动 -执行位于安装目录 bin 下的启动脚本 startup.sh, windows 环境下为 startup.bat + 执行位于安装目录 bin 下的启动脚本 startup.sh, windows 环境下为 startup.bat -``` -./startup.sh -``` + ```shell + ./startup.sh + ``` 5. 开始探索HertzBeat 浏览器访问 即刻开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 @@ -62,44 +62,44 @@ HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数 1. 下载安装包 -从 [下载页面](/docs/download) 下载您系统环境对应的安装包版本 `apache-hertzbeat-collector-xxx-incubating-bin.tar.gz` + 从 [下载页面](/docs/download) 下载您系统环境对应的安装包版本 `apache-hertzbeat-collector-xxx-incubating-bin.tar.gz` 2. 设置配置文件 -解压安装包到主机 eg: /opt/hertzbeat-collector + 解压安装包到主机 eg: /opt/hertzbeat-collector -``` -tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz -``` + ```shell + tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz + ``` -配置采集器的配置文件 `config/application.yml` 里面的 HertzBeat Server 连接 IP, 端口, 采集器名称(需保证唯一性)等参数。 + 配置采集器的配置文件 `config/application.yml` 里面的 HertzBeat Server 连接 IP, 端口, 采集器名称(需保证唯一性)等参数。 -```yaml -collector: - dispatch: - entrance: - netty: - enabled: true - identity: ${IDENTITY:} - mode: ${MODE:public} - manager-host: ${MANAGER_HOST:127.0.0.1} - manager-port: ${MANAGER_PORT:1158} -``` + ```yaml + collector: + dispatch: + entrance: + netty: + enabled: true + identity: ${IDENTITY:} + mode: ${MODE:public} + manager-host: ${MANAGER_HOST:127.0.0.1} + manager-port: ${MANAGER_PORT:1158} + ``` -> 参数详解 + > 参数详解 -- `identity` : (可选) 设置采集器的唯一标识名称。注意多采集器时名称需保证唯一性。 -- `mode` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 -- `manager-host` : 重要, 配置连接的 HertzBeat Server 地址, -- `manager-port` : (可选) 配置连接的 HertzBeat Server 端口,默认 1158. 
+ - `identity` : (可选) 设置采集器的唯一标识名称。注意多采集器时名称需保证唯一性。 + - `mode` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 + - `manager-host` : 重要, 配置连接的 HertzBeat Server 地址, + - `manager-port` : (可选) 配置连接的 HertzBeat Server 端口,默认 1158. 3. 启动 -执行位于安装目录 hertzbeat-collector/bin/ 下的启动脚本 startup.sh, windows 环境下为 startup.bat + 执行位于安装目录 hertzbeat-collector/bin/ 下的启动脚本 startup.sh, windows 环境下为 startup.bat -``` -./startup.sh -``` + ```shell + ./startup.sh + ``` 4. 开始探索 HertzBeat Collector 浏览器访问 即可开始探索使用,默认账户密码 admin/hertzbeat。 @@ -114,22 +114,22 @@ collector: 1. 启动失败,需您提前准备JAVA运行环境 -安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) -要求:JAVA17环境 -下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) -安装后命令行检查是否成功安装 - -``` -$ java -version -java version "17.0.9" -Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) -Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) + 安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) + 要求:JAVA17环境 + 下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) + 安装后命令行检查是否成功安装 -``` + ```shell + $ java -version + java version "17.0.9" + Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) + Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) + + ``` 2. 
按照流程部署,访问 无界面 请参考下面几点排查问题: -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 + > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md index 570d21dbc35..310bc9e2f71 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/quickstart.md @@ -16,24 +16,24 @@ sidebar_label: 快速开始 1. `docker` 环境仅需一条命令即可开始 -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```或者使用 quay.io (若 dockerhub 网络链接超时)``` + ```或者使用 quay.io (若 dockerhub 网络链接超时)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 3. 
部署采集器集群(可选) -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 -- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 -- `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 -- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 + - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 + - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 + - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md index b18881b7b93..f8c604c2924 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/sslcert-practice.md @@ -18,7 +18,7 @@ github: 1. `docker` 环境仅需一条命令即可安装 -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` + `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` 2. 安装成功浏览器访问 `localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` @@ -26,63 +26,64 @@ github: 1. 点击新增SSL证书监控 -> 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 + > 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 -![](/img/docs/start/ssl_1.png) + ![](/img/docs/start/ssl_1.png) 2. 配置监控网站 -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 -> 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 + > 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 + > + > 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 -![](/img/docs/start/ssl_2.png) + ![](/img/docs/start/ssl_2.png) 3. 
查看检测指标数据 -> 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 + > 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 -![](/img/docs/start/ssl_3.png) + ![](/img/docs/start/ssl_3.png) -![](/img/docs/start/ssl_11.png) + ![](/img/docs/start/ssl_11.png) 4. 设置阈值(证书过期时触发) -> 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 + > 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 -![](/img/docs/start/ssl_4.png) + ![](/img/docs/start/ssl_4.png) -> 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 + > 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 -![](/img/docs/start/ssl_5.png) + ![](/img/docs/start/ssl_5.png) -> 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 + > 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 -![](/img/docs/start/ssl_6.png) + ![](/img/docs/start/ssl_6.png) 5. 设置阈值(证书过期前一周触发) -> 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 + > 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 -![](/img/docs/start/ssl_7.png) + ![](/img/docs/start/ssl_7.png) -> 最终可以在告警中心看到已触发的告警。 + > 最终可以在告警中心看到已触发的告警。 -![](/img/docs/start/ssl_8.png) + ![](/img/docs/start/ssl_8.png) 6. 告警通知(通过钉钉微信飞书等及时通知) -> 监控系统 -> 告警通知 -> 新增接收人 + > 监控系统 -> 告警通知 -> 新增接收人 -![](/img/docs/start/ssl_9.png) + ![](/img/docs/start/ssl_9.png) -钉钉微信飞书等token配置可以参考帮助文档 + 钉钉微信飞书等token配置可以参考帮助文档 - - + + -> 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 + > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 -![](/img/docs/start/ssl_10.png) + ![](/img/docs/start/ssl_10.png) 7. 
OK 当阈值触发后我们就可以收到对应告警消息啦,如果没有配通知,也可以在告警中心查看告警信息。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md index d85ca355bd9..ee447e1be7d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md @@ -18,28 +18,28 @@ TDengine是一款开源物联网时序型数据库,我们用其存储采集到 ### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装TDengine - -```shell -$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ - -v /opt/taosdata:/var/lib/taos \ - --name tdengine -e TZ=Asia/Shanghai \ - tdengine/tdengine:3.0.4.0 -``` - -`-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 -`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 -使用```$ docker ps```查看数据库是否启动成功 + +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Docker安装TDengine + + ```shell + $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ + -v /opt/taosdata:/var/lib/taos \ + --name tdengine -e TZ=Asia/Shanghai \ + tdengine/tdengine:3.0.4.0 + ``` + + `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 + `-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 + 使用```$ docker ps```查看数据库是否启动成功 ### 创建数据库实例 @@ -47,7 +47,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 1. 
进入数据库Docker容器 - ``` + ```shell docker exec -it tdengine /bin/bash ``` @@ -69,7 +69,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 执行创建数据库命令 - ``` + ```shell taos> show databases; taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` @@ -78,7 +78,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 4. 查看hertzbeat数据库是否成功创建 - ``` + ```shell taos> show databases; taos> use hertzbeat; ``` @@ -99,21 +99,21 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - td-engine: - enabled: true - driver-class-name: com.taosdata.jdbc.rs.RestfulDriver - url: jdbc:TAOS-RS://localhost:6041/hertzbeat - username: root - password: taosdata -``` + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + td-engine: + enabled: true + driver-class-name: com.taosdata.jdbc.rs.RestfulDriver + url: jdbc:TAOS-RS://localhost:6041/hertzbeat + username: root + password: taosdata + ``` 2. 重启 HertzBeat @@ -121,19 +121,19 @@ warehouse: 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 3. 监控详情历史图片不展示或无数据,已经配置了TDengine -> 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 + > 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 4. 
安装配置了TDengine数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 请检查配置参数是否正确 -> td-engine enable是否设置为true -> 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 + > 请检查配置参数是否正确 + > td-engine enable是否设置为true + > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP + > 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md index 7b1a30c7f06..f0d04a18726 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/update-1.6.0.md @@ -14,184 +14,184 @@ sidebar_label: 1.6.0升级指南 1. 升级Java环境 -由于1.6.0版本使用Java17,且安装包不再提供内置jdk的版本,参考以下情况使用新版Hertzbeat。 + 由于1.6.0版本使用Java17,且安装包不再提供内置jdk的版本,参考以下情况使用新版Hertzbeat。 -- 当你的服务器中默认环境变量为Java17时,这一步你无需任何操作。 -- 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**没有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并在搜索引擎搜索如何设置新的环境变量指向新的Java17。 -- 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并将解压后的文件夹重命名为java,复制到Hertzbeat的解压目录下。 + - 当你的服务器中默认环境变量为Java17时,这一步你无需任何操作。 + - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**没有**其他应用需要低版本Java,根据你的系统,到 [https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并在搜索引擎搜索如何设置新的环境变量指向新的Java17。 + - 当你的服务器中默认环境变量不为Java17时,如Java8、Java11,若你服务器中**有**其他应用需要低版本Java,根据你的系统,到 
[https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html](https://www.oracle.com/java/technologies/javase/jdk17-archive-downloads.html) 选择相应的发行版下载,并将解压后的文件夹重命名为java,复制到Hertzbeat的解压目录下。 2. 升级数据库 -打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), -选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件执行升级sql。 + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), + 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件执行升级sql。 3. 升级配置文件 -由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接使用新的yml配置文件,然后在自己的需求基础上进行修改。 - -- `application.yml`一般需要修改以下部分 - - 默认为: - -```yaml - datasource: - driver-class-name: org.h2.Driver - username: sa - password: 123456 - url: jdbc:h2:./data/hertzbeat;MODE=MYSQL - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: h2 - properties: - eclipselink: - logging: - level: SEVERE -``` - -如若修改为mysql数据库,给出一个示例: - -```yaml - datasource: - driver-class-name: com.mysql.cj.jdbc.Driver - username: root - password: root - url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: mysql - properties: - eclipselink: - logging: - level: SEVERE -``` - -- `sureness.yml`修改是可选的,一般在你需要修改账号密码时 - -```yaml -# account info config -# eg: admin has role [admin,user], password is hertzbeat -# eg: tom has role [user], password is hertzbeat -# eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - 
role: [admin] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] - - appId: lili - # credential = MD5(password + salt) - # plain password: hertzbeat - # attention: digest authentication does not support salted encrypted password accounts - credential: 94C6B34E7A199A9F9D4E1F208093B489 - salt: 123 - role: [user] -``` + 由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接使用新的yml配置文件,然后在自己的需求基础上进行修改。 + + - `application.yml`一般需要修改以下部分 + + 默认为: + + ```yaml + datasource: + driver-class-name: org.h2.Driver + username: sa + password: 123456 + url: jdbc:h2:./data/hertzbeat;MODE=MYSQL + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: h2 + properties: + eclipselink: + logging: + level: SEVERE + ``` + + 如若修改为mysql数据库,给出一个示例: + + ```yaml + datasource: + driver-class-name: com.mysql.cj.jdbc.Driver + username: root + password: root + url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: mysql + properties: + eclipselink: + logging: + level: SEVERE + ``` + + - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + + ```yaml + # account info config + # eg: admin has role [admin,user], password is hertzbeat + # eg: tom has role [user], password is hertzbeat + # eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 + account: + - appId: admin + credential: hertzbeat + role: [admin] + - appId: tom + credential: hertzbeat + role: [user] + - appId: guest + credential: hertzbeat + role: [guest] + - appId: lili + # credential = MD5(password + salt) + # plain password: hertzbeat + # attention: digest authentication does not support salted encrypted password accounts + credential: 
94C6B34E7A199A9F9D4E1F208093B489 + salt: 123 + role: [user] + ``` 4. 添加相应的数据库驱动 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动,复制到安装目录下`ext-lib`中。 -mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) -oracle(如果你要监控oracle,这两个驱动是必须的) -[https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) -[https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) -接下来,像之前那样运行启动脚本,即可体验最新的HertzBeat1.6.0! + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) + oracle(如果你要监控oracle,这两个驱动是必须的): + [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) + 接下来,像之前那样运行启动脚本,即可体验最新的HertzBeat1.6.0! ### Docker 方式升级 - Mysql数据库 1. 关闭 HertzBeat 容器 -``` -docker stop hertzbeat -``` + ```shell + docker stop hertzbeat + ``` 2. 
升级数据库脚本 -打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), -选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 Mysql 执行升级sql。 + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), + 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 Mysql 执行升级sql。 3. 升级配置文件 -由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 - -- `application.yml`一般需要修改以下部分 - - 默认为: - -```yaml - datasource: - driver-class-name: com.mysql.cj.jdbc.Driver - username: root - password: root - url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: mysql - properties: - eclipselink: - logging: - level: SEVERE -``` - -- `sureness.yml`修改是可选的,一般在你需要修改账号密码时 - -```yaml -# account info config -# eg: admin has role [admin,user], password is hertzbeat -# eg: tom has role [user], password is hertzbeat -# eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] - - appId: lili - # credential = MD5(password + salt) - # plain password: hertzbeat - # attention: digest authentication does not support salted encrypted password accounts - credential: 94C6B34E7A199A9F9D4E1F208093B489 - salt: 123 - role: [user] -``` + 由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 + + - `application.yml`一般需要修改以下部分 + + 默认为: + + ```yaml + datasource: + driver-class-name: 
com.mysql.cj.jdbc.Driver + username: root + password: root + url: jdbc:mysql://localhost:3306/hertzbeat?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: mysql + properties: + eclipselink: + logging: + level: SEVERE + ``` + + - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + + ```yaml + # account info config + # eg: admin has role [admin,user], password is hertzbeat + # eg: tom has role [user], password is hertzbeat + # eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 + account: + - appId: admin + credential: hertzbeat + role: [admin] + - appId: tom + credential: hertzbeat + role: [user] + - appId: guest + credential: hertzbeat + role: [guest] + - appId: lili + # credential = MD5(password + salt) + # plain password: hertzbeat + # attention: digest authentication does not support salted encrypted password accounts + credential: 94C6B34E7A199A9F9D4E1F208093B489 + salt: 123 + role: [user] + ``` 4. 添加相应的数据库驱动 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 -mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) -oracle(如果你要监控oracle,这两个驱动是必须的) -[https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) -[https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) -接下来,像之前那样 Docker 运行启动 HertzBeat,即可体验最新的HertzBeat1.6.0! 
+ mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) + oracle(如果你要监控oracle,这两个驱动是必须的): + [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) + 接下来,像之前那样 Docker 运行启动 HertzBeat,即可体验最新的HertzBeat1.6.0! ### Docker安装升级 - H2内置数据库(生产环境不推荐使用H2) 1. 关闭 HertzBeat 容器 -``` -docker stop hertzbeat -``` + ```shell + docker stop hertzbeat + ``` 2. 编辑H2数据库文件 @@ -199,75 +199,75 @@ docker stop hertzbeat 下载 h2 驱动 jar [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220) 使用 h2 驱动 jar 本地启动数据库 -``` -java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 -``` + ```shell + java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 + ``` -打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), -选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 H2 执行升级sql。 + 打开[https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration](https://github.com/apache/hertzbeat/tree/master/manager/src/main/resources/db/migration), + 选择你使用的数据库的目录下相应的 `V160__update_column.sql`文件在 H2 执行升级sql。 3. 
升级配置文件 -由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 - -- `application.yml`一般需要修改以下部分 - - 默认为: - -```yaml - datasource: - driver-class-name: org.h2.Driver - username: sa - password: 123456 - url: jdbc:h2:./data/hertzbeat;MODE=MYSQL - hikari: - max-lifetime: 120000 - - jpa: - show-sql: false - database-platform: org.eclipse.persistence.platform.database.MySQLPlatform - database: h2 - properties: - eclipselink: - logging: - level: SEVERE -``` - -- `sureness.yml`修改是可选的,一般在你需要修改账号密码时 - -```yaml -# account info config -# eg: admin has role [admin,user], password is hertzbeat -# eg: tom has role [user], password is hertzbeat -# eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 -account: - - appId: admin - credential: hertzbeat - role: [admin] - - appId: tom - credential: hertzbeat - role: [user] - - appId: guest - credential: hertzbeat - role: [guest] - - appId: lili - # credential = MD5(password + salt) - # plain password: hertzbeat - # attention: digest authentication does not support salted encrypted password accounts - credential: 94C6B34E7A199A9F9D4E1F208093B489 - salt: 123 - role: [user] -``` + 由于 `application.yml`和 `sureness.yml`更新变动较大,建议直接挂载使用新的yml配置文件,然后在自己的需求基础上进行修改。 + + - `application.yml`一般需要修改以下部分 + + 默认为: + + ```yaml + datasource: + driver-class-name: org.h2.Driver + username: sa + password: 123456 + url: jdbc:h2:./data/hertzbeat;MODE=MYSQL + hikari: + max-lifetime: 120000 + + jpa: + show-sql: false + database-platform: org.eclipse.persistence.platform.database.MySQLPlatform + database: h2 + properties: + eclipselink: + logging: + level: SEVERE + ``` + + - `sureness.yml`修改是可选的,一般在你需要修改账号密码时 + + ```yaml + # account info config + # eg: admin has role [admin,user], password is hertzbeat + # eg: tom has role [user], password is hertzbeat + # eg: lili has role [guest], plain password is lili, salt is 123, salted password is 1A676730B0C7F54654B0E09184448289 + 
account: + - appId: admin + credential: hertzbeat + role: [admin] + - appId: tom + credential: hertzbeat + role: [user] + - appId: guest + credential: hertzbeat + role: [guest] + - appId: lili + # credential = MD5(password + salt) + # plain password: hertzbeat + # attention: digest authentication does not support salted encrypted password accounts + credential: 94C6B34E7A199A9F9D4E1F208093B489 + salt: 123 + role: [user] + ``` 4. 添加相应的数据库驱动 由于apache基金会对于license合规的要求,HertzBeat的安装包不能包含mysql,oracle等gpl许可的依赖,需要用户自行添加,用户可通过以下链接自行下载驱动 jar 放到本地 `ext-lib`目录下,然后启动时将`ext-lib`挂载到容器的 `/opt/hertzbeat/ext-lib`目录。 -mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) -oracle(如果你要监控oracle,这两个驱动是必须的) -[https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) -[https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) -接下来,像之前那样 Docker 运行启动,即可体验最新的HertzBeat1.6.0! + mysql:[https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.zip](https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.18.zip) + oracle(如果你要监控oracle,这两个驱动是必须的): + [https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar](https://download.oracle.com/otn-pub/otn_software/jdbc/234/ojdbc8.jar) + [https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar](https://repo.mavenlibs.com/maven/com/oracle/database/nls/orai18n/21.5.0.0/orai18n-21.5.0.0.jar?utm_source=mavenlibs.com) + 接下来,像之前那样 Docker 运行启动,即可体验最新的HertzBeat1.6.0! 
### 通过导出导入升级 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md index 253fe909107..24cb43f7b24 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md @@ -18,49 +18,49 @@ VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决 ### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装VictoriaMetrics - -```shell -$ docker run -d -p 8428:8428 \ - -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ - --name victoria-metrics \ - victoriametrics/victoria-metrics:v1.95.1 -``` - -`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 -使用```$ docker ps```查看数据库是否启动成功 + +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Docker安装VictoriaMetrics + + ```shell + $ docker run -d -p 8428:8428 \ + -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ + --name victoria-metrics \ + victoriametrics/victoria-metrics:v1.95.1 + ``` + + `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 + 使用```$ docker ps```查看数据库是否启动成功 3. 
在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 - 配置HertzBeat的配置文件 - 修改位于 `hertzbeat/config/application.yml` 的配置文件 + 配置HertzBeat的配置文件 + 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用 victoria-metrics - victoria-metrics: - enabled: true - url: http://localhost:8428 - username: root - password: root -``` + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + # 启用 victoria-metrics + victoria-metrics: + enabled: true + url: http://localhost:8428 + username: root + password: root + ``` 4. 重启 HertzBeat @@ -68,4 +68,4 @@ warehouse: 1. 时序数据库是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md index 042e33f0558..d3e6a1aa780 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -145,9 +145,9 @@ limitations under the License. 2. 确保代码的可读性和直观性 -- `annotation` 符号中的字符串不需要提取为常量。 + - `annotation` 符号中的字符串不需要提取为常量。 -- 被引用的 `package` 或 `resource` 名称不需要提取为常量。 + - 被引用的 `package` 或 `resource` 名称不需要提取为常量。 3. 未被重新分配的变量也必须声明为 final 类型。 @@ -189,17 +189,17 @@ limitations under the License. - 如果使用 `HashSet`,则返回 Set 2. 
如果存在多线程,可以使用以下声明或返回类型: -```java -private CurrentHashMap map; -public CurrentHashMap funName(); -``` + ```java + private CurrentHashMap map; + public CurrentHashMap funName(); + ``` 3. 使用 `isEmpty()` 而不是 `length() == 0` 或者 `size() == 0` - 负面示例: ```java if (pathPart.length() == 0) { - return; + return; } ``` @@ -207,7 +207,7 @@ public CurrentHashMap funName(); ```java if (pathPart.isEmpty()) { - return; + return; } ``` @@ -225,98 +225,97 @@ public CurrentHashMap funName(); - 多个代码行的 `深度` 为 `n+1` - 多余的行 -一般来说,如果一个方法的代码行深度由于连续嵌套的 `if... else..` 超过了 `2+ Tabs`,那么应该考虑试图 + 一般来说,如果一个方法的代码行深度由于连续嵌套的 `if... else..` 超过了 `2+ Tabs`,那么应该考虑试图 -- `合并分支`, -- `反转分支条件` -- `提取私有方法` + - `合并分支`, + - `反转分支条件` + - `提取私有方法` -以减少代码行深度并提高可读性,例如: + 以减少代码行深度并提高可读性,例如: -- 联合或将逻辑合并到下一级调用中 -- 负面示例: + - 联合或将逻辑合并到下一级调用中 + - 负面示例: -```java -if (isInsert) { -save(platform); -} else { -updateById(platform); -} -``` + ```java + if (isInsert) { + save(platform); + } else { + updateById(platform); + } + ``` -- 正面示例: + - 正面示例: -```java -saveOrUpdate(platform); -``` + ```java + saveOrUpdate(platform); + ``` -- 合并条件 -- 负面示例: + - 合并条件 + - 负面示例: -```java -if (expression1) { -if(expression2) { -...... -} -} + ```java + if (expression1) { + if(expression2) { + // ...... + } + } + ``` -``` + - 正面示例: -- 正面示例: - - ```java - if (expression1 && expression2) { - ...... - } - ``` - -- 反转条件 -- 负面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (condition1) { - ... - } else { - ... - } - } - ``` - -- 正面示例: - - ```java - public void doSomething() { - // 忽略更深的代码块行 - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` - -- 使用单一变量或方法减少复杂的条件表达式 -- 负面示例: - - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` - -- 正面示例: - - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // containsSqlServer的定义 - ``` + ```java + if (expression1 && expression2) { + // ...... 
+ } + ``` + + - 反转条件 + - 负面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (condition1) { + // ... + } else { + // ... + } + } + ``` + + - 正面示例: + + ```java + public void doSomething() { + // 忽略更深的代码块行 + // ..... + if (!condition1) { + // ... + return; + } + // ... + } + ``` + + - 使用单一变量或方法减少复杂的条件表达式 + - 负面示例: + + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + // ... + } + ``` + + - 正面示例: + + ```java + if (containsSqlServer(dbType)) { + // .... + } + //..... + // containsSqlServer的定义 + ``` > 在未来,使用 `sonarlint` 和 `better highlights` 检查代码深度看起来是个不错的选择。 @@ -324,20 +323,20 @@ if(expression2) { 1. 方法缺少注释: -- `When`:该方法何时可以被调用 -- `How`:如何使用此方法以及如何传递参数等 -- `What`:此方法实现了哪些功能 -- `Note`:在调用此方法时开发人员应注意什么 + - `When`:该方法何时可以被调用 + - `How`:如何使用此方法以及如何传递参数等 + - `What`:此方法实现了哪些功能 + - `Note`:在调用此方法时开发人员应注意什么 2. 缺少必要的类头部描述注释。 -添加 `What`,`Note` 等,如上述 `1` 中提到的。 + 添加 `What`,`Note` 等,如上述 `1` 中提到的。 3. 在接口中的方法声明必须被注释。 -- 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 + - 如果实现的语义和接口声明的注释内容不一致,则具体的实现方法也需要用注释重写。 -- 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 + - 如果方法实现的语义与接口声明的注释内容一致,则建议不写注释以避免重复的注释。 4. 在注释行中的第一个词需要大写,如 `param` 行,`return` 行。 如果特殊引用作为主题不需要大写,需要注意特殊符号,例如引号。 @@ -347,31 +346,31 @@ if(expression2) { 1. 更倾向于使用 `non-capturing` lambda(不包含对外部范围的引用的lambda)。 Capturing lambda 在每次调用时都需要创建一个新的对象实例。`Non-capturing` lambda 可以为每次调用使用相同的实例。 -- 负面示例: + - 负面示例: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` -- 正面示例: + - 正面示例: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. 
考虑使用方法引用而不是内联lambda -- 负面示例: + - 负面示例: - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` -- 正面示例: + - 正面示例: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -389,127 +388,127 @@ if(expression2) { 1. 使用 `StringUtils.isBlank` 而不是 `StringUtils.isEmpty` -- 负面示例: + - 负面示例: - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. 使用 `StringUtils.isNotBlank` 而不是 `StringUtils.isNotEmpty` -- 负面示例: + - 负面示例: - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. 使用 `StringUtils.isAllBlank` 而不是 `StringUtils.isAllEmpty` -- 负面示例: + - 负面示例: - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` 类 1. 枚举值比较 -- 负面示例: + - 负面示例: - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` -- 正面示例: + - 正面示例: - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. 枚举类不需要实现 Serializable -- 负面示例: + - 负面示例: - ```java - public enum JobStatus implements Serializable { - ... 
- } - ``` + ```java + public enum JobStatus implements Serializable { + // ... + } + ``` -- 正面示例: + - 正面示例: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + // ... + } + ``` 3. 使用 `Enum.name()` 而不是 `Enum.toString()` -- 负面示例: + - 负面示例: - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` -- 正面示例: + - 正面示例: - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. 枚举类名称统一使用 Enum 后缀 -- 负面示例: + - 负面示例: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + // ... + } + ``` -- 正面示例: + - 正面示例: - ```java - public enum JobStatusEnum { - ... - } - ``` + ```java + public enum JobStatusEnum { + // ... + } + ``` ### 3.13 `Deprecated` 注解 @@ -518,7 +517,7 @@ if(expression2) { ```java @deprecated public void process(String input) { - ... + // ... } ``` @@ -527,7 +526,7 @@ public void process(String input) { ```java @Deprecated public void process(String input) { - ... + // ... } ``` @@ -535,43 +534,43 @@ public void process(String input) { 1. 使用 `占位符` 进行日志输出: -- 负面示例 + - 负面示例 - ```java - log.info("Deploy cluster request " + deployRequest); - ``` + ```java + log.info("Deploy cluster request " + deployRequest); + ``` -- 正面示例 + - 正面示例 - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` 2. 
打印日志时,注意选择 `日志级别` -当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 + 当打印日志内容时,如果传递了日志占位符的实际参数,必须避免过早评估,以避免由日志级别导致的不必要评估。 -- 负面示例: + - 负面示例: - 假设当前日志级别为 `INFO`: + 假设当前日志级别为 `INFO`: - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` -- 正面示例: + - 正面示例: - 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: + 在这种情况下,我们应该在进行实际的日志调用之前提前确定日志级别,如下所示: - ```java - // 忽略声明行。 - List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + ```java + // 忽略声明行。 + List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 测试 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md index 8a353d72ba7..8b2a17fd1d6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/contribution.md @@ -88,29 +88,29 @@ limitations under the License. 1. 首先您需要 Fork 目标仓库 [hertzbeat repository](https://github.com/apache/hertzbeat). 2. 然后 用git命令 将代码下载到本地: -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` + ```shell + git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended + ``` 3. 下载完成后,请参考目标仓库的入门指南或者 README 文件对项目进行初始化。 4. 接着,您可以参考如下命令进行代码的提交, 切换新的分支, 进行开发: -```shell -git checkout -b a-feature-branch #Recommended -``` + ```shell + git checkout -b a-feature-branch #Recommended + ``` 5. 提交 commit , commit 描述信息需要符合约定格式: [module name or type name]feature or bugfix or doc: custom message. 
-```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` + ```shell + git add + git commit -m '[docs]feature: necessary instructions' #Recommended + ``` 6. 推送到远程仓库 -```shell -git push origin a-feature-branch -``` + ```shell + git push origin a-feature-branch + ``` 7. 然后您就可以在 GitHub 上发起新的 PR (Pull Request)。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md index 94e2ad54899..a6c741ba2e7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/activemq.md @@ -15,40 +15,40 @@ keywords: [开源监控系统, 开源中间件监控, ActiveMQ消息中间件监 1. 修改安装目录下的 `conf/activemq.xml` 文件,开启JMX -> 在 `broker` 标签中添加 `userJmx="true"` 属性 + > 在 `broker` 标签中添加 `userJmx="true"` 属性 -```xml - - - -``` + ```xml + + + + ``` 2. 修改安装目录下的 `bin/env` 文件,配置JMX 端口 IP等 -将如下原配置信息 - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -更新为如下配置,⚠️注意修改`本机对外IP` - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" 
-ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` + 将如下原配置信息 + + ```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` + + 更新为如下配置,⚠️注意修改`本机对外IP` + + ```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` 3. 
重启 ACTIVEMQ 服务,在 HertzBeat 添加对应 ActiveMQ 监控即可,参数使用 JMX 配置的 IP 端口。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md index ba6b49bc58a..dbc7c583921 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_dingtalk.md @@ -11,31 +11,31 @@ keywords: [告警钉钉机器人通知, 开源告警系统, 开源监控告警 1. **【钉钉桌面客户端】-> 【群设置】-> 【智能群助手】-> 【添加新建机器人-选自定义】-> 【设置机器人名称头像】-> 【注意⚠️设置自定义关键字: HertzBeat】 ->【添加成功后复制其WebHook地址】** -> 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 + > 注意⚠️ 新增机器人时需在安全设置块需设置其自定义关键字: HertzBeat ,其它安全设置加签或IP段不填写 -![email](/img/docs/help/alert-notice-8.png) + ![email](/img/docs/help/alert-notice-8.png) 2. **【保存机器人的WebHook地址access_token值】** -> 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` -> 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > 例如: webHook地址:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > 其机器人access_token值为 `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【告警通知】->【新增接收人】 ->【选择钉钉机器人通知方式】->【设置钉钉机器人ACCESS_TOKEN】-> 【确定】** -![email](/img/docs/help/alert-notice-9.png) + ![email](/img/docs/help/alert-notice-9.png) 4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 钉钉机器人通知常见问题 1. 
钉钉群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查钉钉机器人是否配置了安全自定义关键字:HertzBeat -> 请排查是否配置正确机器人ACCESS_TOKEN,是否已配置告警策略关联 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查钉钉机器人是否配置了安全自定义关键字:HertzBeat + > 请排查是否配置正确机器人ACCESS_TOKEN,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md index bb3c6287cd4..675c606928a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_discord.md @@ -15,56 +15,56 @@ keywords: [告警 Discord 机器人通知, 开源告警系统, 开源监控告 1. 访问 [https://discord.com/developers/applications](https://discord.com/developers/applications) 创建应用 -![bot](/img/docs/help/discord-bot-1.png) + ![bot](/img/docs/help/discord-bot-1.png) 2. 在应用下创建机器人,获取机器人 Token -![bot](/img/docs/help/discord-bot-2.png) + ![bot](/img/docs/help/discord-bot-2.png) -![bot](/img/docs/help/discord-bot-3.png) + ![bot](/img/docs/help/discord-bot-3.png) 3. 授权机器人到聊天服务器 -> 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` + > 在 OAuth2 菜单下给此机器人授权,`SCOPES` 范围选 `bot`, `BOT PERMISSIONS` 选发送消息 `Send Messages` -![bot](/img/docs/help/discord-bot-4.png) + ![bot](/img/docs/help/discord-bot-4.png) -> 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 + > 获取到最下方生成的 URL, 浏览器访问此 URL 给机器人正式授权,即设置将机器人加入哪个聊天服务器。 4. 查看您的聊天服务器是否已经加入机器人成员 -![bot](/img/docs/help/discord-bot-5.png) + ![bot](/img/docs/help/discord-bot-5.png) ### 开启开发者模式,获取频道 Channel ID 1. 个人设置 -> 高级设置 -> 开启开发者模式 -![bot](/img/docs/help/discord-bot-6.png) + ![bot](/img/docs/help/discord-bot-6.png) 2. 获取频道 Channel ID -> 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID + > 右键选中您想要发送机器人消息的聊天频道,点击 COPY ID 按钮获取 Channel ID -![bot](/img/docs/help/discord-bot-7.png) + ![bot](/img/docs/help/discord-bot-7.png) ### 在 HertzBeat 新增告警通知人,通知方式为 Discord Bot 1. 
**【告警通知】->【新增接收人】 ->【选择 Discord 机器人通知方式】->【设置机器人Token和ChannelId】-> 【确定】** -![email](/img/docs/help/discord-bot-8.png) + ![email](/img/docs/help/discord-bot-8.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Discord 机器人通知常见问题 1. Discord 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 -> 请排查机器人是否被 Discord聊天服务器正确赋权 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否配置正确机器人Token, ChannelId,是否已配置告警策略关联 + > 请排查机器人是否被 Discord聊天服务器正确赋权 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md index 0f53b58e71d..7e4f59e7900 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_email.md @@ -11,29 +11,29 @@ keywords: [告警邮件通知, 开源告警系统, 开源监控告警系统] 1. **【告警通知】->【新增接收人】 ->【选择邮件通知方式】** -![email](/img/docs/help/alert-notice-1.png) + ![email](/img/docs/help/alert-notice-1.png) 2. **【获取验证码】-> 【输入邮箱验证码】-> 【确定】** ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) + ![email](/img/docs/help/alert-notice-3.png) 3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 邮件通知常见问题 1. 自己内网部署的HertzBeat无法接收到邮件通知 -> HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 + > HertzBeat需要自己配置邮件服务器,TanCloud无需,请确认是否在application.yml配置了自己的邮件服务器 2. 
云环境TanCloud无法接收到邮件通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确邮箱,是否已配置告警策略关联 -> 请查询邮箱的垃圾箱里是否把告警邮件拦截 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否配置正确邮箱,是否已配置告警策略关联 + > 请查询邮箱的垃圾箱里是否把告警邮件拦截 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md index b70c8b10c40..1c6f18cfeb0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_enterprise_wechat_app.md @@ -11,24 +11,24 @@ keywords: [开源告警系统, 开源监控告警系统, 企业微信应用告 1. **【企业微信后台管理】-> 【App管理】-> 【创建一个新的应用】-> 【设置应用信息】->【添加成功后复制应用的AgentId和Secret】** -![email](/img/docs/help/alert-wechat-1.jpg) + ![email](/img/docs/help/alert-wechat-1.jpg) 2. **【告警通知】->【新增接收人】 ->【选择企业微信应用通知方式】->【设置企业ID,企业应用id和应用的secret 】-> 【确定】** -![email](/img/docs/help/alert-wechat-2.jpg) + ![email](/img/docs/help/alert-wechat-2.jpg) 3. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人。** -![email](/img/docs/help/alert-wechat-3.jpg) + ![email](/img/docs/help/alert-wechat-3.jpg) ### 企业微信应用通知常见问题 1. 企业微信应用未收到告警通知. -> 请检查用户是否具有应用程序权限. -> 请检查企业应用程序回调地址设置是否正常. -> 请检查服务器IP是否在企业应用程序白名单上. + > 请检查用户是否具有应用程序权限. + > 请检查企业应用程序回调地址设置是否正常. + > 请检查服务器IP是否在企业应用程序白名单上. 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md index 5a6e95d7067..dd5c9e0c519 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_feishu.md @@ -13,22 +13,22 @@ keywords: [告警飞书机器人通知, 开源告警系统, 开源监控告警 2. 
**【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > 例如: webHook地址:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择飞书机器人通知方式】->【设置飞书机器人KEY】-> 【确定】** 4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 飞书机器人通知常见问题 1. 飞书群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md index 5c5c38c56be..05dbcd4f9e3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_slack.md @@ -19,19 +19,19 @@ keywords: [告警 Slack Webhook 通知, 开源告警系统, 开源监控告警 1. **【告警通知】->【新增接收人】 ->【选择 Slack Webhook 通知方式】->【设置 Webhook URL】-> 【确定】** -![email](/img/docs/help/slack-bot-1.png) + ![email](/img/docs/help/slack-bot-1.png) 2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Slack 机器人通知常见问题 1. Slack 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否配置正确 Slack Webhook URL,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md index d6bca9843a4..5bd4aeec219 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_smn.md @@ -11,33 +11,33 @@ keywords: [ 告警华为云SMN通知, 开源告警系统, 开源监控告警系 1. **按照[华为云SMN官方文档](https://support.huaweicloud.com/qs-smn/smn_json.html)开通SMN服务并配置SMN** -![alert-notice-10](/img/docs/help/alert-notice-10.png) + ![alert-notice-10](/img/docs/help/alert-notice-10.png) 2. **保存SMN的主题URN** -![alert-notice-11](/img/docs/help/alert-notice-11.png) + ![alert-notice-11](/img/docs/help/alert-notice-11.png) 3. **按照[华为云签名文档](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html)获取AK、SK和项目ID** -![alert-notice-12](/img/docs/help/alert-notice-12.png) + ![alert-notice-12](/img/docs/help/alert-notice-12.png) -![alert-notice-13](/img/docs/help/alert-notice-13.png) + ![alert-notice-13](/img/docs/help/alert-notice-13.png) 4. **【告警通知】->【新增接收人】 ->【选择华为云SMN通知方式】->【设置华为云SMN AK、SK等配置】-> 【确定】** -![alert-notice-14](/img/docs/help/alert-notice-14.png) + ![alert-notice-14](/img/docs/help/alert-notice-14.png) 5. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 华为云SMN通知常见问题 1. 华为云SMN群未收到告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否正确配置华为云SMN AK、SK等配置,是否已配置告警策略关联 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否正确配置华为云SMN AK、SK等配置,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md index dfb1aa48d8a..6b1c6427335 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_telegram.md @@ -15,32 +15,32 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] 1. 使用 [@BotFather](https://t.me/BotFather) 创建自己的机器人并获取访问令牌`Token` -![telegram-bot](/img/docs/help/telegram-bot-1.png) + ![telegram-bot](/img/docs/help/telegram-bot-1.png) 2. 获取接收人的 `User ID` -**使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, -访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` - -```json -{ - "ok":true, - "result":[ - { - "update_id":632299191, - "message":{ - "from":{ - "id": "User ID" - }, - "chat":{ - }, - "date":1673858065, - "text":"111" + **使用您要通知的接收人账户给刚创建 Bot 账户随便发送一个信息**, + 访问 ```https://api.telegram.org/bot/getUpdates``` , **`使用上一步的 Bot Token 替换其中的`**, 响应`Json`数据中第一个`result.message.from.id` 值即为接收人的 `User ID` + + ```json + { + "ok":true, + "result":[ + { + "update_id":632299191, + "message":{ + "from":{ + "id": "User ID" + }, + "chat":{ + }, + "date":1673858065, + "text":"111" + } } - } - ] -} -``` + ] + } + ``` 3. 记录保存我们获得的 `Token` 和 `User Id` @@ -48,20 +48,20 @@ keywords: [告警 Telegram 通知, 开源告警系统, 开源监控告警系统] 1. **【告警通知】->【新增接收人】 ->【选择 Telegram 机器人通知方式】->【设置机器人Token和UserId】-> 【确定】** -![email](/img/docs/help/telegram-bot-2.png) + ![email](/img/docs/help/telegram-bot-2.png) -4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** +2. 
**配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Telegram 机器人通知常见问题 1. Telegram 未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 -> UserId 应为消息接收对象的UserId + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否配置正确机器人Token, UserId,是否已配置告警策略关联 + > UserId 应为消息接收对象的UserId 其它问题可以通过交流群ISSUE反馈哦! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md index 272c59cfd4c..267fd1770ab 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_webhook.md @@ -11,13 +11,13 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 1. **【告警通知】->【新增接收人】 ->【选择WebHook通知方式】-> 【设置WebHook回调地址】 -> 【确定】** -![email](/img/docs/help/alert-notice-5.png) + ![email](/img/docs/help/alert-notice-5.png) 2. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### WebHook回调POST请求体BODY内容 @@ -60,7 +60,7 @@ keywords: [告警 Webhook 回调通知, 开源告警系统, 开源监控告警 1. WebHook回调未生效 -> 请查看告警中心是否已经产生此条告警信息 -> 请排查配置的WebHook回调地址是否正确 + > 请查看告警中心是否已经产生此条告警信息 + > 请排查配置的WebHook回调地址是否正确 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md index 5c73ffee2a6..c8ad278707c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/alert_wework.md @@ -11,28 +11,28 @@ keywords: [告警企业微信通知, 开源告警系统, 开源监控告警系 1. **【企业微信端】-> 【群设置】-> 【群机器人】-> 【添加新建机器人】-> 【设置机器人名称头像】-> 【添加成功后复制其WebHook地址】** -![email](/img/docs/help/alert-notice-6.jpg) + ![email](/img/docs/help/alert-notice-6.jpg) 2. **【保存机器人的WebHook地址的KEY值】** -> 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > 例如: webHook地址:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > 其机器人KEY值为 `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【告警通知】->【新增接收人】 ->【选择企业微信机器人通知方式】->【设置企业微信机器人KEY】-> 【确定】** -![email](/img/docs/help/alert-notice-7.png) + ![email](/img/docs/help/alert-notice-7.png) 4. **配置关联的告警通知策略⚠️ 【新增通知策略】-> 【将刚设置的接收人关联】-> 【确定】** -> **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 + > **注意⚠️ 新增了接收人并不代表已经生效可以接收告警信息,还需配置关联的告警通知策略,即指定哪些消息发给哪些接收人**。 -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### 企业微信机器人通知常见问题 1. 企业微信群未收到机器人告警通知 -> 请排查在告警中心是否已有触发的告警信息 -> 请排查是否配置正确机器人KEY,是否已配置告警策略关联 + > 请排查在告警中心是否已有触发的告警信息 + > 请排查是否配置正确机器人KEY,是否已配置告警策略关联 其它问题可以通过交流群ISSUE反馈哦! 
diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md index 1abcb732289..b88627eaa35 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/dynamic_tp.md @@ -11,53 +11,53 @@ keywords: [开源监控系统, 开源中间件监控, DynamicTp线程池监控] 1. 集成使用 `DynamicTp` -`DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 + `DynamicTp` 是Jvm语言的基于配置中心的轻量级动态线程池,内置监控告警功能,可通过SPI自定义扩展实现。 -集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) + 集成使用,请参考文档 [快速接入](https://dynamictp.cn/guide/use/quick-start.html) 2. 开启SpringBoot Actuator Endpoint 暴露出`DynamicTp`指标接口 -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` + ```yaml + management: + endpoints: + web: + exposure: + include: '*' + ``` -测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: + 测试访问指标接口 `ip:port/actuator/dynamic-tp` 是否有响应json数据如下: -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4 GB", - "totalMemory": "444 MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81 GB" - } -] -``` + ```json + [ + { + "poolName": "commonExecutor", + "corePoolSize": 1, + "maximumPoolSize": 1, + "queueType": "LinkedBlockingQueue", + "queueCapacity": 2147483647, + "queueSize": 0, + "fair": false, + "queueRemainingCapacity": 2147483647, + "activeCount": 0, + "taskCount": 0, + "completedTaskCount": 0, + "largestPoolSize": 0, + "poolSize": 
0, + "waitTaskCount": 0, + "rejectCount": 0, + "rejectHandlerName": null, + "dynamic": false, + "runTimeoutCount": 0, + "queueTimeoutCount": 0 + }, + { + "maxMemory": "4 GB", + "totalMemory": "444 MB", + "freeMemory": "250.34 MB", + "usableMemory": "3.81 GB" + } + ] + ``` 3. 在HertzBeat中间件监控下添加DynamicTp监控即可 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md index 8bb3bbb25e0..8170aaad0aa 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/iotdb.md @@ -17,29 +17,29 @@ keywords: [开源监控系统, 开源数据库监控, IoTDB数据库监控] 1. metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server -``` -# 是否启动监控模块,默认为false -enableMetric: true - -# 是否启用操作延迟统计 -enablePerformanceStat: false - -# 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 -metricReporterList: - - JMX - - PROMETHEUS - -# 底层使用的metric架构,可选参数:[MICROMETER, DROPWIZARD] -monitorType: MICROMETER - -# 初始化metric的级别,可选参数: [CORE, IMPORTANT, NORMAL, ALL] -metricLevel: IMPORTANT - -# 预定义的指标集, 可选参数: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] -predefinedMetrics: - - JVM - - FILE -``` + ```yml + # 是否启动监控模块,默认为false + enableMetric: true + + # 是否启用操作延迟统计 + enablePerformanceStat: false + + # 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 + metricReporterList: + - JMX + - PROMETHEUS + + # 底层使用的metric架构,可选参数:[MICROMETER, DROPWIZARD] + monitorType: MICROMETER + + # 初始化metric的级别,可选参数: [CORE, IMPORTANT, NORMAL, ALL] + metricLevel: IMPORTANT + + # 预定义的指标集, 可选参数: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] + predefinedMetrics: + - JVM + - FILE + ``` 2. 
重启 IoTDB, 打开浏览器或者用curl 访问 , 就能看到metric数据了。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md index f17a60a9b9f..27e9c63b3ba 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/issue.md @@ -8,25 +8,25 @@ sidebar_label: 常见问题 1. **页面反馈:monitor.host:监控Host必须是ipv4,ipv6或域名** -> 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http + > 如信息所示,输入的监控Host须是ipv4,ipv6或域名,不能携带协议头,例如协议头http 2. **网站API等监控反馈statusCode:403或401,但对端服务本身无需认证,浏览器直接访问是OK** -> 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) + > 请排查是否是被防火墙拦截,如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在) 3. 安装包部署的hertzbeat下ping连通性监控异常 安装包安装部署的hertzbeat,对ping连通性监控不可用,但本地直接ping是可用的。 -> 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 -> docker安装默认启用无此问题 -> 详见 + > 安装包部署需要配置java虚拟机root权限启动hertzbeat从而使用ICMP,若未启用root权限则是判断telnet对端7号端口是否开通 + > docker安装默认启用无此问题 + > 详见 4. 配置了k8s监控,但是实际监控时间并未按照正确间隔时间执行 请参考下面几点排查问题: -> 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 -> 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 -> 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine + > 一:首先查看hertzbeat的错误日志,如果出现了'desc: SQL statement too long, check maxSQLLength config',信息 + > 二:需要调整tdengine配置文件,可在服务器创建taos.cfg文件,调整# max length of an SQL : maxSQLLength 654800,然后重启tdengine,需要加入配置文件的挂载 + > 三:如果遇到了重启tdengine失败,需要调整挂载数据文件中的配置,见 .../taosdata/dnode/dnodeEps.json,中dnodeFqdn调整为启动失败的dockerId即可,然后docker restart tdengine 5. 
配置http api监控,用于进行业务接口探测,确保业务可以用,另外接口有进行token鉴权校验,"Authorization:Bearer eyJhbGciOiJIUzI1....",配置后测试,提示“StatusCode 401”。服务端应用收到的token为"Authorization:Bearer%20eyJhbGciOiJIUzI1....",hertzbeat对空格进行转义为“%20”,服务器没有转义导致鉴权失败,建议转义功能作为可选项。 @@ -35,31 +35,31 @@ sidebar_label: 常见问题 1. **MYSQL,TDENGINE和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP + > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 + > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. **日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 + > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter ### 安装包部署常见问题 1. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 + > 一:依赖服务MYSQL数据库,TDENGINE数据库是否已按照启动成功,对应hertzbeat数据库是否已创建,SQL脚本是否执行 + > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 2. 
**日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 + > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md index 31e297703fc..5e5603783cc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/jetty.md @@ -19,33 +19,33 @@ keywords: [开源监控系统, 开源中间件监控, Jetty应用服务器监控 1. 在 Jetty 启动 JMX JMX-REMOTE 模块 -```shell -java -jar $JETTY_HOME/start.jar --add-module=jmx -java -jar $JETTY_HOME/start.jar --add-module=jmx-remote -``` + ```shell + java -jar $JETTY_HOME/start.jar --add-module=jmx + java -jar $JETTY_HOME/start.jar --add-module=jmx-remote + ``` -命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 + 命令执行成功会创建出 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件 2. 编辑 `${JETTY_BASE}/start.d/jmx-remote.ini` 配置文件,修改 JMX 的 IP 端口等参数。 -**`localhost` 需修改为对外暴露 IP** - -```text -## The host/address to bind the RMI server to. -# jetty.jmxremote.rmiserverhost=localhost - -## The port the RMI server listens to (0 means a random port is chosen). -# jetty.jmxremote.rmiserverport=1099 - -## The host/address to bind the RMI registry to. -# jetty.jmxremote.rmiregistryhost=localhost - -## The port the RMI registry listens to. -# jetty.jmxremote.rmiregistryport=1099 - -## The host name exported in the RMI stub. --Djava.rmi.server.hostname=localhost -``` + **`localhost` 需修改为对外暴露 IP** + + ```text + ## The host/address to bind the RMI server to. + # jetty.jmxremote.rmiserverhost=localhost + + ## The port the RMI server listens to (0 means a random port is chosen). + # jetty.jmxremote.rmiserverport=1099 + + ## The host/address to bind the RMI registry to. 
+ # jetty.jmxremote.rmiregistryhost=localhost + + ## The port the RMI registry listens to. + # jetty.jmxremote.rmiregistryport=1099 + + ## The host name exported in the RMI stub. + -Djava.rmi.server.hostname=localhost + ``` 3. 重启 Jetty Server 即可。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md index a79bb0e91c2..9268c920e5d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kafka.md @@ -17,16 +17,16 @@ keywords: [开源监控系统, 开源消息中间件监控, Kafka监控] 2. 修改 Kafka 启动脚本 -修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` -在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 - -```shell -export JMX_PORT=9999; -export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management.jmxremote.rmi.port=9999 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"; - -# 这是最后一行本来就存在的 -# exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" -``` + 修改 Kafka 安装目录下的启动脚本 `/bin/kafka-server-start.sh` + 在脚本正文(即非注释行)的第一行前添加如下内容, ⚠️注意替换您自己的端口和对外 IP 地址 + + ```shell + export JMX_PORT=9999; + export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=ip地址 -Dcom.sun.management.jmxremote.rmi.port=9999 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"; + + # 这是最后一行本来就存在的 + # exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" + ``` 3. 
重启 Kafka 服务 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md index cc4c7254afe..58ed7e3fcf3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/kubernetes.md @@ -17,19 +17,19 @@ keywords: [开源监控系统, 开源Kubernetes监控] 1. 创建service account并绑定默认cluster-admin管理员集群角色 -```kubectl create serviceaccount dashboard-admin -n kube-system``` + ```kubectl create serviceaccount dashboard-admin -n kube-system``` 2. 用户授权 -```shell -kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin -kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' -kubectl describe secret {secret} -n kube-system -``` + ```shell + kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin + kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' + kubectl describe secret {secret} -n kube-system + ``` ### 方式二 -``` +```shell kubectl create serviceaccount cluster-admin kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-admin --serviceaccount=default:cluster-admin diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md index 0b9b96b6099..f95da705d58 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nacos.md @@ -14,9 +14,9 @@ keywords: [开源监控系统, 中间件监控, Nacos分布式监控] 1. 按照[部署文档](https://nacos.io/zh-cn/docs/deployment.html)搭建好Nacos集群。 2. 
配置application.properties文件,暴露metrics数据。 -``` -management.endpoints.web.exposure.include=* -``` + ```properties + management.endpoints.web.exposure.include=* + ``` 3. 访问```{ip}:8848/nacos/actuator/prometheus```,查看是否能访问到metrics数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md index 8c81c5a82c2..50837f79394 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/nginx.md @@ -17,45 +17,45 @@ keywords: [开源监控工具, 开源Java监控工具, 监控Nginx指标] 1. 检查是否已添加 `ngx_http_stub_status_module` -```shell -nginx -V -``` + ```shell + nginx -V + ``` -查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 + 查看是否包含 `--with-http_stub_status_module`,如果没有则需要重新编译安装 Nginx。 2. 编译安装 Nginx, 添加 `ngx_http_stub_status_module` 模块 -下载 Nginx 并解压,在目录下执行 + 下载 Nginx 并解压,在目录下执行 -```shell -./configure --prefix=/usr/local/nginx --with-http_stub_status_module - -make && make install -``` + ```shell + ./configure --prefix=/usr/local/nginx --with-http_stub_status_module + + make && make install + ``` 3. 修改 Nginx 配置文件 -修改 `nginx.conf` 文件,添加监控模块暴露端点,如下配置: + 修改 `nginx.conf` 文件,添加监控模块暴露端点,如下配置: -```shell -# modify nginx.conf -server { - listen 80; # port - server_name localhost; - location /nginx-status { - stub_status on; - access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } -} -``` + ```shell + # modify nginx.conf + server { + listen 80; # port + server_name localhost; + location /nginx-status { + stub_status on; + access_log on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } + } + ``` 4. 重新加载 Nginx -```shell -nginx -s reload -``` + ```shell + nginx -s reload + ``` 5. 在浏览器访问 `http://localhost/nginx-status` 即可查看 Nginx 监控状态信息。 @@ -63,48 +63,48 @@ nginx -s reload 1. 
安装 `ngx_http_reqstat_module` 模块 -```shell -# install `ngx_http_reqstat_module` -wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip - -unzip ngx_req_status.zip - -patch -p1 < ../ngx_req_status-master/write_filter.patch - -./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master - -make -j2 - -make install -``` + ```shell + # install `ngx_http_reqstat_module` + wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip + + unzip ngx_req_status.zip + + patch -p1 < ../ngx_req_status-master/write_filter.patch + + ./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master + + make -j2 + + make install + ``` 2. 修改 Nginx 配置文件 -修改 `nginx.conf` 文件,添加状态模块暴露端点,如下配置: - -```shell -# modify nginx.conf -http { - req_status_zone server_name $server_name 256k; - req_status_zone server_addr $server_addr 256k; - - req_status server_name server_addr; - - server { - location /req-status { - req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + 修改 `nginx.conf` 文件,添加状态模块暴露端点,如下配置: + + ```shell + # modify nginx.conf + http { + req_status_zone server_name $server_name 256k; + req_status_zone server_addr $server_addr 256k; + + req_status server_name server_addr; + + server { + location /req-status { + req_status_show on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } } } -} -``` + ``` 3. 重新加载 Nginx -```shell -nginx -s reload -``` + ```shell + nginx -s reload + ``` 4. 
在浏览器访问 `http://localhost/req-status` 即可查看 Nginx 监控状态信息。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md index 2210a2452e0..7e4ff6383d7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/rabbitmq.md @@ -14,9 +14,9 @@ keywords: [开源监控系统, 开源消息中间件监控, RabbitMQ消息中间 1. 开启 Management 插件,或使用自开启版本 -```shell -rabbitmq-plugins enable rabbitmq_management -``` + ```shell + rabbitmq-plugins enable rabbitmq_management + ``` 2. 浏览器访问 ,默认账户密码 `guest/guest`. 成功登录即开启成功。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md index 87bb81b7800..40710bb3986 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/shenyu.md @@ -17,27 +17,27 @@ keywords: [开源监控系统, 开源消息中间件监控, ShenYu网关监控 1. 在网关的 pom.xml 文件中添加 metrics 的依赖。 -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` + ```xml + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + + ``` 2. 
在网关的配置yaml文件中编辑如下内容: -```yaml -shenyu: - metrics: - enabled: true #设置为 true 表示开启 - name : prometheus - host: 127.0.0.1 #暴露的ip - port: 8090 #暴露的端口 - jmxConfig: #jmx配置 - props: - jvm_enabled: true #开启jvm的监控指标 -``` + ```yaml + shenyu: + metrics: + enabled: true #设置为 true 表示开启 + name : prometheus + host: 127.0.0.1 #暴露的ip + port: 8090 #暴露的端口 + jmxConfig: #jmx配置 + props: + jvm_enabled: true #开启jvm的监控指标 + ``` 最后重启访问网关指标接口 `http://ip:8090` 响应 prometheus 格式数据即可。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md index 9752c22bc4e..7ff88b21f13 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/zookeeper.md @@ -14,23 +14,23 @@ keywords: [开源监控系统, Zookeeper监控监控] 1. 加白名单步骤 -> 1.找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` -> -> 2.配置文件中加入以下命令 + 1. 找到我们 zookeeper 的配置文件,一般是 `zoo.cfg` -```shell -# 将需要的命令添加到白名单中 -4lw.commands.whitelist=stat, ruok, conf, isro + 2. 配置文件中加入以下命令 -# 将所有命令添加到白名单中 -4lw.commands.whitelist=* -``` + ```shell + # 将需要的命令添加到白名单中 + 4lw.commands.whitelist=stat, ruok, conf, isro + + # 将所有命令添加到白名单中 + 4lw.commands.whitelist=* + ``` -> 3.重启服务 + 3. 重启服务 -```shell -zkServer.sh restart -``` + ```shell + zkServer.sh restart + ``` 2. netcat 协议 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md index 95bedddc350..6c30086e1b8 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md @@ -16,61 +16,61 @@ sidebar_label: 常见参数配置 1. 
配置短信发送服务器 -> 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 + > 只有成功配置了您自己的短信服务器,监控系统内触发的告警短信才会正常发送。 -在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) + 在`application.yml`新增如下腾讯平台短信服务器配置(参数需替换为您的短信服务器配置) -```yaml -common: - sms: - tencent: - secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY - secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA - app-id: 1435441637 - sign-name: 赫兹跳动 - template-id: 1343434 -``` + ```yaml + common: + sms: + tencent: + secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY + secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA + app-id: 1435441637 + sign-name: 赫兹跳动 + template-id: 1343434 + ``` -1.1 腾讯云短信创建签名(sign-name) -![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) + 1.1 腾讯云短信创建签名(sign-name) + ![image](https://github.com/apache/hertzbeat/assets/40455946/3a4c287d-b23d-4398-8562-4894296af485) -1.2 腾讯云短信创建正文模板(template-id) + 1.2 腾讯云短信创建正文模板(template-id) -``` -监控:{1},告警级别:{2}。内容:{3} -``` + ``` + 监控:{1},告警级别:{2}。内容:{3} + ``` -![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) + ![image](https://github.com/apache/hertzbeat/assets/40455946/face71a6-46d5-452c-bed3-59d2a975afeb) -1.3 腾讯云短信创建应用(app-id) -![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) + 1.3 腾讯云短信创建应用(app-id) + ![image](https://github.com/apache/hertzbeat/assets/40455946/2732d710-37fa-4455-af64-48bba273c2f8) -1.4 腾讯云访问管理(secret-id、secret-key) -![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) + 1.4 腾讯云访问管理(secret-id、secret-key) + ![image](https://github.com/apache/hertzbeat/assets/40455946/36f056f0-94e7-43db-8f07-82893c98024e) 2. 配置告警自定义参数 -```yaml -alerter: - # 自定义控制台地址 - console-url: https://console.tancloud.io -``` + ```yaml + alerter: + # 自定义控制台地址 + console-url: https://console.tancloud.io + ``` 3. 
使用外置redis代替内存存储实时指标数据 -> 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 - -注意⚠️ `memory.enabled: false, redis.enabled: true` - -```yaml -warehouse: - store: - memory: - enabled: false - init-size: 1024 - redis: - enabled: true - host: 127.0.0.1 - port: 6379 - password: 123456 -``` + > 默认我们的指标实时数据存储在内存中,可以配置如下来使用redis代替内存存储。 + + 注意⚠️ `memory.enabled: false, redis.enabled: true` + + ```yaml + warehouse: + store: + memory: + enabled: false + init-size: 1024 + redis: + enabled: true + host: 127.0.0.1 + port: 6379 + password: 123456 + ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md index 76efdf8f5d0..d72d068562e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/docker-deploy.md @@ -12,7 +12,7 @@ sidebar_label: Docker方式部署 [菜鸟教程-Docker教程](https://www.runoob.com/docker/docker-tutorial.html) 安装完毕后终端查看Docker版本是否正常输出。 - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` @@ -64,64 +64,64 @@ sidebar_label: Docker方式部署 具体修改步骤参考 [配置修改账户密码](account-modify) 6. 
启动HertzBeat Docker容器 -```shell -$ docker run -d -p 1157:1157 -p 1158:1158 \ - -e LANG=zh_CN.UTF-8 \ - -e TZ=Asia/Shanghai \ - -v $(pwd)/data:/opt/hertzbeat/data \ - -v $(pwd)/logs:/opt/hertzbeat/logs \ - -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ - -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ - --restart=always \ - --name hertzbeat apache/hertzbeat -``` + ```shell + $ docker run -d -p 1157:1157 -p 1158:1158 \ + -e LANG=zh_CN.UTF-8 \ + -e TZ=Asia/Shanghai \ + -v $(pwd)/data:/opt/hertzbeat/data \ + -v $(pwd)/logs:/opt/hertzbeat/logs \ + -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ + -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ + --restart=always \ + --name hertzbeat apache/hertzbeat + ``` 这条命令启动一个运行HertzBeat的Docker容器,并且将容器的1157端口映射到宿主机的1157端口上。若宿主机已有进程占用该端口,则需要修改主机映射端口。 -- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 -- `-e LANG=zh_CN.UTF-8` : 设置系统语言 -- `-e TZ=Asia/Shanghai` : 设置系统时区 -- `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口,请注意,前面是宿主机的端口号,后面是容器的端口号。1157是WEB端口,1158是集群端口。 -- `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 -- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 -- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选,不需要可删除)挂载上上一步修改的本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。我们需要修改此配置文件的MYSQL,TDengine配置信息来连接外部服务。 -- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选,不需要可删除)挂载上一步修改的账户配置文件到容器中,若无修改账户需求可删除此命令参数。 -- 注意⚠️ 挂载文件时,前面参数为你自定义本地文件地址,后面参数为docker容器内文件地址(固定) -- `--name hertzbeat` : 命名容器名称 hertzbeat -- `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 + - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 + - `-e LANG=zh_CN.UTF-8` : 设置系统语言 + - `-e TZ=Asia/Shanghai` : 设置系统时区 + - `-p 1157:1157 -p 1158:1158` : 映射容器端口到主机端口,请注意,前面是宿主机的端口号,后面是容器的端口号。1157是WEB端口,1158是集群端口。 + - `-v $(pwd)/data:/opt/hertzbeat/data` : (可选,数据持久化)重要⚠️ 挂载H2数据库文件到本地主机,保证数据不会因为容器的创建删除而丢失 + - `-v 
$(pwd)/logs:/opt/hertzbeat/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 + - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (可选,不需要可删除)挂载上上一步修改的本地配置文件到容器中,即使用本地配置文件覆盖容器配置文件。我们需要修改此配置文件的MYSQL,TDengine配置信息来连接外部服务。 + - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (可选,不需要可删除)挂载上一步修改的账户配置文件到容器中,若无修改账户需求可删除此命令参数。 + - 注意⚠️ 挂载文件时,前面参数为你自定义本地文件地址,后面参数为docker容器内文件地址(固定) + - `--name hertzbeat` : 命名容器名称 hertzbeat + - `--restart=always`:(可选,不需要可删除)使容器在Docker启动后自动重启。若您未在容器创建时指定该参数,可通过以下命令实现该容器自启。 - ```shell - docker update --restart=always hertzbeat - ``` + ```shell + docker update --restart=always hertzbeat + ``` -- `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** + - `apache/hertzbeat` : 使用拉取最新的的HertzBeat官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat`代替。** 7. 开始探索HertzBeat 浏览器访问 即可开始探索使用HertzBeat,默认账户密码 admin/hertzbeat。 8. 部署采集器集群(可选) -```shell -$ docker run -d \ - -e IDENTITY=custom-collector-name \ - -e MODE=public \ - -e MANAGER_HOST=127.0.0.1 \ - -e MANAGER_PORT=1158 \ - --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + $ docker run -d \ + -e IDENTITY=custom-collector-name \ + -e MODE=public \ + -e MANAGER_HOST=127.0.0.1 \ + -e MANAGER_PORT=1158 \ + --name hertzbeat-collector apache/hertzbeat-collector + ``` -这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 + 这条命令启动一个运行HertzBeat采集器的Docker容器,并直连上了HertzBeat主服务节点。 -- `docker run -d` : 通过Docker运行一个容器,使其在后台运行 -- `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 -- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 -- `-e MANAGER_HOST=127.0.0.1` : 重要⚠️ 设置连接的主HertzBeat服务地址IP。 -- `-e MANAGER_PORT=1158` : (可选) 设置连接的主HertzBeat服务地址端口,默认 1158. 
-- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 -- `--name hertzbeat-collector` : 命名容器名称 hertzbeat-collector -- `apache/hertzbeat-collector` : 使用拉取最新的的HertzBeat采集器官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat-collector`代替。** + - `docker run -d` : 通过Docker运行一个容器,使其在后台运行 + - `-e IDENTITY=custom-collector-name` : (可选) 设置采集器的唯一标识名称。⚠️注意多采集器时采集器名称需保证唯一性。 + - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 + - `-e MANAGER_HOST=127.0.0.1` : 重要⚠️ 设置连接的主HertzBeat服务地址IP。 + - `-e MANAGER_PORT=1158` : (可选) 设置连接的主HertzBeat服务地址端口,默认 1158. + - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (可选,不需要可删除)挂载日志文件到本地主机,保证日志不会因为容器的创建删除而丢失,方便查看 + - `--name hertzbeat-collector` : 命名容器名称 hertzbeat-collector + - `apache/hertzbeat-collector` : 使用拉取最新的的HertzBeat采集器官方发布的应用镜像来启动容器,**若使用`quay.io`镜像需用参数`quay.io/tancloud/hertzbeat-collector`代替。** -8. 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 +9. 浏览器访问主HertzBeat服务 `http://localhost:1157` 查看概览页面即可看到注册上来的新采集器 **HAVE FUN** @@ -132,35 +132,35 @@ $ docker run -d \ 1. **MYSQL,TDENGINE或IotDB和HertzBeat都Docker部署在同一主机上,HertzBeat使用localhost或127.0.0.1连接数据库失败** 此问题本质为Docker容器访问宿主机端口连接失败,由于docker默认网络模式为Bridge模式,其通过localhost访问不到宿主机。 -> 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP -> 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` + > 解决办法一:配置application.yml将数据库的连接地址由localhost修改为宿主机的对外IP + > 解决办法二:使用Host网络模式启动Docker,即使Docker容器和宿主机共享网络 `docker run -d --network host .....` 2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 + > 二:HertzBeat的配置文件 `application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以 `docker logs hertzbeat` 查看容器日志是否有明显错误,提issue或交流群或社区反馈 3. 
**日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 + > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. **监控历史图表长时间都一直无数据** -> 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 -> 二:Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB或Tdengine IP账户密码等配置是否正确 + > 一:Tdengine或IoTDB是否配置,未配置则无历史图表数据 + > 二:Tdengine的数据库`hertzbeat`是否创建 + > 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 IotDB或Tdengine IP账户密码等配置是否正确 5. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - -> 安装初始化此数据库参考 [TDengine安装初始化](tdengine-init) 或 [IoTDB安装初始化](iotdb-init) + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - + > 安装初始化此数据库参考 [TDengine安装初始化](tdengine-init) 或 [IoTDB安装初始化](iotdb-init) 6. 安装配置了时序数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 请检查配置参数是否正确 -> iot-db 或td-engine enable 是否设置为true -> 注意⚠️若hertzbeat和IotDB,TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 + > 请检查配置参数是否正确 + > iot-db 或td-engine enable 是否设置为true + > 注意⚠️若hertzbeat和IotDB,TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP + > 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md index ed69d93bfba..d8a86a88acd 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md @@ -17,28 +17,28 @@ It's designed to work on infrastructure of the cloud era, and users benefit from > 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) > -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. 
Docker安装GreptimeDB +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Docker安装GreptimeDB -```shell -$ docker run -p 4000-4004:4000-4004 \ - -p 4242:4242 -v /opt/greptimedb:/tmp/greptimedb \ - --name greptime \ - greptime/greptimedb standalone start \ - --http-addr 0.0.0.0:4000 \ - --rpc-addr 0.0.0.0:4001 -``` + ```shell + $ docker run -p 4000-4004:4000-4004 \ + -p 4242:4242 -v /opt/greptimedb:/tmp/greptimedb \ + --name greptime \ + greptime/greptimedb standalone start \ + --http-addr 0.0.0.0:4000 \ + --rpc-addr 0.0.0.0:4001 + ``` -`-v /opt/greptimedb:/tmp/greptimedb` 为greptimedb数据目录本地持久化挂载,需将`/opt/greptimedb`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 + `-v /opt/greptimedb:/tmp/greptimedb` 为greptimedb数据目录本地持久化挂载,需将`/opt/greptimedb`替换为实际本地存在的目录 + 使用```$ docker ps```查看数据库是否启动成功 ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 @@ -46,18 +46,18 @@ $ docker run -p 4000-4004:4000-4004 \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.greptime`数据源参数,URL账户密码,并启用`enabled`为`true`** -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - greptime: - enabled: true - endpoint: localhost:4001 -``` + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + greptime: + enabled: true + endpoint: localhost:4001 + ``` 2. 重启 HertzBeat @@ -65,4 +65,4 @@ warehouse: 1. 
时序数据库 GreptimeDB 或者 IoTDB 或者 TDengine 是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md index d1d4b3f241f..bbba8a1df89 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md @@ -24,26 +24,26 @@ InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海 ### 2. 通过Docker方式安装InfluxDB > 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装InfluxDB 1.x -```shell -$ docker run -p 8086:8086 \ - -v /opt/influxdb:/var/lib/influxdb \ - influxdb:1.8 -``` +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. 
Docker安装InfluxDB 1.x + + ```shell + $ docker run -p 8086:8086 \ + -v /opt/influxdb:/var/lib/influxdb \ + influxdb:1.8 + ``` -`-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 + `-v /opt/influxdb:/var/lib/influxdb` 为influxdb数据目录本地持久化挂载,需将`/opt/influxdb`替换为实际本地存在的目录 + 使用```$ docker ps```查看数据库是否启动成功 ### 在hertzbeat的`application.yml`配置文件配置此数据库连接 @@ -51,22 +51,22 @@ $ docker run -p 8086:8086 \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - influxdb: - enabled: true - server-url: http://localhost:8086 - username: root - password: root - expire-time: '30d' - replication: 1 -``` + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.influxdb`数据源参数,URL账户密码,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + influxdb: + enabled: true + server-url: http://localhost:8086 + username: root + password: root + expire-time: '30d' + replication: 1 + ``` 2. 重启 HertzBeat @@ -74,4 +74,4 @@ warehouse: 1. 
时序数据库InfluxDb, IoTDB和TDengine是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md index c5286af5330..7bf90831333 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/iotdb-init.md @@ -24,22 +24,22 @@ Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. Docker安装IoTDB -```shell -$ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ - -v /opt/iotdb/data:/iotdb/data \ - --name iotdb \ - apache/iotdb:1.2.2-standalone -``` + ```shell + $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ + -v /opt/iotdb/data:/iotdb/data \ + --name iotdb \ + apache/iotdb:1.2.2-standalone + ``` -`-v /opt/iotdb/data:/iotdb/data` 为IoTDB数据目录本地持久化挂载,需将`/iotdb/data`替换为实际本地存在的目录 -使用```$ docker ps```查看数据库是否启动成功 + `-v /opt/iotdb/data:/iotdb/data` 为IoTDB数据目录本地持久化挂载,需将`/iotdb/data`替换为实际本地存在的目录 + 使用```$ docker ps```查看数据库是否启动成功 3. 
在hertzbeat的`application.yml`配置文件配置IoTDB数据库连接 @@ -47,63 +47,63 @@ $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用IotDB - iot-db: - enabled: true - host: 127.0.0.1 - rpc-port: 6667 - username: root - password: root - # use default queryTimeoutInMs = -1 - query-timeout-in-ms: -1 - # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) - expire-time: '7776000000' -``` - -**IoTDB集群版配置** -如果您使用IoTDB为集群请参考下面配置 - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用IotDB - iot-db: - enabled: true - node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667''] - username: root - password: root - # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 - query-timeout-in-ms: -1 - # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) - expire-time: '7776000000' -``` - -参数说明: - -| 参数名称 | 参数说明 | -|---------------------|-------------------------------------------| -| enabled | 是否启用 | -| host | IoTDB数据库地址 | -| rpc-port | IoTDB数据库端口 | -| node-urls | IoTDB集群地址 | -| username | IoTDB数据库账户 | -| password | IoTDB数据库密码 | -| version | IoTDB数据库版本,已废弃,仅支持V1.* | -| query-timeout-in-ms | 查询超时时间 | -| expire-time | 数据存储时间,默认'7776000000'(90天,单位为毫秒,-1代表永不过期) | - -> 如果集群配置`node-urls`和单机配置同时设置,以集群`node-urls`配置稳准 + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.iot-db`数据源参数,HOST账户密码等,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + # 启用IotDB + iot-db: + enabled: true + host: 127.0.0.1 + rpc-port: 6667 + username: root + password: root + # use default queryTimeoutInMs = -1 + query-timeout-in-ms: -1 + # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) + expire-time: 
'7776000000' + ``` + + **IoTDB集群版配置** + 如果您使用IoTDB为集群请参考下面配置 + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + # 启用IotDB + iot-db: + enabled: true + node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667''] + username: root + password: root + # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 + query-timeout-in-ms: -1 + # 数据存储时间:默认'7776000000'(90天,单位为毫秒,-1代表永不过期) + expire-time: '7776000000' + ``` + + 参数说明: + + | 参数名称 | 参数说明 | + |---------------------|-------------------------------------------| + | enabled | 是否启用 | + | host | IoTDB数据库地址 | + | rpc-port | IoTDB数据库端口 | + | node-urls | IoTDB集群地址 | + | username | IoTDB数据库账户 | + | password | IoTDB数据库密码 | + | version | IoTDB数据库版本,已废弃,仅支持V1.* | + | query-timeout-in-ms | 查询超时时间 | + | expire-time | 数据存储时间,默认'7776000000'(90天,单位为毫秒,-1代表永不过期) | + + > 如果集群配置`node-urls`和单机配置同时设置,以集群`node-urls`配置稳准 4. 重启 HertzBeat @@ -111,15 +111,15 @@ warehouse: 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 3. 
安装配置了IotDB数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 请检查配置参数是否正确 -> iot-db enable是否设置为true -> 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 + > 请检查配置参数是否正确 + > iot-db enable是否设置为true + > 注意⚠️若hertzbeat和IotDB都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP + > 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md index e89cabd8b10..39744f8ef23 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md @@ -66,33 +66,33 @@ sidebar_label: 安装包方式部署 1. **若您使用的是不含JDK的安装包,需您提前准备JAVA运行环境** -安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) -要求:JAVA17环境 -下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) -安装后命令行检查是否成功安装 - -``` -$ java -version -java version "17.0.9" -Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) -Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) - -``` + 安装JAVA运行环境-可参考[官方网站](http://www.oracle.com/technetwork/java/javase/downloads/index.html) + 要求:JAVA17环境 + 下载JAVA安装包: [镜像站](https://repo.huaweicloud.com/java/jdk/) + 安装后命令行检查是否成功安装 + + ```shell + $ java -version + java version "17.0.9" + Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) + Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) + + ``` 2. **按照流程部署,访问 无界面** 请参考下面几点排查问题: -> 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 -> 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 -> 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 + > 一:若切换了依赖服务MYSQL数据库,排查数据库是否成功创建,是否启动成功 + > 二:HertzBeat的配置文件 `hertzbeat/config/application.yml` 里面的依赖服务IP账户密码等配置是否正确 + > 三:若都无问题可以查看 `hertzbeat/logs/` 目录下面的运行日志是否有明显错误,提issue或交流群或社区反馈 3. 
**日志报错TDengine连接或插入SQL失败** -> 一:排查配置的数据库账户密码是否正确,数据库是否创建 -> 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter + > 一:排查配置的数据库账户密码是否正确,数据库是否创建 + > 二:若是安装包安装的TDengine2.3+,除了启动server外,还需执行 `systemctl start taosadapter` 启动 adapter 4. **监控历史图表长时间都一直无数据** -> 一:时序数据库是否配置,未配置则无历史图表数据 -> 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 -> 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 + > 一:时序数据库是否配置,未配置则无历史图表数据 + > 二:若使用了Tdengine,排查Tdengine的数据库`hertzbeat`是否创建 + > 三: HertzBeat的配置文件 `application.yml` 里面的依赖服务 时序数据库 IP账户密码等配置是否正确 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md index 07d5a8b8ed3..9f405dd061a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/quickstart.md @@ -16,24 +16,24 @@ sidebar_label: 快速开始 1. `docker` 环境仅需一条命令即可开始 -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```或者使用 quay.io (若 dockerhub 网络链接超时)``` + ```或者使用 quay.io (若 dockerhub 网络链接超时)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. 浏览器访问 `http://localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` 3. 
部署采集器集群(可选) -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 -- `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 -- `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 -- `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 + - `-e IDENTITY=custom-collector-name` : 配置此采集器的唯一性标识符名称,多个采集器名称不能相同,建议自定义英文名称。 + - `-e MODE=public` : 配置运行模式(public or private), 公共集群模式或私有云边模式。 + - `-e MANAGER_HOST=127.0.0.1` : 配置连接主HertzBeat服务的对外IP。 + - `-e MANAGER_PORT=1158` : 配置连接主HertzBeat服务的对外端口,默认1158。 更多配置详细步骤参考 [通过Docker方式安装HertzBeat](docker-deploy) diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md index 1a0bc9a39f6..c2e7bb9314c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/sslcert-practice.md @@ -18,7 +18,7 @@ github: 1. `docker` 环境仅需一条命令即可安装 -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` + `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` 2. 安装成功浏览器访问 `localhost:1157` 即可开始,默认账号密码 `admin/hertzbeat` @@ -26,63 +26,63 @@ github: 1. 点击新增SSL证书监控 -> 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 + > 系统页面 -> 监控菜单 -> SSL证书 -> 新增SSL证书 -![](/img/docs/start/ssl_1.png) + ![](/img/docs/start/ssl_1.png) 2. 
配置监控网站 -> 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 -> 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 + > 我们这里举例监控百度网站, 配置监控host域名,名称,采集间隔等。 + > 点击确定 注意⚠️新增前默认会先去测试网站连接性,连接成功才会新增,当然也可以把**是否测试**按钮置灰。 -![](/img/docs/start/ssl_2.png) + ![](/img/docs/start/ssl_2.png) 3. 查看检测指标数据 -> 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 + > 在监控列表可以查看任务状态,进监控详情可以查看指标数据图表等。 -![](/img/docs/start/ssl_3.png) + ![](/img/docs/start/ssl_3.png) -![](/img/docs/start/ssl_11.png) + ![](/img/docs/start/ssl_11.png) 4. 设置阈值(证书过期时触发) -> 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 + > 系统页面 -> 告警 -> 告警阈值 -> 新增阈值 -![](/img/docs/start/ssl_4.png) + ![](/img/docs/start/ssl_4.png) -> 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 + > 配置阈值,选择SSL证书指标对象,配置告警表达式-当指标`expired`为`true`触发,即`equals(expired,"true")` , 设置告警级别通知模版信息等。 -![](/img/docs/start/ssl_5.png) + ![](/img/docs/start/ssl_5.png) -> 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 + > 关联阈值与监控, 在阈值列表设置此阈值应用于哪些监控。 -![](/img/docs/start/ssl_6.png) + ![](/img/docs/start/ssl_6.png) 5. 设置阈值(证书过期前一周触发) -> 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 + > 同理如上,新增配置阈值,配置告警表达式-当指标有效期时间戳 `end_timestamp`,`now()`函数为当前时间戳,若配置提前一周触发告警即:`end_timestamp <= (now() + 604800000)` , 其中 `604800000` 为7天总时间差毫秒值。 -![](/img/docs/start/ssl_7.png) + ![](/img/docs/start/ssl_7.png) -> 最终可以在告警中心看到已触发的告警。 + > 最终可以在告警中心看到已触发的告警。 -![](/img/docs/start/ssl_8.png) + ![](/img/docs/start/ssl_8.png) 6. 告警通知(通过钉钉微信飞书等及时通知) -> 监控系统 -> 告警通知 -> 新增接收人 + > 监控系统 -> 告警通知 -> 新增接收人 -![](/img/docs/start/ssl_9.png) + ![](/img/docs/start/ssl_9.png) -钉钉微信飞书等token配置可以参考帮助文档 + 钉钉微信飞书等token配置可以参考帮助文档 - - + + -> 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 + > 告警通知 -> 新增告警通知策略 -> 将刚才配置的接收人启用通知 -![](/img/docs/start/ssl_10.png) + ![](/img/docs/start/ssl_10.png) 7. 
OK 当阈值触发后我们就可以收到对应告警消息啦,如果没有配通知,也可以在告警中心查看告警信息。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md index 3daaa5fa17d..813eb94ad1a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md @@ -18,28 +18,28 @@ TDengine是一款开源物联网时序型数据库,我们用其存储采集到 ### 通过Docker方式安装TDengine > 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装TDengine - -```shell -$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ - -v /opt/taosdata:/var/lib/taos \ - --name tdengine -e TZ=Asia/Shanghai \ - tdengine/tdengine:3.0.4.0 -``` - -`-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 -`-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 -使用```$ docker ps```查看数据库是否启动成功 + +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. 
Docker安装TDengine + + ```shell + $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ + -v /opt/taosdata:/var/lib/taos \ + --name tdengine -e TZ=Asia/Shanghai \ + tdengine/tdengine:3.0.4.0 + ``` + + `-v /opt/taosdata:/var/lib/taos` 为tdengine数据目录本地持久化挂载,需将`/opt/taosdata`替换为实际本地存在的目录 + `-e TZ="Asia/Shanghai"` 为tdengine设置时区,这里可选设置对应的时区 + 使用```$ docker ps```查看数据库是否启动成功 ### 创建数据库实例 @@ -99,21 +99,21 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 [/script/application.yml](https://github.com/apache/hertzbeat/raw/master/script/application.yml) 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - td-engine: - enabled: true - driver-class-name: com.taosdata.jdbc.rs.RestfulDriver - url: jdbc:TAOS-RS://localhost:6041/hertzbeat - username: root - password: taosdata -``` + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置里面的`warehouse.store.td-engine`数据源参数,URL账户密码,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + td-engine: + enabled: true + driver-class-name: com.taosdata.jdbc.rs.RestfulDriver + url: jdbc:TAOS-RS://localhost:6041/hertzbeat + username: root + password: taosdata + ``` 2. 重启 HertzBeat @@ -121,19 +121,19 @@ warehouse: 1. 时序数据库IoTDB和TDengine是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,只影响历史图表数据。 2. 监控页面历史图表不显示,弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 + > 如弹窗所示,历史图表展示的前提是需要安装配置hertzbeat的依赖服务 - IotDB数据库或TDengine数据库 3. 监控详情历史图片不展示或无数据,已经配置了TDengine -> 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 + > 请确认是否安装的TDengine版本为3.0以上,版本2.x不支持兼容 4. 
安装配置了TDengine数据库,但页面依旧显示弹出 [无法提供历史图表数据,请配置依赖时序数据库] -> 请检查配置参数是否正确 -> td-engine enable是否设置为true -> 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP -> 可根据logs目录下启动日志排查 + > 请检查配置参数是否正确 + > td-engine enable是否设置为true + > 注意⚠️若hertzbeat和TDengine都为docker容器在同一主机下启动,容器之间默认不能用127.0.0.1通讯,改为主机IP + > 可根据logs目录下启动日志排查 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md index 1b07dd1c789..c38e59b3198 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md @@ -18,27 +18,27 @@ VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决 ### 通过Docker方式安装VictoriaMetrics > 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> -> 1. 下载安装Docker环境 -> Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 -> 安装完毕后终端查看Docker版本是否正常输出。 -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Docker安装VictoriaMetrics - -```shell -$ docker run -d -p 8428:8428 \ - -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ - --name victoria-metrics \ - victoriametrics/victoria-metrics:v1.95.1 -``` - -`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 -使用```$ docker ps```查看数据库是否启动成功 + +1. 下载安装Docker环境 +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 +安装完毕后终端查看Docker版本是否正常输出。 + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Docker安装VictoriaMetrics + + ```shell + $ docker run -d -p 8428:8428 \ + -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ + --name victoria-metrics \ + victoriametrics/victoria-metrics:v1.95.1 + ``` + + `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` 为VictoriaMetrics数据目录本地持久化挂载 + 使用```$ docker ps```查看数据库是否启动成功 3. 
在hertzbeat的`application.yml`配置文件配置VictoriaMetrics数据库连接 @@ -46,21 +46,21 @@ $ docker run -d -p 8428:8428 \ 修改位于 `hertzbeat/config/application.yml` 的配置文件 注意⚠️docker容器方式需要将application.yml文件挂载到主机本地,安装包方式解压修改位于 `hertzbeat/config/application.yml` 即可 -**修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** - -```yaml -warehouse: - store: - # 关闭默认JPA - jpa: - enabled: false - # 启用 victoria-metrics - victoria-metrics: - enabled: true - url: http://localhost:8428 - username: root - password: root -``` + **修改里面的`warehouse.store.jpa.enabled`参数为`false`, 配置`warehouse.store.victoria-metrics`数据源参数,HOST账户密码等,并启用`enabled`为`true`** + + ```yaml + warehouse: + store: + # 关闭默认JPA + jpa: + enabled: false + # 启用 victoria-metrics + victoria-metrics: + enabled: true + url: http://localhost:8428 + username: root + password: root + ``` 4. 重启 HertzBeat @@ -68,4 +68,4 @@ warehouse: 1. 时序数据库是否都需要配置,能不能都用 -> 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 + > 不需要都配置,任选其一即可,用enable参数控制其是否使用,也可都不安装配置,但会影响历史图表数据和存储时长等。 diff --git a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md index 5b87ee9bf49..342c176673e 100644 --- a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -145,9 +145,9 @@ limitations under the License. 2. Ensure code readability and intuitiveness -- The string in the `annotation` symbol doesn't need to be extracted as constant. + - The string in the `annotation` symbol doesn't need to be extracted as constant. -- The referenced `package` or `resource` name doesn't need to be extracted as constant. + - The referenced `package` or `resource` name doesn't need to be extracted as constant. 3. Variables that have not been reassigned must also be declared as final types. 
@@ -189,10 +189,10 @@ limitations under the License. - returns Set if use `HashSet` 2. If there are multiple threads, the following declaration or returned types can be used: -```java -private CurrentHashMap map; -public CurrentHashMap funName(); -``` + ```java + private CurrentHashMap map; + public CurrentHashMap funName(); + ``` 3. Use `isEmpty()` instead of `length() == 0` or `size() == 0` - Negative demo: @@ -225,98 +225,97 @@ public CurrentHashMap funName(); - Multiple code line `depths` of `n+1` - Redundant lines -Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to continuous nested `if... else..`, it should be considered to try + Generally speaking, if a method's code line depth exceeds `2+ Tabs` due to continuous nested `if... else..`, it should be considered to try -- `merging branches`, -- `inverting branch conditions` -- `extracting private methods` + - `merging branches`, + - `inverting branch conditions` + - `extracting private methods` -to reduce code line depth and improve readability like follows: + to reduce code line depth and improve readability like follows: -- Union or merge the logic into the next level calling -- Negative demo: + - Union or merge the logic into the next level calling + - Negative demo: -```java -if (isInsert) { -save(platform); -} else { -updateById(platform); -} -``` + ```java + if (isInsert) { + save(platform); + } else { + updateById(platform); + } + ``` -- Positive demo: + - Positive demo: -```java -saveOrUpdate(platform); -``` + ```java + saveOrUpdate(platform); + ``` -- Merge the conditions -- Negative demo: - -```java -if (expression1) { -if(expression2) { -...... -} -} - -``` + - Merge the conditions + - Negative demo: -- Positive demo: - - ```java - if (expression1 && expression2) { - ...... - } - ``` - -- Reverse the condition -- Negative demo: + ```java + if (expression1) { + if(expression2) { + // ...... 
+ } + } + ``` - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... - if (condition1) { - ... - } else { - ... - } - } - ``` + - Positive demo: -- Positive demo: + ```java + if (expression1 && expression2) { + // ...... + } + ``` + + - Reverse the condition + - Negative demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (condition1) { + // ... + } else { + // ... + } + } + ``` + + - Positive demo: + + ```java + public void doSomething() { + // Ignored more deeper block lines + // ..... + if (!condition1) { + // ... + return; + } + // ... + } + ``` - ```java - public void doSomething() { - // Ignored more deeper block lines - // ..... - if (!condition1) { - ... - return; - } - // ... - } - ``` - -- Using a single variable or method to reduce the complex conditional expression -- Negative demo: + - Using a single variable or method to reduce the complex conditional expression + - Negative demo: - ```java - if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { - ... - } - ``` + ```java + if (dbType.indexOf("sqlserver") >= 0 || dbType.indexOf("sql server") >= 0) { + // ... + } + ``` -- Positive demo: + - Positive demo: - ```java - if (containsSqlServer(dbType)) { - .... - } - //..... - // definition of the containsSqlServer - ``` + ```java + if (containsSqlServer(dbType)) { + // .... + } + //..... + // definition of the containsSqlServer + ``` > Using `sonarlint` and `better highlights` to check code depth looks like good in the future. @@ -324,20 +323,20 @@ if(expression2) { 1. Method lacks comments: -- `When`: When can the method be called -- `How`: How to use this method and how to pass parameters, etc. -- `What`: What functions does this method achieve -- `Note`: What should developers pay attention to when calling this method + - `When`: When can the method be called + - `How`: How to use this method and how to pass parameters, etc. 
+ - `What`: What functions does this method achieve + - `Note`: What should developers pay attention to when calling this method 2. Missing necessary class header description comments. -Add `What`, `Note`, etc. like mentioned in the `1`. + Add `What`, `Note`, etc. like mentioned in the `1`. 3. The method declaration in the interface must be annotated. -- If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. + - If the semantics of the implementation and the annotation content at the interface declaration are inconsistent, the specific implementation method also needs to be rewritten with annotations. -- If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. + - If the semantics of the method implementation are consistent with the annotation content at the interface declaration, it is not recommended to write annotations to avoid duplicate annotations. 4. The first word in the comment lines need to be capitalized, like `param` lines, `return` lines. If a special reference as a subject does not need to be capitalized, special symbols such as quotation marks need to be noted. @@ -347,31 +346,31 @@ Add `What`, `Note`, etc. like mentioned in the `1`. 1. Prefer `non-capturing` lambdas (lambdas that do not contain references to the outer scope). Capturing lambdas need to create a new object instance for every call. `Non-capturing` lambdas can use the same instance for each invocation. 
-- Negative demo: + - Negative demo: - ```java - map.computeIfAbsent(key, x -> key.toLowerCase()) - ``` + ```java + map.computeIfAbsent(key, x -> key.toLowerCase()) + ``` -- Positive demo: + - Positive demo: - ```java - map.computeIfAbsent(key, k -> k.toLowerCase()); - ``` + ```java + map.computeIfAbsent(key, k -> k.toLowerCase()); + ``` 2. Consider method references instead of inline lambdas -- Negative demo: + - Negative demo: - ```java - map.computeIfAbsent(key, k-> Loader.load(k)); - ``` + ```java + map.computeIfAbsent(key, k-> Loader.load(k)); + ``` -- Positive demo: + - Positive demo: - ```java - map.computeIfAbsent(key, Loader::load); - ``` + ```java + map.computeIfAbsent(key, Loader::load); + ``` ### 3.9 Java Streams @@ -389,127 +388,127 @@ Add `What`, `Note`, etc. like mentioned in the `1`. 1. Use `StringUtils.isBlank` instead of `StringUtils.isEmpty` -- Negative demo: + - Negative demo: - ```java - if (StringUtils.isEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isEmpty(name)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (StringUtils.isBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isBlank(name)) { + return; + } + ``` 2. Use `StringUtils.isNotBlank` instead of `StringUtils.isNotEmpty` -- Negative demo: + - Negative demo: - ```java - if (StringUtils.isNotEmpty(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotEmpty(name)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (StringUtils.isNotBlank(name)) { - return; - } - ``` + ```java + if (StringUtils.isNotBlank(name)) { + return; + } + ``` 3. 
Use `StringUtils.isAllBlank` instead of `StringUtils.isAllEmpty` -- Negative demo: + - Negative demo: - ```java - if (StringUtils.isAllEmpty(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllEmpty(name, age)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (StringUtils.isAllBlank(name, age)) { - return; - } - ``` + ```java + if (StringUtils.isAllBlank(name, age)) { + return; + } + ``` ### 3.12 `Enum` Class 1. Enumeration value comparison -- Negative demo: + - Negative demo: - ```java - if (status.equals(JobStatus.RUNNING)) { - return; - } - ``` + ```java + if (status.equals(JobStatus.RUNNING)) { + return; + } + ``` -- Positive demo: + - Positive demo: - ```java - if (status == JobStatus.RUNNING) { - return; - } - ``` + ```java + if (status == JobStatus.RUNNING) { + return; + } + ``` 2. Enumeration classes do not need to implement Serializable -- Negative demo: + - Negative demo: - ```java - public enum JobStatus implements Serializable { - ... - } - ``` + ```java + public enum JobStatus implements Serializable { + ... + } + ``` -- Positive demo: + - Positive demo: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + ... + } + ``` 3. Use `Enum.name()` instead of `Enum.toString()` -- Negative demo: + - Negative demo: - ```java - System.out.println(JobStatus.RUNNING.toString()); - ``` + ```java + System.out.println(JobStatus.RUNNING.toString()); + ``` -- Positive demo: + - Positive demo: - ```java - System.out.println(JobStatus.RUNNING.name()); - ``` + ```java + System.out.println(JobStatus.RUNNING.name()); + ``` 4. Enumeration class names uniformly use the Enum suffix -- Negative demo: + - Negative demo: - ```java - public enum JobStatus { - ... - } - ``` + ```java + public enum JobStatus { + // ... + } + ``` -- Positive demo: + - Positive demo: - ```java - public enum JobStatusEnum { - ... - } - ``` + ```java + public enum JobStatusEnum { + // ... 
+ } + ``` ### 3.13 `Deprecated` Annotation @@ -518,7 +517,7 @@ Add `What`, `Note`, etc. like mentioned in the `1`. ```java @deprecated public void process(String input) { - ... + // ... } ``` @@ -527,7 +526,7 @@ public void process(String input) { ```java @Deprecated public void process(String input) { - ... + // ... } ``` @@ -535,43 +534,43 @@ public void process(String input) { 1. Use `placeholders` for log output: -- Negative demo + - Negative demo - ```java - log.info("Deploy cluster request " + deployRequest); - ``` + ```java + log.info("Deploy cluster request " + deployRequest); + ``` -- Positive demo + - Positive demo - ```java - log.info("load plugin:{} to {}", file.getName(), appPlugins); - ``` + ```java + log.info("load plugin:{} to {}", file.getName(), appPlugins); + ``` 2. Pay attention to the selection of `log level` when printing logs -When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. + When printing the log content, if the actual parameters of the log placeholder are passed, it is necessary to avoid premature evaluation to avoid unnecessary evaluation caused by the log level. -- Negative demo: + - Negative demo: - Assuming the current log level is `INFO`: + Assuming the current log level is `INFO`: - ```java - // ignored declaration lines. - List userList = getUsersByBatch(1000); - LOG.debug("All users: {}", getAllUserIds(userList)); - ``` + ```java + // ignored declaration lines. + List userList = getUsersByBatch(1000); + LOG.debug("All users: {}", getAllUserIds(userList)); + ``` -- Positive demo: + - Positive demo: - In this case, we should determine the log level in advance before making actual log calls as follows: + In this case, we should determine the log level in advance before making actual log calls as follows: - ```java - // ignored declaration lines. 
- List userList = getUsersByBatch(1000); - if (LOG.isDebugEnabled()) { - LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); - } - ``` + ```java + // ignored declaration lines. + List userList = getUsersByBatch(1000); + if (LOG.isDebugEnabled()) { + LOG.debug("All ids of users: {}", getAllIDsOfUsers(userList)); + } + ``` ## 5 Testing diff --git a/home/versioned_docs/version-v1.5.x/community/contribution.md b/home/versioned_docs/version-v1.5.x/community/contribution.md index 9a634ffffed..fcbd25ceffa 100644 --- a/home/versioned_docs/version-v1.5.x/community/contribution.md +++ b/home/versioned_docs/version-v1.5.x/community/contribution.md @@ -88,29 +88,29 @@ Of course, if you have a good idea, you can also propose it directly on GitHub D 1. First you need to fork your target [hertzbeat repository](https://github.com/apache/hertzbeat). 2. Then download the code locally with git command: -```shell -git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended -``` + ```shell + git clone git@github.com:${YOUR_USERNAME}/hertzbeat.git #Recommended + ``` 3. After the download is complete, please refer to the getting started guide or README file of the target repository to initialize the project. 4. Then, you can refer to the following command to submit the code: -```shell -git checkout -b a-feature-branch #Recommended -``` + ```shell + git checkout -b a-feature-branch #Recommended + ``` 5. Submit the coed as a commit, the commit message format specification required: [module name or type name]feature or bugfix or doc: custom message. -```shell -git add -git commit -m '[docs]feature: necessary instructions' #Recommended -``` + ```shell + git add + git commit -m '[docs]feature: necessary instructions' #Recommended + ``` 6. Push to the remote repository -```shell -git push origin a-feature-branch -``` + ```shell + git push origin a-feature-branch + ``` 7. Then you can initiate a new PR (Pull Request) on GitHub. 
diff --git a/home/versioned_docs/version-v1.5.x/help/activemq.md b/home/versioned_docs/version-v1.5.x/help/activemq.md index ef3cc911969..ee014e7ce8c 100644 --- a/home/versioned_docs/version-v1.5.x/help/activemq.md +++ b/home/versioned_docs/version-v1.5.x/help/activemq.md @@ -15,40 +15,40 @@ keywords: [open source monitoring tool, monitoring Apache ActiveMQ metrics] 1. Modify the `conf/activemq.xml` file in the installation directory to enable JMX -> Add `userJmx="true"` attribute in `broker` tag + > Add `userJmx="true"` attribute in `broker` tag -```xml - - - -``` + ```xml + + + + ``` 2. Modify the `bin/env` file in the installation directory, configure the JMX port IP, etc. -The original configuration information will be as follows - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` - -Update to the following configuration, ⚠️ pay attention to modify `local external IP` - -```text -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" -# ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" - -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" -ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" - 
-ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" -``` + The original configuration information will be as follows + + ```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` + + Update to the following configuration, ⚠️ pay attention to modify `local external IP` + + ```text + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.password.file=${ACTIVEMQ_CONF}/jmx.password" + # ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.access.file=${ACTIVEMQ_CONF}/jmx.access" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.port=11099" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.ssl=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote.authenticate=false" + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Djava.rmi.server.hostname=本机对外IP" + + ACTIVEMQ_SUNJMX_START="$ACTIVEMQ_SUNJMX_START -Dcom.sun.management.jmxremote" + ``` 3. Restart the ACTIVEMQ service, and add the corresponding ActiveMQ monitoring in HertzBeat. The parameters use the IP port configured by JMX. 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md b/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md index 36e332d9b21..57efd130881 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_dingtalk.md @@ -11,31 +11,31 @@ keywords: [open source monitoring tool, open source alerter, open source DingDin 1. **【DingDing desktop client】-> 【Group settings】-> 【Intelligent group assistant】-> 【Add new robot-select custom】-> 【Set robot name and avatar】-> 【Note⚠️Set custom keywords: HertzBeat】 ->【Copy its webhook address after adding successfully】** -> Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. + > Note⚠️ When adding a robot, its custom keywords need to be set in the security setting block: HertzBeat. Other security settings or the IP segment don't need to be filled in. -![email](/img/docs/help/alert-notice-8.png) + ![email](/img/docs/help/alert-notice-8.png) 2. **【Save access_token value of the WebHook address of the robot】** -> eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` -> Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > eg: webHook address:`https://oapi.dingtalk.com/robot/send?access_token=43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` + > Its robot access_token value is `43aac28a236e001285ed84e473f8eabee70f63c7a70287acb0e0f8b65fade64f` 3. **【Alarm notification】->【Add new recipient】 ->【Select DingDing robot notification method】->【Set DingDing robot ACCESS_TOKEN】-> 【Confirm】** -![email](/img/docs/help/alert-notice-9.png) + ![email](/img/docs/help/alert-notice-9.png) 4. 
**Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### DingDing robot common issues 1. DingDing group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether DingDing robot is configured with security custom keywords :HertzBeat. -> Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether DingDing robot is configured with security custom keywords :HertzBeat. + > Please check whether the robot ACCESS_TOKEN is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_discord.md b/home/versioned_docs/version-v1.5.x/help/alert_discord.md index 68296148f22..7d38b18b1dd 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_discord.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_discord.md @@ -13,56 +13,56 @@ keywords: [open source monitoring tool, open source alerter, open source Discord 1. 
Visit [https://discord.com/developers/applications](https://discord.com/developers/applications) to create an application -![bot](/img/docs/help/discord-bot-1.png) + ![bot](/img/docs/help/discord-bot-1.png) 2. Create a robot under the application and get the robot Token -![bot](/img/docs/help/discord-bot-2.png) + ![bot](/img/docs/help/discord-bot-2.png) -![bot](/img/docs/help/discord-bot-3.png) + ![bot](/img/docs/help/discord-bot-3.png) 3. Authorize the bot to the chat server -> Authorize the robot under the OAuth2 menu, select `bot` for `SCOPES`, `BOT PERMISSIONS` select `Send Messages` + > Authorize the robot under the OAuth2 menu, select `bot` for `SCOPES`, `BOT PERMISSIONS` select `Send Messages` -![bot](/img/docs/help/discord-bot-4.png) + ![bot](/img/docs/help/discord-bot-4.png) -> Obtain the URL generated at the bottom, and the browser accesses this URL to officially authorize the robot, that is, to set which chat server the robot will join. + > Obtain the URL generated at the bottom, and the browser accesses this URL to officially authorize the robot, that is, to set which chat server the robot will join. 4. Check if your chat server has joined robot members -![bot](/img/docs/help/discord-bot-5.png) + ![bot](/img/docs/help/discord-bot-5.png) ### Enable developer mode and get Channel ID 1. Personal Settings -> Advanced Settings -> Enable Developer Mode -![bot](/img/docs/help/discord-bot-6.png) + ![bot](/img/docs/help/discord-bot-6.png) 2. Get channel Channel ID -> Right-click the chat channel you want to send the robot message to, click the COPY ID button to get the Channel ID + > Right-click the chat channel you want to send the robot message to, click the COPY ID button to get the Channel ID -![bot](/img/docs/help/discord-bot-7.png) + ![bot](/img/docs/help/discord-bot-7.png) ### Add an alarm notification person in HertzBeat, the notification method is Discord Bot 1. 
**[Alarm notification] -> [Add recipient] -> [Select Discord robot notification method] -> [Set robot Token and ChannelId] -> [OK]** -![email](/img/docs/help/discord-bot-8.png) + ![email](/img/docs/help/discord-bot-8.png) -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** +2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. + > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Discord Bot Notification FAQ 1. Discord doesn't receive bot alert notifications -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured -> Please check whether the bot is properly authorized by the Discord chat server + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the robot Token and ChannelId are configured correctly, and whether the alarm policy association has been configured + > Please check whether the bot is properly authorized by the Discord chat server Other questions can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/alert_email.md b/home/versioned_docs/version-v1.5.x/help/alert_email.md index c507a970bae..10f15a6bfb9 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_email.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_email.md @@ -11,29 +11,29 @@ keywords: [open source monitoring tool, open source alerter, open source email n 1. **【Alarm notification】->【Add new recipient】 ->【Select email notification method】** -![email](/img/docs/help/alert-notice-1.png) + ![email](/img/docs/help/alert-notice-1.png) 2. **【Get verification code】-> 【Enter email verification code】-> 【Confirm】** ![email](/img/docs/help/alert-notice-2.png) -![email](/img/docs/help/alert-notice-3.png) + ![email](/img/docs/help/alert-notice-3.png) 3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### Email notification common issues 1. Hertzbeat deployed on its own intranet cannot receive email notifications -> Hertzbeat needs to configure its own mail server, not tancloud. Please confirm whether you have configured its own mail server in application.yml + > Hertzbeat needs to configure its own mail server, not tancloud. Please confirm whether you have configured its own mail server in application.yml 2. 
Cloud environment tancloud cannot receive email notification -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the mailbox is configured correctly and whether the alarm strategy association is configured. -> Please check whether the warning email is blocked in the trash can of the mailbox. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether the mailbox is configured correctly and whether the alarm strategy association is configured. + > Please check whether the warning email is blocked in the trash can of the mailbox. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md b/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md index 1d5d41a15bc..352981b7d85 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_enterprise_wechat_app.md @@ -11,24 +11,24 @@ keywords: [open source monitoring tool, open source alerter, open source Enterpr 1. **【Enterprise Wechat backstage】-> 【App Management】-> 【Create an app】-> 【Set App message】->【Copy AgentId and Secret adding successfully】** -![email](/img/docs/help/alert-wechat-1.jpg) + ![email](/img/docs/help/alert-wechat-1.jpg) 2. **【Alarm notification】->【Add new recipient】 ->【Select Enterprise WeChat App notification method】->【Set Enterprise WeChat ID,Enterprise App ID and Enterprise App Secret 】-> 【Confirm】** -![email](/img/docs/help/alert-wechat-2.jpg) + ![email](/img/docs/help/alert-wechat-2.jpg) 3. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-wechat-3.jpg) + ![email](/img/docs/help/alert-wechat-3.jpg) ### Enterprise WeChat App common issues 1. Enterprise WeChat App did not receive the alarm notification. -> Please check if the user has application permissions. -> Please check if the enterprise application callback address settings are normal. -> Please check if the server IP is on the enterprise application whitelist. + > Please check if the user has application permissions. + > Please check if the enterprise application callback address settings are normal. + > Please check if the server IP is on the enterprise application whitelist. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_feishu.md b/home/versioned_docs/version-v1.5.x/help/alert_feishu.md index 38f7c72cf03..da5a2674133 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_feishu.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_feishu.md @@ -13,22 +13,22 @@ keywords: [open source monitoring tool, open source alerter, open source feishu 2. **【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > eg: webHook address:`https://open.feishu.cn/open-apis/bot/v2/hook/3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. 
**【Alarm notification】->【Add new recipient】 ->【Select FeiShu robot notification method】->【Set FeiShu robot KEY】-> 【Confirm】** 4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### FeiShu robot notification common issues 1. FeiShu group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_slack.md b/home/versioned_docs/version-v1.5.x/help/alert_slack.md index 26bde4ed2e5..7b5c395c729 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_slack.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_slack.md @@ -17,19 +17,19 @@ Refer to the official website document [Sending messages using Incoming Webhooks 1. 
**【Alarm Notification】->【Add Recipient】->【Select Slack Webhook Notification Method】->【Set Webhook URL】-> 【OK】** -![email](/img/docs/help/slack-bot-1.png) + ![email](/img/docs/help/slack-bot-1.png) 2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. + > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Slack Notification FAQ 1. Slack did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the slack webhook url are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_smn.md b/home/versioned_docs/version-v1.5.x/help/alert_smn.md index 53774315561..6d049dfbe39 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_smn.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_smn.md @@ -11,33 +11,33 @@ keywords: [ open source monitoring tool, open source alerter, open source Huawei 1. 
**According to [Huawei Cloud SMN Official Document](https://support.huaweicloud.com/qs-smn/smn_json.html) activate the SMN service and configure SMN** -![alert-notice-10](/img/docs/help/alert-notice-10.png) + ![alert-notice-10](/img/docs/help/alert-notice-10.png) 2. **Save topic URN for SMN** -![alert-notice-11](/img/docs/help/alert-notice-11.png) + ![alert-notice-11](/img/docs/help/alert-notice-11.png) 3. **According to [Huawei Cloud Signature Document](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html) obtain AK, SK, and project ID** -![alert-notice-12](/img/docs/help/alert-notice-12.png) + ![alert-notice-12](/img/docs/help/alert-notice-12.png) -![alert-notice-13](/img/docs/help/alert-notice-13.png) + ![alert-notice-13](/img/docs/help/alert-notice-13.png) 4. **【Alarm Notification】->【Add Recipient】->【Select Slack Webhook Notification Method】->【Set Huawei Cloud SMN AK, SK and other configurations】-> 【OK】** -![alert-notice-14](/img/docs/help/alert-notice-14.png) + ![alert-notice-14](/img/docs/help/alert-notice-14.png) 5. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. + > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### Huawei Cloud SMN Notification FAQ 1. 
Huawei Cloud SMN did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the Huawei Cloud SMN AK, SK and other configurations are configured correctly, and whether the alarm policy association has been configured Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_telegram.md b/home/versioned_docs/version-v1.5.x/help/alert_telegram.md index 1689788f0f4..9cf2c84bdc2 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_telegram.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_telegram.md @@ -13,32 +13,32 @@ keywords: [open source monitoring tool, open source alerter, open source Telegra 1. Use [@BotFather](https://t.me/BotFather) to create your own bot and get an access token `Token` -![telegram-bot](/img/docs/help/telegram-bot-1.png) + ![telegram-bot](/img/docs/help/telegram-bot-1.png) 2. 
Get the `User ID` of the recipient -**Use the recipient account you want to notify to send a message to the newly created Bot account**, -Visit ```https://api.telegram.org/bot/getUpdates```, **`use the Bot Token from the previous step to replace the `**, and respond to the first in the `Json` data A `result.message.from.id` value is the recipient's `User ID` - -```json -{ - "ok": true, - "result": [ - { - "update_id": 632299191, - "message": { - "from":{ - "id": "User ID" - }, - "chat":{ - }, - "date": 1673858065, - "text": "111" + **Use the recipient account you want to notify to send a message to the newly created Bot account**, + Visit ```https://api.telegram.org/bot<TOKEN>/getUpdates```, **`use the Bot Token from the previous step to replace the <TOKEN>`**, and respond to the first in the `Json` data A `result.message.from.id` value is the recipient's `User ID` + + ```json + { + "ok": true, + "result": [ + { + "update_id": 632299191, + "message": { + "from":{ + "id": "User ID" + }, + "chat":{ + }, + "date": 1673858065, + "text": "111" + } } - } - ] -} -``` + ] + } + ``` 3. Record and save the `Token` and `User Id` we got @@ -46,20 +46,20 @@ 1. **【Alarm Notification】->【Add Recipient】->【Select Telegram Robot Notification Method】->【Set Robot Token and UserId】-> 【OK】** -![email](/img/docs/help/telegram-bot-2.png) + ![email](/img/docs/help/telegram-bot-2.png) -4. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** +2. **Configure the associated alarm notification strategy⚠️ [Add notification strategy] -> [Associate the recipient just set] -> [OK]** -> **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. 
+ > **Note ⚠️ Adding a new recipient does not mean that it has taken effect and can receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, specify which messages are sent to which recipients**. -![email](/img/docs/help/alert-notice-policy.png) + ![email](/img/docs/help/alert-notice-policy.png) ### Telegram Bot Notification FAQ 1. Telegram did not receive the robot warning notification -> Please check whether the alarm information has been triggered in the alarm center -> Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured -> UserId should be the UserId of the recipient of the message + > Please check whether the alarm information has been triggered in the alarm center + > Please check whether the robot Token and UserId are configured correctly, and whether the alarm policy association has been configured + > UserId should be the UserId of the recipient of the message Other questions can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_webhook.md b/home/versioned_docs/version-v1.5.x/help/alert_webhook.md index d1741d71481..7768dcf29d4 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_webhook.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_webhook.md @@ -11,13 +11,13 @@ keywords: [open source monitoring tool, open source alerter, open source webhook 1. **【Alarm notification】->【Add new recipient】 ->【Select WebHook notification method】-> 【Set WebHook callback address】 -> 【Confirm】** -![email](/img/docs/help/alert-notice-5.png) + ![email](/img/docs/help/alert-notice-5.png) 2. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. 
It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### WebHook callback POST body BODY content @@ -60,7 +60,7 @@ Content format:JSON 1. WebHook callback did not take effect -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the configured webhook callback address is correct. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether the configured webhook callback address is correct. Other issues can be fed back through the communication group ISSUE! diff --git a/home/versioned_docs/version-v1.5.x/help/alert_wework.md b/home/versioned_docs/version-v1.5.x/help/alert_wework.md index ce344200301..1e72ad5d401 100644 --- a/home/versioned_docs/version-v1.5.x/help/alert_wework.md +++ b/home/versioned_docs/version-v1.5.x/help/alert_wework.md @@ -11,28 +11,28 @@ keywords: [open source monitoring tool, open source alerter, open source WeWork 1. **【Enterprise Wechat】-> 【Group settings】-> 【Group robot】-> 【Add new robot】-> 【Set robot name and avatar】-> 【Copy its webhook address after adding successfully】** -![email](/img/docs/help/alert-notice-6.jpg) + ![email](/img/docs/help/alert-notice-6.jpg) 2. 
**【Save the key value of the WebHook address of the robot】** -> eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` -> Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > eg: webHook address:`https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` + > Its robot KEY value is `3adafc96-23d0-4cd5-8feb-17f6e0b5fcs4` 3. **【Alarm notification】->【Add new recipient】 ->【Select enterprise Wechat robot notification method】->【Set enterprise Wechat robot KEY】-> 【Confirm】** -![email](/img/docs/help/alert-notice-7.png) + ![email](/img/docs/help/alert-notice-7.png) 4. **Configure the associated alarm notification strategy⚠️ 【Add new notification strategy】-> 【Associate the recipient just set】-> 【Confirm】** -> **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** + > **Note⚠️ Adding a new recipient does not mean that it is effective to receive alarm information. It is also necessary to configure the associated alarm notification strategy, that is, to specify which messages are sent to which recipients.** -![email](/img/docs/help/alert-notice-4.png) + ![email](/img/docs/help/alert-notice-4.png) ### Enterprise Wechat robot common issues 1. The enterprise wechat group did not receive the robot alarm notification. -> Please check whether there is any triggered alarm information in the alarm center. -> Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. + > Please check whether there is any triggered alarm information in the alarm center. + > Please check whether the robot key is configured correctly and whether the alarm strategy association is configured. Other issues can be fed back through the communication group ISSUE! 
diff --git a/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md b/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md index 332767b2a39..72e6389fdde 100644 --- a/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md +++ b/home/versioned_docs/version-v1.5.x/help/dynamic_tp.md @@ -11,53 +11,53 @@ keywords: [open source monitoring tool, open source dynamicTp monitoring tool, m 1. Integration Using `DynamicTp` -`DynamicTp` is a lightweight dynamic thread pool based on the configuration center of the Jvm language. It has built-in monitoring and alarm functions, which can be realized through SPI custom extensions. + `DynamicTp` is a lightweight dynamic thread pool based on the configuration center of the Jvm language. It has built-in monitoring and alarm functions, which can be realized through SPI custom extensions. -For integrated use, please refer to the document [Quick Start](https://dynamictp.cn/guide/use/quick-start.html) + For integrated use, please refer to the document [Quick Start](https://dynamictp.cn/guide/use/quick-start.html) 2. 
Open SpringBoot Actuator Endpoint to expose `DynamicTp` Metric interface -```yaml -management: - endpoints: - web: - exposure: - include: '*' -``` + ```yaml + management: + endpoints: + web: + exposure: + include: '*' + ``` -Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: + Test whether the access Metric interface `ip:port/actuator/dynamic-tp` has response json data as follows: -```json -[ - { - "poolName": "commonExecutor", - "corePoolSize": 1, - "maximumPoolSize": 1, - "queueType": "LinkedBlockingQueue", - "queueCapacity": 2147483647, - "queueSize": 0, - "fair": false, - "queueRemainingCapacity": 2147483647, - "activeCount": 0, - "taskCount": 0, - "completedTaskCount": 0, - "largestPoolSize": 0, - "poolSize": 0, - "waitTaskCount": 0, - "rejectCount": 0, - "rejectHandlerName": null, - "dynamic": false, - "runTimeoutCount": 0, - "queueTimeoutCount": 0 - }, - { - "maxMemory": "4GB", - "totalMemory": "444MB", - "freeMemory": "250.34 MB", - "usableMemory": "3.81GB" - } -] -``` + ```json + [ + { + "poolName": "commonExecutor", + "corePoolSize": 1, + "maximumPoolSize": 1, + "queueType": "LinkedBlockingQueue", + "queueCapacity": 2147483647, + "queueSize": 0, + "fair": false, + "queueRemainingCapacity": 2147483647, + "activeCount": 0, + "taskCount": 0, + "completedTaskCount": 0, + "largestPoolSize": 0, + "poolSize": 0, + "waitTaskCount": 0, + "rejectCount": 0, + "rejectHandlerName": null, + "dynamic": false, + "runTimeoutCount": 0, + "queueTimeoutCount": 0 + }, + { + "maxMemory": "4GB", + "totalMemory": "444MB", + "freeMemory": "250.34 MB", + "usableMemory": "3.81GB" + } + ] + ``` 3. 
Add DynamicTp monitoring under HertzBeat middleware monitoring diff --git a/home/versioned_docs/version-v1.5.x/help/iotdb.md b/home/versioned_docs/version-v1.5.x/help/iotdb.md index 011b9cbec12..5399caa3c41 100644 --- a/home/versioned_docs/version-v1.5.x/help/iotdb.md +++ b/home/versioned_docs/version-v1.5.x/help/iotdb.md @@ -17,29 +17,29 @@ The main steps are as follows: 1. The metric collection is disabled by default, you need to modify the parameters in `conf/iotdb-metric.yml` first, then restart the server -``` -# Whether to start the monitoring module, the default is false -enableMetric: true - -# Whether to enable operation delay statistics -enablePerformanceStat: false - -# Data provision method, externally provide metrics data through jmx and prometheus protocol, optional parameters: [JMX, PROMETHEUS, IOTDB], IOTDB is closed by default. -metricReporterList: - - JMX - - PROMETHEUS - -# The metric architecture used at the bottom layer, optional parameters: [MICROMETER, DROPWIZARD] -monitorType: MICROMETER - -# Initialize the level of the metric, optional parameters: [CORE, IMPORTANT, NORMAL, ALL] -metricLevel: IMPORTANT - -# Predefined metrics set, optional parameters: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] -predefinedMetrics: - - JVM - - FILE -``` + ```yaml + # Whether to start the monitoring module, the default is false + enableMetric: true + + # Whether to enable operation delay statistics + enablePerformanceStat: false + + # Data provision method, externally provide metrics data through jmx and prometheus protocol, optional parameters: [JMX, PROMETHEUS, IOTDB], IOTDB is closed by default. 
+ metricReporterList: + - JMX + - PROMETHEUS + + # The metric architecture used at the bottom layer, optional parameters: [MICROMETER, DROPWIZARD] + monitorType: MICROMETER + + # Initialize the level of the metric, optional parameters: [CORE, IMPORTANT, NORMAL, ALL] + metricLevel: IMPORTANT + + # Predefined metrics set, optional parameters: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] + predefinedMetrics: + - JVM + - FILE + ``` 2. Restart IoTDB, open a browser or use curl to access http://servier_ip:9091/metrics, and you can see the metric data. diff --git a/home/versioned_docs/version-v1.5.x/help/issue.md b/home/versioned_docs/version-v1.5.x/help/issue.md index 9904fab6551..0bb4f069f3a 100644 --- a/home/versioned_docs/version-v1.5.x/help/issue.md +++ b/home/versioned_docs/version-v1.5.x/help/issue.md @@ -8,50 +8,49 @@ sidebar_label: Common issues 1. **Page feedback:monitor.host: Monitoring Host must be ipv4, ipv6 or domain name** -> As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http + > As shown in the information, the entered monitoring Host must be ipv4, ipv6 or domain name, and cannot carry a protocol header, such as http 2. **The website API and other monitoring feedback statusCode:403 or 401, but the opposite end service itself does not need authentication, and the direct access of the browser is OK** -> Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. (user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) + > Please check whether it is blocked by the firewall. For example, BaoTa/aaPanel have set the blocking of `User-Agent=Apache-HttpClient` in the request header by default. If it is blocked, please delete this blocking rule. 
(user-agent has been simulated as a browser in the v1.0.beat5 version. This problem does not exist) 3. Ping connectivity monitoring exception when installing hertzbeat for package deployment. The hertzbeat installed and deployed by the installation package is not available for ping connectivity monitoring, but local direct ping is available 。 -> The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. -> When you install HertzBeat via DockerDocker root is enabled by default. No such problem. -> See + > The deployment of the installation package requires configuring the root permission of the Java virtual machine to start hertzbeat to use ICMP. If the root permission is not enabled, judge whether port 7 of telnet opposite end is opened. + > When you install HertzBeat via Docker, Docker root is enabled by default. No such problem. + > See ### Docker Deployment common issues 1. **MYSQL, TDENGINE and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. + > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` 2. 
**According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> ->> three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. + > two:Check whether dependent service, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. + > three:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + > one:Check whether database account and password configured is correct, the database is created. + > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. ### Package Deployment common issues 1. 
**According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. -> two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. -> three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. + > one:Whether the MySQL database and tdengine database as dependent services have been successfully started, whether the corresponding hertzbeat database has been created, and whether the SQL script has been executed. + > two:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. + > three: Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 2. **Log an error TDengine connection or insert SQL failed** -> one:Check whether database account and password configured is correct, the database is created. -> two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + > one:Check whether database account and password configured is correct, the database is created. + > two:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 
diff --git a/home/versioned_docs/version-v1.5.x/help/jetty.md b/home/versioned_docs/version-v1.5.x/help/jetty.md index ccec65b5559..2a3f69c13e4 100644 --- a/home/versioned_docs/version-v1.5.x/help/jetty.md +++ b/home/versioned_docs/version-v1.5.x/help/jetty.md @@ -19,33 +19,33 @@ keywords: [open source monitoring tool, open source jetty web server monitoring 1. Start the JMX JMX-REMOTE module in Jetty -```shell -java -jar $JETTY_HOME/start.jar --add-module=jmx -java -jar $JETTY_HOME/start.jar --add-module=jmx-remote -``` + ```shell + java -jar $JETTY_HOME/start.jar --add-module=jmx + java -jar $JETTY_HOME/start.jar --add-module=jmx-remote + ``` -Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file + Successful command execution will create `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file 2. Edit the `${JETTY_BASE}/start.d/jmx-remote.ini` configuration file to modify the JMX IP port and other parameters. -**`localhost` needs to be modified to expose the IP** - -```text -## The host/address to bind the RMI server to. -# jetty.jmxremote.rmiserverhost=localhost - -## The port the RMI server listens to (0 means a random port is chosen). -# jetty.jmxremote.rmiserverport=1099 - -## The host/address to bind the RMI registry to. -# jetty.jmxremote.rmiregistryhost=localhost - -## The port the RMI registry listens to. -# jetty.jmxremote.rmiregistryport=1099 - -## The host name exported in the RMI stub. --Djava.rmi.server.hostname=localhost -``` + **`localhost` needs to be modified to expose the IP** + + ```text + ## The host/address to bind the RMI server to. + # jetty.jmxremote.rmiserverhost=localhost + + ## The port the RMI server listens to (0 means a random port is chosen). + # jetty.jmxremote.rmiserverport=1099 + + ## The host/address to bind the RMI registry to. + # jetty.jmxremote.rmiregistryhost=localhost + + ## The port the RMI registry listens to. 
+ # jetty.jmxremote.rmiregistryport=1099 + + ## The host name exported in the RMI stub. + -Djava.rmi.server.hostname=localhost + ``` 3. Restart Jetty Server. diff --git a/home/versioned_docs/version-v1.5.x/help/kubernetes.md b/home/versioned_docs/version-v1.5.x/help/kubernetes.md index d7e6b657ea6..836c84f3818 100644 --- a/home/versioned_docs/version-v1.5.x/help/kubernetes.md +++ b/home/versioned_docs/version-v1.5.x/help/kubernetes.md @@ -17,15 +17,15 @@ Refer to the steps to obtain token 1. Create a service account and bind the default cluster-admin administrator cluster role -```kubectl create serviceaccount dashboard-admin -n kube-system``` + ```kubectl create serviceaccount dashboard-admin -n kube-system``` 2. User Authorization -```shell -kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin -kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' -kubectl describe secret {secret} -n kube-system -``` + ```shell + kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin + kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}' + kubectl describe secret {secret} -n kube-system + ``` ### method two diff --git a/home/versioned_docs/version-v1.5.x/help/nacos.md b/home/versioned_docs/version-v1.5.x/help/nacos.md index f7c8815521f..eb4cb0b5e25 100644 --- a/home/versioned_docs/version-v1.5.x/help/nacos.md +++ b/home/versioned_docs/version-v1.5.x/help/nacos.md @@ -14,9 +14,9 @@ keywords: [open source monitoring tool, open source middleware monitoring tool, 1. Deploy the Nacos cluster according to [deployment document](https://nacos.io/en-us/docs/deployment.html). 2. Configure the application. properties file to expose metrics data. -``` -management.endpoints.web.exposure.include=* -``` + ```properties + management.endpoints.web.exposure.include=* + ``` 3. 
Access ```{ip}:8848/nacos/actuator/prometheus``` to see if metrics data can be accessed. diff --git a/home/versioned_docs/version-v1.5.x/help/nginx.md b/home/versioned_docs/version-v1.5.x/help/nginx.md index a5662be985f..2c9f12f4900 100644 --- a/home/versioned_docs/version-v1.5.x/help/nginx.md +++ b/home/versioned_docs/version-v1.5.x/help/nginx.md @@ -17,101 +17,103 @@ If you want to monitor information in 'Nginx' with this monitoring type, you nee 1. Check if `ngx_http_stub_status_module` has been added -```shell -nginx -V -``` + ```shell + nginx -V + ``` -View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. + View whether it contains `--with-http_stub_status_module`, if not, you need to recompile and install Nginx. 2. Compile and install Nginx, add `ngx_http_stub_status_module` module -Download Nginx and unzip it, execute the following command in the directory + Download Nginx and unzip it, execute the following command in the directory -```shell - -./configure --prefix=/usr/local/nginx --with-http_stub_status_module - -make && make install -``` + ```shell + + ./configure --prefix=/usr/local/nginx --with-http_stub_status_module + + make && make install + ``` 3. Modify Nginx configure file -Modify the `nginx.conf` file and add the monitoring module exposed endpoint, as follows: + Modify the `nginx.conf` file and add the monitoring module exposed endpoint, as follows: -```shell -# modify nginx.conf -server { - listen 80; # port - server_name localhost; - location /nginx-status { - stub_status on; - access_log on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts - } -} -``` + ```shell + # modify nginx.conf + server { + listen 80; # port + server_name localhost; + location /nginx-status { + stub_status on; + access_log on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } + } + ``` 4. 
Reload Nginx -```shell - -nginx -s reload -``` + ```shell + + nginx -s reload + ``` 5. Access `http://localhost/nginx-status` in the browser to view the Nginx monitoring status information. ### Enable `ngx_http_reqstat_module` -```shell -# install `ngx_http_reqstat_module` -wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip - -unzip ngx_req_status.zip - -patch -p1 < ../ngx_req_status-master/write_filter.patch - -./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master - -make -j2 - -make install -``` +1. install `ngx_http_reqstat_module` + + ```shell + # install `ngx_http_reqstat_module` + wget https://github.com/zls0424/ngx_req_status/archive/master.zip -O ngx_req_status.zip + + unzip ngx_req_status.zip + + patch -p1 < ../ngx_req_status-master/write_filter.patch + + ./configure --prefix=/usr/local/nginx --add-module=/path/to/ngx_req_status-master + + make -j2 + + make install + ``` 2. Modify Nginx configure file -update `nginx.conf` file, add status module exposed endpoint, as follows: - -```shell -# modify nginx.conf -http { - req_status_zone server_name $server_name 256k; - req_status_zone server_addr $server_addr 256k; - - req_status server_name server_addr; - - server { - location /req-status { - req_status_show on; - #allow 127.0.0.1; #only allow requests from localhost - #deny all; #deny all other hosts + update `nginx.conf` file, add status module exposed endpoint, as follows: + + ```shell + # modify nginx.conf + http { + req_status_zone server_name $server_name 256k; + req_status_zone server_addr $server_addr 256k; + + req_status server_name server_addr; + + server { + location /req-status { + req_status_show on; + #allow 127.0.0.1; #only allow requests from localhost + #deny all; #deny all other hosts + } } } -} -``` + ``` 3. Reload Nginx -```shell - -nginx -s reload -``` + ```shell + + nginx -s reload + ``` 4. 
Access `http://localhost/req-status` in the browser to view the Nginx monitoring status information. -**Refer Doc: ** + **Refer Doc: ** -**⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** + **⚠️Attention: The endpoint path of the monitoring module is `/nginx-status` `/req-status`** ### Configuration parameter diff --git a/home/versioned_docs/version-v1.5.x/help/rabbitmq.md b/home/versioned_docs/version-v1.5.x/help/rabbitmq.md index e49d572ee72..91fad16ff1e 100644 --- a/home/versioned_docs/version-v1.5.x/help/rabbitmq.md +++ b/home/versioned_docs/version-v1.5.x/help/rabbitmq.md @@ -14,9 +14,9 @@ keywords: [open source monitoring tool, open source rabbitmq monitoring tool, mo 1. Open the Management plugin, or use the self-opening version -```shell -rabbitmq-plugins enable rabbitmq_management -``` + ```shell + rabbitmq-plugins enable rabbitmq_management + ``` 2. Access with a browser, and the default account password is `guest/guest`. Successful login means that it is successfully opened. diff --git a/home/versioned_docs/version-v1.5.x/help/shenyu.md b/home/versioned_docs/version-v1.5.x/help/shenyu.md index aa4a43a8d5c..7bc5f61bdc0 100644 --- a/home/versioned_docs/version-v1.5.x/help/shenyu.md +++ b/home/versioned_docs/version-v1.5.x/help/shenyu.md @@ -17,27 +17,27 @@ Two Steps Mainly: 1. add metrics plugin dependency in gateway's pom.xml. -```xml - - org.apache.shenyu - shenyu-spring-boot-starter-plugin-metrics - ${project.version} - -``` + ```xml + + org.apache.shenyu + shenyu-spring-boot-starter-plugin-metrics + ${project.version} + + ``` 2. modify this config in shenyu gateway yaml. 
-```yaml -shenyu: - metrics: - enabled: false #false is close, true is open - name : prometheus - host: 127.0.0.1 - port: 8090 - jmxConfig: - props: - jvm_enabled: true #enable jvm monitoring -``` + ```yaml + shenyu: + metrics: + enabled: false #false is close, true is open + name : prometheus + host: 127.0.0.1 + port: 8090 + jmxConfig: + props: + jvm_enabled: true #enable jvm monitoring + ``` Finally, restart the access gateway metrics endpoint `http://ip:8090` to respond to prometheus format data. diff --git a/home/versioned_docs/version-v1.5.x/start/custom-config.md b/home/versioned_docs/version-v1.5.x/start/custom-config.md index 7f45b5dd27d..b88a2ff4af4 100644 --- a/home/versioned_docs/version-v1.5.x/start/custom-config.md +++ b/home/versioned_docs/version-v1.5.x/start/custom-config.md @@ -16,44 +16,44 @@ The installation package can be decompressed and modified in `hertzbeat/config/a 1. Configure the SMS sending server -> Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. + > Only when your own SMS server is successfully configured, the alarm SMS triggered in the monitoring tool will be sent normally. -Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) + Add the following Tencent platform SMS server configuration in `application.yml` (parameters need to be replaced with your SMS server configuration) -```yaml -common: - sms: - tencent: - secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY - secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA - app-id: 1435441637 - sign-name: XX Technology - template-id: 1343434 -``` + ```yaml + common: + sms: + tencent: + secret-id: AKIDbQ4VhdMr89wDedFrIcgU2PaaMvOuBCzY + secret-key: PaXGl0ziY9UcWFjUyiFlCPMr77rLkJYlyA + app-id: 1435441637 + sign-name: XX Technology + template-id: 1343434 + ``` 2. 
Configure alarm custom parameters -```yaml -alerter: - # Custom console address - console-url: https://console.tancloud.io -``` + ```yaml + alerter: + # Custom console address + console-url: https://console.tancloud.io + ``` 3. Use external redis instead of memory to store real-time metric data -> By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. - -Note ⚠️ `memory.enabled: false, redis.enabled: true` - -```yaml -warehouse: - store: - memory: - enabled: false - init-size: 1024 - redis: - enabled: true - host: 127.0.0.1 - port: 6379 - password: 123456 -``` + > By default, the real-time data of our metrics is stored in memory, which can be configured as follows to use redis instead of memory storage. + + Note ⚠️ `memory.enabled: false, redis.enabled: true` + + ```yaml + warehouse: + store: + memory: + enabled: false + init-size: 1024 + redis: + enabled: true + host: 127.0.0.1 + port: 6379 + password: 123456 + ``` diff --git a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md index ce7b784bfe0..255216c9aff 100644 --- a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md +++ b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md @@ -47,29 +47,29 @@ sidebar_label: Install via Docker For detail steps, please refer to [Configure Account Password](account-modify) 5. Start the HertzBeat Docker container -```shell -$ docker run -d -p 1157:1157 -p 1158:1158 \ - -e LANG=en_US.UTF-8 \ - -e TZ=Asia/Shanghai \ - -v $(pwd)/data:/opt/hertzbeat/data \ - -v $(pwd)/logs:/opt/hertzbeat/logs \ - -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ - -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ - --name hertzbeat apache/hertzbeat -``` - -This command starts a running HertzBeat Docker container with mapping port 1157-1158. 
If existing processes on the host use the port, please modify host mapped port. - -- `docker run -d` : Run a container in the background via Docker -- `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. -- `-e LANG=en_US.UTF-8` : Set the system language -- `-e TZ=Asia/Shanghai` : Set the system timezone -- `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. -- `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. -- `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. -- `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. -- `--name hertzbeat` : Naming container name hertzbeat -- `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. 
**Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** + ```shell + $ docker run -d -p 1157:1157 -p 1158:1158 \ + -e LANG=en_US.UTF-8 \ + -e TZ=Asia/Shanghai \ + -v $(pwd)/data:/opt/hertzbeat/data \ + -v $(pwd)/logs:/opt/hertzbeat/logs \ + -v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml \ + -v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml \ + --name hertzbeat apache/hertzbeat + ``` + + This command starts a running HertzBeat Docker container with mapping port 1157-1158. If existing processes on the host use the port, please modify host mapped port. + + - `docker run -d` : Run a container in the background via Docker + - `-p 1157:1157 -p 1158:1158` : Mapping container ports to the host, 1157 is web-ui port, 1158 is cluster port. + - `-e LANG=en_US.UTF-8` : Set the system language + - `-e TZ=Asia/Shanghai` : Set the system timezone + - `-v $(pwd)/data:/opt/hertzbeat/data` : (optional, data persistence) Important⚠️ Mount the H2 database file to the local host, to ensure that the data is not lost due creating or deleting container. + - `-v $(pwd)/logs:/opt/hertzbeat/logs` : (optional, if you don't have a need, just delete it) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. + - `-v $(pwd)/application.yml:/opt/hertzbeat/config/application.yml` : (optional, if you don't have a need, just delete it) Mount the local configuration file into the container which has been modified in the previous step, namely using the local configuration file to cover container configuration file. + - `-v $(pwd)/sureness.yml:/opt/hertzbeat/config/sureness.yml` : (optional, if you don't have a need, just delete it) Mount account configuration file modified in the previous step into the container. Delete this command parameters if no needs. 
+ - `--name hertzbeat` : Naming container name hertzbeat + - `apache/hertzbeat` : Use the pulled latest HertzBeat official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat` instead if you pull `quay.io` docker image.** 6. Begin to explore HertzBeat @@ -77,25 +77,25 @@ This command starts a running HertzBeat Docker container with mapping port 1157- 7. Deploy collector cluster (Optional) -```shell -$ docker run -d \ - -e IDENTITY=custom-collector-name \ - -e MODE=public \ - -e MANAGER_HOST=127.0.0.1 \ - -e MANAGER_PORT=1158 \ - --name hertzbeat-collector apache/hertzbeat-collector -``` - -This command starts a running HertzBeat-Collector container. - -- `docker run -d` : Run a container in the background via Docker -- `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. -- `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. -- `-e MANAGER_HOST=127.0.0.1` : Important⚠️ Set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. -- `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. -- `--name hertzbeat-collector` : Naming container name hertzbeat-collector -- `apache/hertzbeat-collector` : Use the pulled latest HertzBeat-Collector official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat-collector` instead if you pull `quay.io` docker image.** + ```shell + $ docker run -d \ + -e IDENTITY=custom-collector-name \ + -e MODE=public \ + -e MANAGER_HOST=127.0.0.1 \ + -e MANAGER_PORT=1158 \ + --name hertzbeat-collector apache/hertzbeat-collector + ``` + + This command starts a running HertzBeat-Collector container. 
+ + - `docker run -d` : Run a container in the background via Docker + - `-e IDENTITY=custom-collector-name` : (optional) Set the collector unique identity name. Attention the clusters collector name must unique. + - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. + - `-e MANAGER_HOST=127.0.0.1` : Important⚠️ Set the main hertzbeat server ip. + - `-e MANAGER_PORT=1158` : (optional) Set the main hertzbeat server port, default 1158. + - `-v $(pwd)/logs:/opt/hertzbeat-collector/logs` : (optional) Mount the log file to the local host, to ensure the log will not be lost due creating or deleting container. + - `--name hertzbeat-collector` : Naming container name hertzbeat-collector + - `apache/hertzbeat-collector` : Use the pulled latest HertzBeat-Collector official application mirror to start the container. **Use `quay.io/tancloud/hertzbeat-collector` instead if you pull `quay.io` docker image.** 8. Access `http://localhost:1157` and you will see the registered new collector in dashboard. @@ -108,39 +108,39 @@ This command starts a running HertzBeat-Collector container. 1. **MYSQL, TDENGINE, IoTDB and HertzBeat are deployed on the same host by Docker,HertzBeat use localhost or 127.0.0.1 connect to the database but fail** The problems lies in Docker container failed to visit and connect localhost port. Because the docker default network mode is Bridge mode which can't access local machine through localhost. -> Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. -> Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. `docker run -d --network host .....` + > Solution A:Configure application.yml. Change database connection address from localhost to external IP of the host machine. + > Solution B:Use the Host network mode to start Docker, namely making Docker container and hosting share network. 
`docker run -d --network host .....` 2. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. -> 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. + > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. + > 3:`docker logs hertzbeat` Check whether the container log has errors. If you haven't solved the issue, report it to the communication group or community. 3. **Log an error TDengine connection or insert SQL failed** -> 1:Check whether database account and password configured is correct, the database is created. -> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + > 1:Check whether database account and password configured is correct, the database is created. + > 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 4. **Historical monitoring charts have been missing data for a long time** -> 1:Check whether you configure Tdengine or IoTDB. No configuration means no historical chart data. -> 2:Check whether Tdengine database `hertzbeat` is created. -> 3: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. + > 1:Check whether you configure Tdengine or IoTDB. No configuration means no historical chart data. 
+ > 2:Check whether Tdengine database `hertzbeat` is created. + > 3: Check whether IP account and password configuration is correct in HertzBeat's configuration file `application.yml`. 5. If the history chart on the monitoring page is not displayed,popup [please configure time series database] -> As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service - IoTDB or TDengine database. -> Installation and initialization this database refer to [TDengine Installation](tdengine-init) or [IoTDB Installation](iotdb-init) + > As shown in the popup window,the premise of history chart display is that you need install and configure hertzbeat's dependency service - IoTDB or TDengine database. + > Installation and initialization this database refer to [TDengine Installation](tdengine-init) or [IoTDB Installation](iotdb-init) 6. The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is near 2.4.0.12, version 3.0 and 2.2 are not compatible. + > Please confirm whether the installed TDengine version is near 2.4.0.12, version 3.0 and 2.2 are not compatible. 7. 
The time series database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure dependent time series database] -> Please check if the configuration parameters are correct -> Is iot-db or td-engine enable set to true -> Note⚠️If both hertzbeat and IotDB, TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory + > Please check if the configuration parameters are correct + > Is iot-db or td-engine enable set to true + > Note⚠️If both hertzbeat and IotDB, TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed + > You can check the startup logs according to the logs directory diff --git a/home/versioned_docs/version-v1.5.x/start/greptime-init.md b/home/versioned_docs/version-v1.5.x/start/greptime-init.md index c58325ee3a7..5ff9268213b 100644 --- a/home/versioned_docs/version-v1.5.x/start/greptime-init.md +++ b/home/versioned_docs/version-v1.5.x/start/greptime-init.md @@ -17,29 +17,29 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### Install GreptimeDB via Docker > Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. 
Install GreptimeDB with Docker - -```shell -$ docker run -p 4000-4004:4000-4004 \ - -p 4242:4242 -v /opt/greptimedb:/tmp/greptimedb \ - --name greptime \ - greptime/greptimedb standalone start \ - --http-addr 0.0.0.0:4000 \ - --rpc-addr 0.0.0.0:4001 \ -``` - -`-v /opt/greptimedb:/tmp/greptimedb` is local persistent mount of greptimedb data directory. `/opt/greptimedb` should be replaced with the actual local directory. -use```$ docker ps``` to check if the database started successfully + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Install GreptimeDB with Docker + + ```shell + $ docker run -p 4000-4004:4000-4004 \ + -p 4242:4242 -v /opt/greptimedb:/tmp/greptimedb \ + --name greptime \ + greptime/greptimedb standalone start \ + --http-addr 0.0.0.0:4000 \ + --rpc-addr 0.0.0.0:4001 \ + ``` + + `-v /opt/greptimedb:/tmp/greptimedb` is local persistent mount of greptimedb data directory. `/opt/greptimedb` should be replaced with the actual local directory. + use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file @@ -48,17 +48,17 @@ use```$ docker ps``` to check if the database started successfully Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.greptime` data source parameters, URL account and password. 
-```yaml -warehouse: - store: - # disable jpa - jpa: - enabled: false - # enable greptime - greptime: - enabled: true - endpoint: localhost:4001 -``` + ```yaml + warehouse: + store: + # disable jpa + jpa: + enabled: false + # enable greptime + greptime: + enabled: true + endpoint: localhost:4001 + ``` 2. Restart HertzBeat @@ -66,4 +66,4 @@ warehouse: 1. Do both the time series databases Greptime, IoTDB or TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. diff --git a/home/versioned_docs/version-v1.5.x/start/influxdb-init.md b/home/versioned_docs/version-v1.5.x/start/influxdb-init.md index a1b68b3b077..c295f908d3e 100644 --- a/home/versioned_docs/version-v1.5.x/start/influxdb-init.md +++ b/home/versioned_docs/version-v1.5.x/start/influxdb-init.md @@ -23,26 +23,26 @@ Note⚠️ Need InfluxDB 1.x Version. ### 2. Install TDengine via Docker > Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Install InfluxDB with Docker -> -> ``` -> $ docker run -p 8086:8086 \ -> -v /opt/influxdb:/var/lib/influxdb \ -> influxdb:1.8 -> ``` -> -> `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. 
`/opt/influxdb` should be replaced with the actual local directory. -> use```$ docker ps``` to check if the database started successfully + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Install InfluxDB with Docker + + ```shell + $ docker run -p 8086:8086 \ + -v /opt/influxdb:/var/lib/influxdb \ + influxdb:1.8 + ``` + + `-v /opt/influxdb:/var/lib/influxdb` is local persistent mount of InfluxDB data directory. `/opt/influxdb` should be replaced with the actual local directory. + use```$ docker ps``` to check if the database started successfully ### Configure the database connection in hertzbeat `application.yml` configuration file @@ -51,21 +51,21 @@ Note⚠️ Need InfluxDB 1.x Version. Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.influxdb` data source parameters, URL account and password. -```yaml -warehouse: - store: - # disable jpa - jpa: - enabled: false - # enable influxdb - influxdb: - enabled: true - server-url: http://localhost:8086 - username: root - password: root - expire-time: '30d' - replication: 1 -``` + ```yaml + warehouse: + store: + # disable jpa + jpa: + enabled: false + # enable influxdb + influxdb: + enabled: true + server-url: http://localhost:8086 + username: root + password: root + expire-time: '30d' + replication: 1 + ``` 2. Restart HertzBeat @@ -73,4 +73,4 @@ warehouse: 1. Do both the time series databases InfluxDB, IoTDB and TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. 
You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. diff --git a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md index a105bd6b769..9cd1157db22 100644 --- a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md +++ b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md @@ -31,15 +31,15 @@ Apache IoTDB is a software system that integrates the collection, storage, manag 2. Install IoTDB via Docker -```shell -$ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ - -v /opt/iotdb/data:/iotdb/data \ - --name iotdb \ - apache/iotdb:1.2.2-standalone -``` + ```shell + $ docker run -d -p 6667:6667 -p 31999:31999 -p 8181:8181 \ + -v /opt/iotdb/data:/iotdb/data \ + --name iotdb \ + apache/iotdb:1.2.2-standalone + ``` -`-v /opt/iotdb/data:/iotdb/data` is local persistent mount of IotDB data directory.`/iotdb/data` should be replaced with the actual local directory. -use```$ docker ps``` to check if the database started successfully + `-v /opt/iotdb/data:/iotdb/data` is local persistent mount of IotDB data directory.`/iotdb/data` should be replaced with the actual local directory. + use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file @@ -47,61 +47,61 @@ use```$ docker ps``` to check if the database started successfully Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.iot-db` data source parameters, HOST account and password. 
-``` -warehouse: - store: - # disable JPA - jpa: - enabled: false - # enable iot-db - iot-db: - enabled: true - host: 127.0.0.1 - rpc-port: 6667 - username: root - password: root - query-timeout-in-ms: -1 - # default '7776000000'(90days,unit:ms,-1:no-expire) - expire-time: '7776000000' -``` - -**IoTDB Cluster Configuration** - -If you are using IoTDB for clustering, please refer to the configuration below: - -```yaml -warehouse: - store: - # Disable default JPA - jpa: - enabled: false - # Enable IoTDB - iot-db: - enabled: true - node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667'] - username: root - password: root - # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 - query-timeout-in-ms: -1 - # Data storage time: default '7776000000' (90 days, in milliseconds, -1 means never expire) - expire-time: '7776000000' -``` - -Configuration parameters: - -| Parameter Name | Description | -|---------------------|-------------------------------------------------------------------------------------------| -| enabled | Whether to enable | -| host | IoTDB database address | -| rpc-port | IoTDB database port | -| node-urls | IoTDB cluster addresses | -| username | IoTDB database account | -| password | IoTDB database password | -| version | deprecated | -| query-timeout-in-ms | Query timeout | -| expire-time | Data storage time, default '7776000000' (90 days, in milliseconds, -1 means never expire) | - -> If both cluster configuration `node-urls` and standalone configuration are set simultaneously, the cluster `node-urls` configuration takes precedence. 
+ ```yaml + warehouse: + store: + # disable JPA + jpa: + enabled: false + # enable iot-db + iot-db: + enabled: true + host: 127.0.0.1 + rpc-port: 6667 + username: root + password: root + query-timeout-in-ms: -1 + # default '7776000000'(90days,unit:ms,-1:no-expire) + expire-time: '7776000000' + ``` + + **IoTDB Cluster Configuration** + + If you are using IoTDB for clustering, please refer to the configuration below: + + ```yaml + warehouse: + store: + # Disable default JPA + jpa: + enabled: false + # Enable IoTDB + iot-db: + enabled: true + node-urls: ['127.0.0.1:6667','127.0.0.2:6667','127.0.0.3:6667'] + username: root + password: root + # if iotdb version >= 0.13 use default queryTimeoutInMs = -1; else use default queryTimeoutInMs = 0 + query-timeout-in-ms: -1 + # Data storage time: default '7776000000' (90 days, in milliseconds, -1 means never expire) + expire-time: '7776000000' + ``` + + Configuration parameters: + + | Parameter Name | Description | + |---------------------|-------------------------------------------------------------------------------------------| + | enabled | Whether to enable | + | host | IoTDB database address | + | rpc-port | IoTDB database port | + | node-urls | IoTDB cluster addresses | + | username | IoTDB database account | + | password | IoTDB database password | + | version | deprecated | + | query-timeout-in-ms | Query timeout | + | expire-time | Data storage time, default '7776000000' (90 days, in milliseconds, -1 means never expire) | + + > If both cluster configuration `node-urls` and standalone configuration are set simultaneously, the cluster `node-urls` configuration takes precedence. 4. Restart HertzBeat @@ -109,15 +109,15 @@ Configuration parameters: 1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. 
You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] -> As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database 3. The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] -> Please check if the configuration parameters are correct -> Is td-engine enable set to true -> Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory + > Please check if the configuration parameters are correct + > Is td-engine enable set to true + > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed + > You can check the startup logs according to the logs directory diff --git a/home/versioned_docs/version-v1.5.x/start/package-deploy.md b/home/versioned_docs/version-v1.5.x/start/package-deploy.md index ed26d0532e2..21430e6682b 100644 --- a/home/versioned_docs/version-v1.5.x/start/package-deploy.md +++ 
b/home/versioned_docs/version-v1.5.x/start/package-deploy.md @@ -12,7 +12,7 @@ sidebar_label: Install via Package 2. Configure HertzBeat's configuration file(optional) Unzip the installation package to the host eg: /opt/hertzbeat - ``` + ```shell $ tar zxvf hertzbeat-xx.tar.gz or $ unzip -o hertzbeat-xx.zip @@ -33,7 +33,7 @@ sidebar_label: Install via Package 4. Start the service Execute the startup script `startup.sh` in the installation directory `hertzbeat/bin/`, or `startup.bat` in windows. - ``` + ```shell ./startup.sh ``` @@ -72,7 +72,7 @@ sidebar_label: Install via Package download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully. - ``` + ```shell $ java -version java version "17.0.9" Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) @@ -83,17 +83,17 @@ sidebar_label: Install via Package 2. **According to the process deploy,visit no interface** Please refer to the following points to troubleshoot issues: -> 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. -> 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. -> 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. + > 1:If you switch to dependency service MYSQL database,check whether the database is created and started successfully. + > 2:Check whether dependent services, IP account and password configuration is correct in HertzBeat's configuration file `hertzbeat/config/application.yml`. + > 3:Check whether the running log has errors in `hertzbeat/logs/` directory. If you haven't solved the issue, report it to the communication group or community. 3. 
**Log an error TDengine connection or insert SQL failed** -> 1:Check whether database account and password configured is correct, the database is created. -> 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. + > 1:Check whether database account and password configured is correct, the database is created. + > 2:If you install TDengine2.3+ version, you must execute `systemctl start taosadapter` to start adapter in addition to start the server. 4. **Monitoring historical charts with no data for a long time** -> 1: Whether the time series database is configured or not, if it is not configured, there is no historical chart data. -> 2: If you are using Tdengine, check whether the database `hertzbeat` of Tdengine is created. -> 3: HertzBeat's configuration file `application.yml`, the dependent services in it, the time series, the IP account password, etc. are configured correctly. + > 1: Whether the time series database is configured or not, if it is not configured, there is no historical chart data. + > 2: If you are using Tdengine, check whether the database `hertzbeat` of Tdengine is created. + > 3: HertzBeat's configuration file `application.yml`, the dependent services in it, the time series, the IP account password, etc. are configured correctly. diff --git a/home/versioned_docs/version-v1.5.x/start/quickstart.md b/home/versioned_docs/version-v1.5.x/start/quickstart.md index 319e046b2ea..e8b099e7058 100644 --- a/home/versioned_docs/version-v1.5.x/start/quickstart.md +++ b/home/versioned_docs/version-v1.5.x/start/quickstart.md @@ -16,24 +16,24 @@ sidebar_label: Quick Start 1. 
Just one command to get started: -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat apache/hertzbeat``` -```or use quay.io (if dockerhub network connect timeout)``` + ```or use quay.io (if dockerhub network connect timeout)``` -```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` + ```docker run -d -p 1157:1157 -p 1158:1158 --name hertzbeat quay.io/tancloud/hertzbeat``` 2. Access `http://localhost:1157` to start, default account: `admin/hertzbeat` 3. Deploy collector clusters(Optional) -``` -docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector -``` + ```shell + docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector + ``` -- `-e IDENTITY=custom-collector-name` : set the collector unique identity name. -- `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. -- `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. -- `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. + - `-e IDENTITY=custom-collector-name` : set the collector unique identity name. + - `-e MODE=public` : set the running mode(public or private), public cluster or private cloud-edge. + - `-e MANAGER_HOST=127.0.0.1` : set the main hertzbeat server ip. + - `-e MANAGER_PORT=1158` : set the main hertzbeat server port, default 1158. 
Detailed config refer to [Install HertzBeat via Docker](https://hertzbeat.apache.org/docs/start/docker-deploy) diff --git a/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md b/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md index 919e1aa1669..5e203b4fc7f 100644 --- a/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md +++ b/home/versioned_docs/version-v1.5.x/start/sslcert-practice.md @@ -18,7 +18,7 @@ github: 1. The `docker` environment can be installed with just one command -`docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` + `docker run -d -p 1157:1157 --name hertzbeat apache/hertzbeat` 2. After the installation is successful, the browser can access `localhost:1157` to start, the default account password is `admin/hertzbeat` @@ -26,62 +26,62 @@ github: 1. Click Add SSL Certificate Monitor -> System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate + > System Page -> Monitor Menu -> SSL Certificate -> Add SSL Certificate -![](/img/docs/start/ssl_1.png) + ![](/img/docs/start/ssl_1.png) 2. Configure the monitoring website -> Here we take the example of monitoring Baidu website, configure monitoring host domain name, name, collection interval, etc. -> Click OK Note ⚠️Before adding, it will test the connectivity of the website by default, and the connection will be successful before adding. Of course, you can also gray out the **Test or not** button. + > Here we take the example of monitoring Baidu website, configure monitoring host domain name, name, collection interval, etc. + > Click OK Note ⚠️Before adding, it will test the connectivity of the website by default, and the connection will be successful before adding. Of course, you can also gray out the **Test or not** button. -![](/img/docs/start/ssl_2.png) + ![](/img/docs/start/ssl_2.png) 3. View the detection index data -> In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. 
+ > In the monitoring list, you can view the monitoring status, and in the monitoring details, you can view the metric data chart, etc. -![](/img/docs/start/ssl_3.png) + ![](/img/docs/start/ssl_3.png) -![](/img/docs/start/ssl_11.png) + ![](/img/docs/start/ssl_11.png) 4. Set the threshold (triggered when the certificate expires) -> System Page -> Alarms -> Alarm Thresholds -> New Thresholds + > System Page -> Alarms -> Alarm Thresholds -> New Thresholds -![](/img/docs/start/ssl_4.png) + ![](/img/docs/start/ssl_4.png) -> Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. + > Configure the threshold, select the SSL certificate metric object, configure the alarm expression-triggered when the metric `expired` is `true`, that is, `equals(expired,"true")`, set the alarm level notification template information, etc. -![](/img/docs/start/ssl_5.png) + ![](/img/docs/start/ssl_5.png) -> Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. + > Associating thresholds with monitoring, in the threshold list, set which monitoring this threshold applies to. -![](/img/docs/start/ssl_6.png) + ![](/img/docs/start/ssl_6.png) 5. Set the threshold (triggered one week before the certificate expires) -> In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. 
+ > In the same way, add a new configuration threshold and configure an alarm expression - when the metric expires timestamp `end_timestamp`, the `now()` function is the current timestamp, if the configuration triggers an alarm one week in advance: `end_timestamp <= (now( ) + 604800000)` , where `604800000` is the 7-day total time difference in milliseconds. -![](/img/docs/start/ssl_7.png) + ![](/img/docs/start/ssl_7.png) -> Finally, you can see the triggered alarm in the alarm center. + > Finally, you can see the triggered alarm in the alarm center. -![](/img/docs/start/ssl_8.png) + ![](/img/docs/start/ssl_8.png) 6. Alarm notification (in time notification via Dingding WeChat Feishu, etc.) -> Monitoring Tool -> Alarm Notification -> New Receiver + > Monitoring Tool -> Alarm Notification -> New Receiver -![](/img/docs/start/ssl_10.png) + ![](/img/docs/start/ssl_10.png) -For token configuration such as Dingding WeChat Feishu, please refer to the help document + For token configuration such as Dingding WeChat Feishu, please refer to the help document - + -> Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured + > Alarm Notification -> New Alarm Notification Policy -> Enable Notification for the Recipient Just Configured -![](/img/docs/start/ssl_11.png) + ![](/img/docs/start/ssl_11.png) 7. OK When the threshold is triggered, we can receive the corresponding alarm message. If there is no notification, you can also view the alarm information in the alarm center. diff --git a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md index 0ec76f6c8f6..e2d6e21936a 100644 --- a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md +++ b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md @@ -19,28 +19,28 @@ Note⚠️ Need TDengine 3.x Version. 
### Install TDengine via Docker > Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). -> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` -> -> 2. Install TDengine with Docker -> -> ```shell -> $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ -> -v /opt/taosdata:/var/lib/taos \ -> --name tdengine -e TZ=Asia/Shanghai \ -> tdengine/tdengine:3.0.4.0 -> ``` -> -> `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. -> `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. -> use```$ docker ps``` to check if the database started successfully + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` + +2. Install TDengine with Docker + + ```shell + $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \ + -v /opt/taosdata:/var/lib/taos \ + --name tdengine -e TZ=Asia/Shanghai \ + tdengine/tdengine:3.0.4.0 + ``` + + `-v /opt/taosdata:/var/lib/taos` is local persistent mount of TDengine data directory. `/opt/taosdata` should be replaced with the actual local directory. + `-e TZ="Asia/Shanghai"` can set time zone for TDengine.Set up the corresponding time zone you want. + use```$ docker ps``` to check if the database started successfully ### Create database instance @@ -88,20 +88,20 @@ Note⚠️ Need TDengine 3.x Version. 
Note⚠️The docker container way need to mount application.yml file locally,while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Replace `warehouse.store.td-engine` data source parameters, URL account and password. -```yaml -warehouse: - store: - # disable jpa - jpa: - enabled: false - # enable td-engine - td-engine: - enabled: true - driver-class-name: com.taosdata.jdbc.rs.RestfulDriver - url: jdbc:TAOS-RS://localhost:6041/hertzbeat - username: root - password: taosdata -``` + ```yaml + warehouse: + store: + # disable jpa + jpa: + enabled: false + # enable td-engine + td-engine: + enabled: true + driver-class-name: com.taosdata.jdbc.rs.RestfulDriver + url: jdbc:TAOS-RS://localhost:6041/hertzbeat + username: root + password: taosdata + ``` 2. Restart HertzBeat @@ -109,19 +109,19 @@ warehouse: 1. Do both the time series databases IoTDB and TDengine need to be configured? Can they both be used? -> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. + > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which only affects the historical chart data. 2. The historical chart of the monitoring page is not displayed, and pops up [Unable to provide historical chart data, please configure to rely on the time series database] -> As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database + > As shown in the pop-up window, the premise of displaying the history chart is to install and configure the dependent services of hertzbeat - IotDB database or TDengine database 3. 
The historical picture of monitoring details is not displayed or has no data, and TDengine has been deployed -> Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. + > Please confirm whether the installed TDengine version is 3.x, version 2.x are not compatible. 4. The TDengine database is installed and configured, but the page still displays a pop-up [Unable to provide historical chart data, please configure the dependent time series database] -> Please check if the configuration parameters are correct -> Is td-engine enable set to true -> Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed -> You can check the startup logs according to the logs directory + > Please check if the configuration parameters are correct + > Is td-engine enable set to true + > Note⚠️If both hertzbeat and TDengine are started under the same host for docker containers, 127.0.0.1 cannot be used for communication between containers by default, and the host IP is changed + > You can check the startup logs according to the logs directory diff --git a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md index b2ae6a65799..9d1d1f76f53 100644 --- a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md @@ -18,27 +18,27 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t ### Install VictoriaMetrics via Docker > Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) -> -> 1. Download and install Docker environment -> Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). 
-> After the installation you can check if the Docker version normally output at the terminal. -> -> ``` -> $ docker -v -> Docker version 20.10.12, build e91ed57 -> ``` + +1. Download and install Docker environment +Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). +After the installation you can check if the Docker version normally output at the terminal. + + ```shell + $ docker -v + Docker version 20.10.12, build e91ed57 + ``` 2. Install VictoriaMetrics via Docker -```shell -$ docker run -d -p 8428:8428 \ - -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ - --name victoria-metrics \ - victoriametrics/victoria-metrics:v1.95.1 -``` + ```shell + $ docker run -d -p 8428:8428 \ + -v $(pwd)/victoria-metrics-data:/victoria-metrics-data \ + --name victoria-metrics \ + victoriametrics/victoria-metrics:v1.95.1 + ``` -`-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory -use```$ docker ps``` to check if the database started successfully + `-v $(pwd)/victoria-metrics-data:/victoria-metrics-data` is local persistent mount of VictoriaMetrics data directory + use```$ docker ps``` to check if the database started successfully 3. Configure the database connection in hertzbeat `application.yml`configuration file @@ -46,19 +46,19 @@ use```$ docker ps``` to check if the database started successfully Note⚠️The docker container way need to mount application.yml file locally, while you can use installation package way to unzip and modify `hertzbeat/config/application.yml` Config the `warehouse.store.jpa.enabled` `false`. Replace `warehouse.store.victoria-metrics` data source parameters, HOST account and password. 
-```yaml
-warehouse:
-  store:
-    # disable JPA
-    jpa:
-      enabled: false
-    # enable victoria-metrics
-    victoria-metrics:
-      enabled: true
-      url: http://localhost:8428
-      username: root
-      password: root
-```
+   ```yaml
+   warehouse:
+     store:
+       # disable JPA
+       jpa:
+         enabled: false
+       # enable victoria-metrics
+       victoria-metrics:
+         enabled: true
+         url: http://localhost:8428
+         username: root
+         password: root
+   ```

4. Restart HertzBeat

@@ -66,4 +66,4 @@ warehouse:

1. Do both the time series databases need to be configured? Can they both be used?

-> You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affects the historical chart data.
+ > You don't need to configure all of them, you can choose one of them. Use the enable parameter to control whether it is used or not. You can also install and configure neither, which can affect the historical chart data.
diff --git a/web-app/README.md b/web-app/README.md
index 8a75b725ea6..a1f66311f38 100644
--- a/web-app/README.md
+++ b/web-app/README.md
@@ -18,14 +18,14 @@

1. Execute command in web-app

-```ng build --configuration production```
+   ```ng build --configuration production```

2. Execute command in root

-```mvn clean install```
+   ```mvn clean install```

-The HertzBeat install package will at `manager/target/hertzbeat-{version}.tar.gz`
+   The HertzBeat install package will be at `manager/target/hertzbeat-{version}.tar.gz`

3.
Execute command in collector -```mvn clean package -Pcluster``` + ```mvn clean package -Pcluster``` From b7969e78c97a5d8b5b55a5c9a723f66a1f37a54a Mon Sep 17 00:00:00 2001 From: shown Date: Mon, 26 Aug 2024 13:10:13 +0800 Subject: [PATCH 232/257] [improve]: enable md040 rules (#2607) Signed-off-by: yuluo-yx Co-authored-by: Jast --- .markdownlint-cli2.jsonc | 2 +- home/blog/2022-06-19-hertzbeat-v1.1.0.md | 4 ++-- home/blog/2022-06-22-one-step-up.md | 4 ++-- home/blog/2022-10-08-hertzbeat-v1.2.0.md | 20 ++++++++----------- home/blog/2023-01-05-monitor-iotdb.md | 2 +- home/blog/2023-01-08-monitor-shenyu.md | 2 +- home/blog/2023-02-02-monitor-dynamic-tp.md | 2 +- home/blog/2023-02-11-monitor-mysql.md | 2 +- home/blog/2023-02-15-monitor-linux.md | 2 +- home/blog/2023-03-15-hertzbeat-v1.3.0.md | 4 ++-- home/blog/2023-03-22-monitor-springboot2.md | 2 +- home/blog/2023-05-09-hertzbeat-v1.3.1.md | 2 +- home/blog/2023-07-05-hertzbeat-v1.3.2.md | 2 +- home/blog/2023-08-14-hertzbeat-v1.4.0.md | 2 +- home/blog/2023-09-26-hertzbeat-v1.4.1.md | 2 +- home/blog/2023-11-12-hertzbeat-v1.4.2.md | 2 +- home/blog/2023-12-11-hertzbeat-v1.4.3.md | 2 +- home/blog/2024-01-18-hertzbeat-v1.4.4.md | 2 +- .../2024-06-11-hertzbeat-v1.6.0-update.md | 6 +++--- .../advanced/extend-http-example-hertzbeat.md | 4 ++-- home/docs/advanced/extend-ssh.md | 2 +- home/docs/advanced/extend-tutorial.md | 4 ++-- .../community/code-style-and-quality-guide.md | 4 ---- home/docs/community/how-to-release.md | 18 ++++++++--------- home/docs/help/iotdb.md | 2 +- home/docs/help/nacos.md | 2 +- home/docs/help/redis_cluster.md | 4 ++-- home/docs/help/time_expression.md | 2 +- home/docs/start/docker-compose-deploy.md | 8 ++++---- home/docs/start/greptime-init.md | 2 -- home/docs/start/influxdb-init.md | 2 -- home/docs/start/mysql-change.md | 4 ++-- home/docs/start/package-deploy.md | 8 ++++---- home/docs/start/quickstart.md | 2 +- home/docs/start/tdengine-init.md | 10 ++++------ home/docs/start/update-1.6.0.md | 6 +++--- 
home/docs/start/victoria-metrics-init.md | 4 +--- .../2022-06-19-hertzbeat-v1.1.0.md | 4 ++-- .../2022-06-22-one-step-up.md | 4 ++-- .../2022-10-08-hertzbeat-v1.2.0.md | 8 ++++---- .../2023-01-05-monitor-iotdb.md | 4 ++-- .../2023-01-08-monitor-shenyu.md | 2 +- .../2023-02-02-monitor-dynamic-tp.md | 2 +- .../2023-02-11-monitor-mysql.md | 2 +- .../2023-02-15-monitor-linux.md | 2 +- .../2023-03-15-hertzbeat-v1.3.0.md | 4 ++-- .../2023-03-22-monitor-springboot2.md | 2 +- .../2023-05-09-hertzbeat-v1.3.1.md | 2 +- .../advanced/extend-http-example-hertzbeat.md | 4 ++-- .../current/advanced/extend-ssh.md | 2 +- .../current/advanced/extend-tutorial.md | 4 ++-- .../community/code-style-and-quality-guide.md | 4 ---- .../current/community/how-to-release.md | 18 ++++++++--------- .../community/new_committer_process.md | 6 +++--- .../current/help/iotdb.md | 2 +- .../current/help/kubernetes.md | 2 +- .../current/help/nacos.md | 2 +- .../current/help/redis_cluster.md | 4 ++-- .../current/help/time_expression.md | 2 +- .../current/start/custom-config.md | 2 +- .../current/start/docker-compose-deploy.md | 8 ++++---- .../current/start/greptime-init.md | 2 -- .../current/start/influxdb-init.md | 2 -- .../current/start/iotdb-init.md | 2 +- .../current/start/mysql-change.md | 4 ++-- .../current/start/package-deploy.md | 1 - .../current/start/tdengine-init.md | 6 ++---- .../current/start/victoria-metrics-init.md | 4 +--- .../advanced/extend-http-example-hertzbeat.md | 4 ++-- .../version-v1.5.x/advanced/extend-ssh.md | 2 +- .../advanced/extend-tutorial.md | 4 ++-- .../community/code-style-and-quality-guide.md | 4 ---- .../community/how-to-release.md | 18 ++++++++--------- .../version-v1.5.x/help/time_expression.md | 2 +- .../version-v1.5.x/start/custom-config.md | 2 +- .../version-v1.5.x/start/greptime-init.md | 2 -- .../version-v1.5.x/start/influxdb-init.md | 2 -- .../version-v1.5.x/start/mysql-change.md | 4 ++-- .../version-v1.5.x/start/package-deploy.md | 5 ++--- 
.../version-v1.5.x/start/postgresql-change.md | 6 +++--- .../version-v1.5.x/start/tdengine-init.md | 12 +++++------ .../start/victoria-metrics-init.md | 2 -- .../advanced/extend-http-example-hertzbeat.md | 4 ++-- .../version-v1.5.x/advanced/extend-ssh.md | 2 +- .../advanced/extend-tutorial.md | 4 ++-- .../community/code-style-and-quality-guide.md | 4 ---- .../community/how-to-release.md | 18 ++++++++--------- .../version-v1.5.x/help/time_expression.md | 2 +- .../version-v1.5.x/start/docker-deploy.md | 2 +- .../version-v1.5.x/start/iotdb-init.md | 2 +- .../version-v1.5.x/start/mysql-change.md | 4 ++-- .../version-v1.5.x/start/postgresql-change.md | 6 +++--- .../version-v1.5.x/start/tdengine-init.md | 8 ++++---- .../start/victoria-metrics-init.md | 2 -- package.json | 5 +++++ 95 files changed, 180 insertions(+), 223 deletions(-) create mode 100644 package.json diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 844b297a9d1..626362c511b 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -32,7 +32,7 @@ "MD029": true, "MD033": false, "MD036": false, - "MD040": false, + "MD040": true, "MD045": false, "MD046": false, "MD047": true diff --git a/home/blog/2022-06-19-hertzbeat-v1.1.0.md b/home/blog/2022-06-19-hertzbeat-v1.1.0.md index 3e0c69bf8db..be1b6d3a73e 100644 --- a/home/blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/blog/2022-06-19-hertzbeat-v1.1.0.md @@ -42,7 +42,7 @@ Windows Monitor coming: ⚠️ ⚠️⚠️⚠️Please note that upgrading to v1.1.0 from other versions requires running the following SQL script. Now, our table names have a unified prefix "hzb_prefix". -``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; @@ -96,7 +96,7 @@ Windows Monitor coming: ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. 
-``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; diff --git a/home/blog/2022-06-22-one-step-up.md b/home/blog/2022-06-22-one-step-up.md index 84e5381f25f..ce93f9a89ea 100644 --- a/home/blog/2022-06-22-one-step-up.md +++ b/home/blog/2022-06-22-one-step-up.md @@ -42,7 +42,7 @@ Windows Monitor is coming: ⚠️ ⚠️⚠️⚠️Please note that upgrading to v1.1.0 from other versions requires running the following SQL script. Now, our table names have a unified prefix hzb_prefix. -``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; @@ -96,7 +96,7 @@ Windows Monitor coming: ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. -``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; diff --git a/home/blog/2022-10-08-hertzbeat-v1.2.0.md b/home/blog/2022-10-08-hertzbeat-v1.2.0.md index dad19834fab..598e304fe6c 100644 --- a/home/blog/2022-10-08-hertzbeat-v1.2.0.md +++ b/home/blog/2022-10-08-hertzbeat-v1.2.0.md @@ -59,7 +59,7 @@ Have Fun! Need Convert `application.yml`. -``` +```yaml spring. resources: static-locations. static-locations. @@ -68,16 +68,12 @@ spring. ``` To -`` -spring. -web. -resources: static-locations. -static-locations. - -- classpath:/dist/ -- classpath:... /dist/ -``` - ----- +```yaml +spring. + web. + resources: static-locations. + static-locations. + - classpath:/dist/ + - classpath:... 
/dist/ ``` diff --git a/home/blog/2023-01-05-monitor-iotdb.md b/home/blog/2023-01-05-monitor-iotdb.md index 6ab4d25f11e..b6e6fa84188 100644 --- a/home/blog/2023-01-05-monitor-iotdb.md +++ b/home/blog/2023-01-05-monitor-iotdb.md @@ -109,7 +109,7 @@ tags: [opensource, practice] ### Finished, now wait for the warning message to come. ding ding ding ding -``` +```text [HertzBeat warning notification] Alarm target object: iotdb.cluster_node_status.status Affiliated monitoring ID: 205540620349696 diff --git a/home/blog/2023-01-08-monitor-shenyu.md b/home/blog/2023-01-08-monitor-shenyu.md index c271fa9a231..cc01f2b5579 100644 --- a/home/blog/2023-01-08-monitor-shenyu.md +++ b/home/blog/2023-01-08-monitor-shenyu.md @@ -141,7 +141,7 @@ Of course, just looking at it is not perfect, monitoring is often accompanied by ### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding -``` +```text [HertzBeat Alert Notification] Alert target object : shenyu.process_open_fds.value Task ID : 205540620349696 diff --git a/home/blog/2023-02-02-monitor-dynamic-tp.md b/home/blog/2023-02-02-monitor-dynamic-tp.md index 9cc7d45a3ca..c9e526547c1 100644 --- a/home/blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/blog/2023-02-02-monitor-dynamic-tp.md @@ -153,7 +153,7 @@ Of course, just watching is not perfect, monitoring is often accompanied by alar ### Over and out, now wait for the alert message to come through. Ding, ding, ding, ding -``` +```text [HertzBeat alert notification] Alert target object : dynamic_tp.thread_pool_running.run_timeout_count Task ID : 205540620349493 diff --git a/home/blog/2023-02-11-monitor-mysql.md b/home/blog/2023-02-11-monitor-mysql.md index 485855fa814..4afbe9aebf2 100644 --- a/home/blog/2023-02-11-monitor-mysql.md +++ b/home/blog/2023-02-11-monitor-mysql.md @@ -103,7 +103,7 @@ Of course, just looking at it is definitely not perfect. Monitoring is often acc ### Finished, now wait for the warning message to come. 
ding ding ding ding -``` +```text [HertzBeat warning notification] Alarm target object: mysql.cahce.query_cache_hit_rate Affiliated monitoring ID: 205540620394932 diff --git a/home/blog/2023-02-15-monitor-linux.md b/home/blog/2023-02-15-monitor-linux.md index 9128c72f40f..99b52cd105e 100644 --- a/home/blog/2023-02-15-monitor-linux.md +++ b/home/blog/2023-02-15-monitor-linux.md @@ -104,7 +104,7 @@ Of course, just looking at it is definitely not perfect. Monitoring is often acc ### Finished, now wait for the warning message to come. ding ding ding ding -``` +```text [HertzBeat warning notification] Alarm target object: linux.cpu.usage Affiliated monitoring ID: 483783444839382 diff --git a/home/blog/2023-03-15-hertzbeat-v1.3.0.md b/home/blog/2023-03-15-hertzbeat-v1.3.0.md index 170c2fa1516..b15a9501b54 100644 --- a/home/blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/blog/2023-03-15-hertzbeat-v1.3.0.md @@ -62,7 +62,7 @@ For users who previously used iotdb or tdengine to store metrics data, you need Modify `application.yml` and set `warehouse.store.jpa.enabled` parameter to false -``` +```yaml warehouse: store: jpa: @@ -71,7 +71,7 @@ warehouse: Execute SQL script -``` +```text ALTER table hzb_monitor modify job_id bigint default null; COMMIT; ``` diff --git a/home/blog/2023-03-22-monitor-springboot2.md b/home/blog/2023-03-22-monitor-springboot2.md index 2772d358420..8f827969130 100644 --- a/home/blog/2023-03-22-monitor-springboot2.md +++ b/home/blog/2023-03-22-monitor-springboot2.md @@ -157,7 +157,7 @@ Of course, it is impossible to manually check the metrics in real time. Monitori ### Finished, now wait for the warning message to come. 
ding ding ding ding -``` +```text [HertzBeat warning notification] Alarm target object: springboot2.threads.size Affiliated monitoring ID: 483783444839322 diff --git a/home/blog/2023-05-09-hertzbeat-v1.3.1.md b/home/blog/2023-05-09-hertzbeat-v1.3.1.md index e2b4e50c169..85cd5b5b75e 100644 --- a/home/blog/2023-05-09-hertzbeat-v1.3.1.md +++ b/home/blog/2023-05-09-hertzbeat-v1.3.1.md @@ -49,7 +49,7 @@ If use tdengine before, please upgrade tdengine to 3.0+ Please Run SQL Script When Upgrade. -``` +```text ALTER table hzb_alert_define modify field varchar(255) default null; COMMIT; ``` diff --git a/home/blog/2023-07-05-hertzbeat-v1.3.2.md b/home/blog/2023-07-05-hertzbeat-v1.3.2.md index c676c96028f..b8f331082db 100644 --- a/home/blog/2023-07-05-hertzbeat-v1.3.2.md +++ b/home/blog/2023-07-05-hertzbeat-v1.3.2.md @@ -72,7 +72,7 @@ ALTER TABLE HZB_PARAM DROP CONSTRAINT CONSTRAINT_82;; How to Enable H2 WEB Console: Modify `application.yml` and restart, access `ip:1157/h2-console` -``` +```yaml spring: h2: console: diff --git a/home/blog/2023-08-14-hertzbeat-v1.4.0.md b/home/blog/2023-08-14-hertzbeat-v1.4.0.md index b769ec923b3..4ca5d9c128c 100644 --- a/home/blog/2023-08-14-hertzbeat-v1.4.0.md +++ b/home/blog/2023-08-14-hertzbeat-v1.4.0.md @@ -85,7 +85,7 @@ As for open source commercialization, the premise of open source commercializati 3. Deploy collector clusters - ``` + ```shell docker run -d -e IDENTITY=custom-collector-name -e MANAGER_IP=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` diff --git a/home/blog/2023-09-26-hertzbeat-v1.4.1.md b/home/blog/2023-09-26-hertzbeat-v1.4.1.md index e04b55bc71f..275a0b61c41 100644 --- a/home/blog/2023-09-26-hertzbeat-v1.4.1.md +++ b/home/blog/2023-09-26-hertzbeat-v1.4.1.md @@ -68,7 +68,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 3. 
Deploy collector clusters - ``` + ```shell docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` diff --git a/home/blog/2023-11-12-hertzbeat-v1.4.2.md b/home/blog/2023-11-12-hertzbeat-v1.4.2.md index d59e5076d4c..51634bba606 100644 --- a/home/blog/2023-11-12-hertzbeat-v1.4.2.md +++ b/home/blog/2023-11-12-hertzbeat-v1.4.2.md @@ -50,7 +50,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 3. Deploy collector clusters - ``` + ```shell docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` diff --git a/home/blog/2023-12-11-hertzbeat-v1.4.3.md b/home/blog/2023-12-11-hertzbeat-v1.4.3.md index 966946b4f81..ff0d1e5d3de 100644 --- a/home/blog/2023-12-11-hertzbeat-v1.4.3.md +++ b/home/blog/2023-12-11-hertzbeat-v1.4.3.md @@ -56,7 +56,7 @@ Compatible with the Prometheus ecosystem, now we can monitor what Prometheus can 3. Deploy collector clusters - ``` + ```shell docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` diff --git a/home/blog/2024-01-18-hertzbeat-v1.4.4.md b/home/blog/2024-01-18-hertzbeat-v1.4.4.md index 420d9e1457f..255e307296f 100644 --- a/home/blog/2024-01-18-hertzbeat-v1.4.4.md +++ b/home/blog/2024-01-18-hertzbeat-v1.4.4.md @@ -57,7 +57,7 @@ keywords: [open source monitoring system, alerting system] 3. 
Deploy collector clusters - ``` + ```shell docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` diff --git a/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md b/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md index 47dfe69fb79..7ae352ff7d7 100644 --- a/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md +++ b/home/blog/2024-06-11-hertzbeat-v1.6.0-update.md @@ -106,7 +106,7 @@ Next, run the start-up script as before to experience the latest HertzBeat 1.6.0 - Stop the HertzBeat container: - ``` + ```shell docker stop hertzbeat ``` @@ -123,7 +123,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1. - Stop the HertzBeat container: - ``` + ```shell docker stop hertzbeat ``` @@ -132,7 +132,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1. - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). - Start the database locally using the H2 driver jar: - ``` + ```shell java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` diff --git a/home/docs/advanced/extend-http-example-hertzbeat.md b/home/docs/advanced/extend-http-example-hertzbeat.md index 5dabc107865..2d154d81b57 100644 --- a/home/docs/advanced/extend-http-example-hertzbeat.md +++ b/home/docs/advanced/extend-http-example-hertzbeat.md @@ -12,7 +12,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. 
-``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: -``` +```json { "msg": null, "code": 0, diff --git a/home/docs/advanced/extend-ssh.md b/home/docs/advanced/extend-ssh.md index 3a5486c394b..62f9453a2d2 100644 --- a/home/docs/advanced/extend-ssh.md +++ b/home/docs/advanced/extend-ssh.md @@ -29,7 +29,7 @@ Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them to `hostname; uptime | awk -F "," '{print $1}'` The data responded by the terminal is: -``` +```shell tombook 14:00:15 up 72 days ``` diff --git a/home/docs/advanced/extend-tutorial.md b/home/docs/advanced/extend-tutorial.md index 8db349d96c1..23d460d0923 100644 --- a/home/docs/advanced/extend-tutorial.md +++ b/home/docs/advanced/extend-tutorial.md @@ -12,7 +12,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. -``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. 
Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: -``` +```json { "msg": null, "code": 0, diff --git a/home/docs/community/code-style-and-quality-guide.md b/home/docs/community/code-style-and-quality-guide.md index 7e65b0c0a92..e0ec5231c26 100644 --- a/home/docs/community/code-style-and-quality-guide.md +++ b/home/docs/community/code-style-and-quality-guide.md @@ -647,7 +647,3 @@ public void process(String input) { - - - - -``` - -``` diff --git a/home/docs/community/how-to-release.md b/home/docs/community/how-to-release.md index 46ab8a2bc86..6a3eeb54c84 100644 --- a/home/docs/community/how-to-release.md +++ b/home/docs/community/how-to-release.md @@ -250,7 +250,7 @@ for i in *.tar.gz; do echo $i; sha512sum $i > $i.sha512 ; done > The final file list is as follows -``` +```text apache-hertzbeat-1.6.0-incubating-src.tar.gz apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512 @@ -347,7 +347,7 @@ Send a voting email in the community requires at least three `+1` and no `-1`. > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: -``` +```text Hello HertzBeat Community: This is a call for vote to release Apache HertzBeat (incubating) version release-1.6.0-RC1. @@ -403,7 +403,7 @@ After 72 hours, the voting results will be counted, and the voting result email > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" @@ -439,7 +439,7 @@ Send a voting email in the incubator community requires at least three `+1` and > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hello Incubator Community: This is a call for a vote to release Apache HertzBeat (incubating) version 1.6.0-RC1. @@ -492,7 +492,7 @@ If there is no -1 after 72 hours, reply to the email as follows > `Send to`:
> `Body`: -``` +```text Thanks everyone for review and vote, 72H passed. I'll announce the vote result soon. Best, @@ -505,7 +505,7 @@ Then the voting results will be counted, and the voting result email will be sen > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hi Incubator Community, The vote to release Apache HertzBeat (incubating) 1.6.0-rc4 has passed with 3 +1 binding and no +0 or -1 votes. @@ -560,13 +560,13 @@ Then enter Release Title and Describe - Release Title: -``` +```text v1.6.0 ``` - Describe: -``` +```text xxx release note: xxx ``` @@ -582,7 +582,7 @@ The rename the release-1.6.0-rc1 branch to release-1.6.0. > `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: -``` +```text Hi Community, We are glad to announce the release of Apache HertzBeat (incubating) 1.6.0. diff --git a/home/docs/help/iotdb.md b/home/docs/help/iotdb.md index c87f4fba7d6..5399caa3c41 100644 --- a/home/docs/help/iotdb.md +++ b/home/docs/help/iotdb.md @@ -17,7 +17,7 @@ The main steps are as follows: 1. The metric collection is disabled by default, you need to modify the parameters in `conf/iotdb-metric.yml` first, then restart the server - ``` + ```yaml # Whether to start the monitoring module, the default is false enableMetric: true diff --git a/home/docs/help/nacos.md b/home/docs/help/nacos.md index 3eeafc9ac47..eb4cb0b5e25 100644 --- a/home/docs/help/nacos.md +++ b/home/docs/help/nacos.md @@ -14,7 +14,7 @@ keywords: [open source monitoring tool, open source middleware monitoring tool, 1. Deploy the Nacos cluster according to [deployment document](https://nacos.io/en-us/docs/deployment.html). 2. Configure the application. properties file to expose metrics data. - ``` + ```properties management.endpoints.web.exposure.include=* ``` diff --git a/home/docs/help/redis_cluster.md b/home/docs/help/redis_cluster.md index b10d3ff684e..a805c2f973b 100644 --- a/home/docs/help/redis_cluster.md +++ b/home/docs/help/redis_cluster.md @@ -11,7 +11,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to *redis.config* - ``` + ```shell port 6379 cluster-enabled yes cluster-config-file nodes.conf @@ -93,7 +93,7 @@ keywords: [ open source monitoring tool, open source Redis Cluster monitoring to docker network inspect hertzbeat-redis-cluste ``` - ``` + ```json "Containers": { "187b879f73c473b3cbb82ff95f668e65af46115ddaa27f3ff1a712332b981531": { ... 
diff --git a/home/docs/help/time_expression.md b/home/docs/help/time_expression.md index 2f0711c4cf9..07eea14a81d 100644 --- a/home/docs/help/time_expression.md +++ b/home/docs/help/time_expression.md @@ -11,7 +11,7 @@ HertzBeat supports using expressions to calculate relative time during monitorin ### Syntax -``` +```shell ${FORMATTER [{ + | - } ]} ``` diff --git a/home/docs/start/docker-compose-deploy.md b/home/docs/start/docker-compose-deploy.md index 9caefdbf626..fb16819c6ce 100644 --- a/home/docs/start/docker-compose-deploy.md +++ b/home/docs/start/docker-compose-deploy.md @@ -28,13 +28,13 @@ Run the `docker compose version` command to check if you have a Docker Compose e - Unzip the script package - ``` + ```shell tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz ``` - Enter the decompression directory and select `HertzBeat + PostgreSQL + VictoriaMetrics` for one-click deployment - ``` + ```shell cd apache-hertzbeat-1.6.0-incubating-docker-compose cd hertzbeat-postgresql-victoria-metrics ``` @@ -43,7 +43,7 @@ Run the `docker compose version` command to check if you have a Docker Compose e > Run script in `hertzbeat-postgresql-victoria-metrics` directory - ``` + ```shell docker-compose up -d ``` @@ -51,7 +51,7 @@ Run the `docker compose version` command to check if you have a Docker Compose e > View the running status of each container, up is the normal running status - ``` + ```shell docker-compose ps ``` diff --git a/home/docs/start/greptime-init.md b/home/docs/start/greptime-init.md index 57b4e9504b8..a04823dfc2d 100644 --- a/home/docs/start/greptime-init.md +++ b/home/docs/start/greptime-init.md @@ -16,8 +16,6 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### Install GreptimeDB via Docker -> Refer to the official website [installation tutorial](https://docs.greptime.com/getting-started/overview) - 1. 
Download and install Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). After the installation you can check if the Docker version normally output at the terminal. diff --git a/home/docs/start/influxdb-init.md b/home/docs/start/influxdb-init.md index 8f11e9c4c91..19a45147cb4 100644 --- a/home/docs/start/influxdb-init.md +++ b/home/docs/start/influxdb-init.md @@ -22,8 +22,6 @@ Note⚠️ Need InfluxDB 1.x Version. ### 2. Install TDengine via Docker -> Refer to the official website [installation tutorial](https://hub.docker.com/_/influxdb) - 1. Download and install Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). After the installation you can check if the Docker version normally output at the terminal. diff --git a/home/docs/start/mysql-change.md b/home/docs/start/mysql-change.md index 70c6fab5baf..1d1a6aabda7 100644 --- a/home/docs/start/mysql-change.md +++ b/home/docs/start/mysql-change.md @@ -14,14 +14,14 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data For Docker installation, please refer to the [Docker official documentation](https://docs.docker.com/get-docker/). After the installation, please verify in the terminal that the Docker version can be printed normally. - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. 
Install MYSQl with Docker - ``` + ```shell $ docker run -d --name mysql \ -p 3306:3306 \ -v /opt/data:/var/lib/mysql \ diff --git a/home/docs/start/package-deploy.md b/home/docs/start/package-deploy.md index 56a7313ed91..fdd2e7a6a0b 100644 --- a/home/docs/start/package-deploy.md +++ b/home/docs/start/package-deploy.md @@ -20,7 +20,7 @@ Deployment via package relies on Java runtime environment, ensure you have Java1 Unzip the installation package to the host eg: /opt/hertzbeat - ``` + ```shell tar zxvf apache-hertzbeat-xxx-incubating-bin.tar.gz ``` @@ -44,7 +44,7 @@ Deployment via package relies on Java runtime environment, ensure you have Java1 Execute the startup script in the installation directory `bin/`, or `startup.bat` in windows. - ``` + ```shell ./startup.sh ``` @@ -70,7 +70,7 @@ Deploying multiple HertzBeat Collectors can achieve high availability, load bala Unzip the installation package to the host eg: /opt/hertzbeat-collector - ``` + ```shell tar zxvf apache-hertzbeat-collector-xxx-incubating-bin.tar.gz ``` @@ -116,7 +116,7 @@ Deploying multiple HertzBeat Collectors can achieve high availability, load bala download JAVA installation package: [mirror website](https://repo.huaweicloud.com/java/jdk/) After installation use command line to check whether you install it successfully. - ``` + ```shell $ java -version java version "17.0.9" Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) diff --git a/home/docs/start/quickstart.md b/home/docs/start/quickstart.md index 830fc8f9d64..be0336f1a19 100644 --- a/home/docs/start/quickstart.md +++ b/home/docs/start/quickstart.md @@ -22,7 +22,7 @@ sidebar_label: Quick Start 3. 
Deploy collector clusters(Optional) - ``` + ```shell docker run -d -e IDENTITY=custom-collector-name -e MANAGER_HOST=127.0.0.1 -e MANAGER_PORT=1158 --name hertzbeat-collector apache/hertzbeat-collector ``` diff --git a/home/docs/start/tdengine-init.md b/home/docs/start/tdengine-init.md index 82f269cb0f1..25a1b7793eb 100644 --- a/home/docs/start/tdengine-init.md +++ b/home/docs/start/tdengine-init.md @@ -18,8 +18,6 @@ Note⚠️ Need TDengine 3.x Version. ### Install TDengine via Docker -> Refer to the official website [installation tutorial](https://docs.taosdata.com/get-started/docker/) - 1. Download and install Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). After the installation you can check if the Docker version normally output at the terminal. @@ -46,14 +44,14 @@ After the installation you can check if the Docker version normally output at th 1. Enter database Docker container - ``` + ```shell docker exec -it tdengine /bin/bash ``` 2. Create database named hertzbeat After entering the container,execute `taos` command as follows: - ``` + ```shell root@tdengine-server:~/TDengine-server# taos Welcome to the TDengine shell from Linux, Client Version Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. @@ -62,7 +60,7 @@ After the installation you can check if the Docker version normally output at th execute commands to create database - ``` + ```shell taos> show databases; taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` @@ -72,7 +70,7 @@ After the installation you can check if the Docker version normally output at th 3. 
Check if hertzbeat database has been created success - ``` + ```shell taos> show databases; taos> use hertzbeat; ``` diff --git a/home/docs/start/update-1.6.0.md b/home/docs/start/update-1.6.0.md index 2a5b2581ed4..512a43d2d40 100644 --- a/home/docs/start/update-1.6.0.md +++ b/home/docs/start/update-1.6.0.md @@ -112,7 +112,7 @@ Next, run the start-up script as before to experience the latest HertzBeat 1.6.0 - Stop the HertzBeat container: - ``` + ```shell docker stop hertzbeat ``` @@ -129,7 +129,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1. - Stop the HertzBeat container: - ``` + ```shell docker stop hertzbeat ``` @@ -138,7 +138,7 @@ Next, run HertzBeat using Docker as before to experience the latest HertzBeat 1. - Download the H2 driver jar from [https://mvnrepository.com/artifact/com.h2database/h2/2.2.220](https://mvnrepository.com/artifact/com.h2database/h2/2.2.220). - Start the database locally using the H2 driver jar: - ``` + ```shell java -jar h2-2.2.220.jar -url jdbc:h2:file:./hertzbeat -user sa -password 123456 ``` diff --git a/home/docs/start/victoria-metrics-init.md b/home/docs/start/victoria-metrics-init.md index b89b26a8fa3..45490fa80d6 100644 --- a/home/docs/start/victoria-metrics-init.md +++ b/home/docs/start/victoria-metrics-init.md @@ -17,13 +17,11 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t ### Install VictoriaMetrics via Docker -> Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) - 1. Download and install Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). After the installation you can check if the Docker version normally output at the terminal. 
- ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md index c5f4f874fb3..4601c791c0e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-19-hertzbeat-v1.1.0.md @@ -42,7 +42,7 @@ Windows Monitor coming: ⚠️ ⚠️⚠️⚠️请注意其它版本升级到v1.1.0需要先执行下面的SQL脚本. 现在我们的表名称有个统一前缀 hzb_ prefix. -``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; @@ -96,7 +96,7 @@ Windows Monitor coming: ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. -``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md index c5f4f874fb3..4601c791c0e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-06-22-one-step-up.md @@ -42,7 +42,7 @@ Windows Monitor coming: ⚠️ ⚠️⚠️⚠️请注意其它版本升级到v1.1.0需要先执行下面的SQL脚本. 现在我们的表名称有个统一前缀 hzb_ prefix. -``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; @@ -96,7 +96,7 @@ Windows Monitor coming: ⚠️ ⚠️⚠️⚠️Attention other version upgrade to v1.1.0 need run sql script. Now the tables name has hzb_ prefix. 
-``` +```properties ALTER TABLE alert RENAME TO hzb_alert; ALTER TABLE alert_define RENAME TO hzb_alert_define; ALTER TABLE alert_define_monitor_bind RENAME TO hzb_alert_define_monitor_bind; diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md index bcb68577932..f66f3ee9296 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2022-10-08-hertzbeat-v1.2.0.md @@ -59,7 +59,7 @@ Have Fun! Need Convert `application.yml` -``` +```yaml spring: resources: static-locations: @@ -69,7 +69,7 @@ spring: To -``` +```yaml spring: web: resources: @@ -133,7 +133,7 @@ Have Fun! 需要将配置文件内容 `application.yml` -``` +```yaml spring: resources: static-locations: @@ -143,7 +143,7 @@ spring: 变更为 -``` +```yaml spring: web: resources: diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md index 519b35f1287..7aa20f2dbda 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-05-monitor-iotdb.md @@ -32,7 +32,7 @@ keywords: [开源监控系统, 开源数据库监控, IotDB数据库监控] 1. 
metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server - ``` + ```yaml # 是否启动监控模块,默认为false enableMetric: true @@ -110,7 +110,7 @@ keywords: [开源监控系统, 开源数据库监控, IotDB数据库监控] ### 完毕,现在坐等告警消息过来了。叮叮叮叮 -``` +```text [HertzBeat告警通知] 告警目标对象 : iotdb.cluster_node_status.status 所属监控任务ID : 205540620349696 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md index 74e87d11991..b643540e9ee 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-01-08-monitor-shenyu.md @@ -141,7 +141,7 @@ tags: [opensource, practice] ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 -``` +```text [HertzBeat告警通知] 告警目标对象 : shenyu.process_open_fds.value 所属监控任务ID : 205540620349696 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md index 6fcb608a759..539424907b1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-02-monitor-dynamic-tp.md @@ -153,7 +153,7 @@ tags: [opensource, practice] ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 -``` +```text [HertzBeat告警通知] 告警目标对象 : dynamic_tp.thread_pool_running.run_timeout_count 所属监控任务ID : 205540620349493 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md index 4ec31b3926e..68d41d4be9c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-11-monitor-mysql.md @@ -103,7 +103,7 @@ keywords: [开源监控系统, 开源数据库监控, Mysql数据库监控] ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 -``` +```text [HertzBeat告警通知] 告警目标对象 : mysql.cahce.query_cache_hit_rate 所属监控任务ID : 205540620394932 diff --git 
a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md index 2734281bbf1..6286fdaeaa0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-02-15-monitor-linux.md @@ -164,7 +164,7 @@ Github: ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 -``` +```text [HertzBeat告警通知] 告警目标对象 : linux.cpu.usage 所属监控任务ID : 483783444839382 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md index 5d622de36dc..f53461152d4 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-15-hertzbeat-v1.3.0.md @@ -117,7 +117,7 @@ Bugfix. 修改 `application.yml` 并设置 `warehouse.store.jpa.enabled` 参数为 false -``` +```yaml warehouse: store: jpa: @@ -126,7 +126,7 @@ warehouse: 执行SQL脚本 -``` +```shell ALTER table hzb_monitor modify job_id bigint default null; COMMIT; ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md index 413e9a7d019..4dcc3a21c49 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-03-22-monitor-springboot2.md @@ -153,7 +153,7 @@ Github: ### 完毕,现在坐等告警消息过来啦。叮叮叮叮 -``` +```text [HertzBeat告警通知] 告警目标对象 : springboot2.threads.size 所属监控任务ID : 483783444839322 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md index b58b533d388..a1961b33a7a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md +++ 
b/home/i18n/zh-cn/docusaurus-plugin-content-blog/2023-05-09-hertzbeat-v1.3.1.md @@ -44,7 +44,7 @@ keywords: [open source monitoring system, alerting system, Linux monitoring] 需要执行SQL升级脚本 -``` +```shell ALTER table hzb_alert_define modify field varchar(255) default null; COMMIT; ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md index 9e576543a25..82a80523f52 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-http-example-hertzbeat.md @@ -12,7 +12,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 -``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: -``` +```json { "msg": null, "code": 0, diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md index 451e2b3a540..9ae2ba22c3b 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-ssh.md @@ -29,7 +29,7 @@ SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 `hostname; uptime | awk -F "," '{print $1}'` 终端响应的数据为: -``` +```shell tombook 14:00:15 up 72 days ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md index 221ee168b4b..8635126fdf9 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/advanced/extend-tutorial.md @@ -12,7 +12,7 @@ sidebar_label: 教程案例 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 -``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ sidebar_label: 教程案例 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: -``` +```json { "msg": null, "code": 0, diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md index eb1d4e1bd46..ca23f3db755 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/code-style-and-quality-guide.md @@ -648,7 +648,3 @@ public void process(String input) { - - - - -``` - -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md index 5402bc696aa..a7ee981cdfc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/how-to-release.md @@ -252,7 +252,7 @@ for i in *.tar.gz; do echo $i; sha512sum $i > $i.sha512 ; done > 最终文件列表如下 -``` +```text apache-hertzbeat-1.6.0-incubating-src.tar.gz apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512 @@ -347,7 +347,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: -``` +```text Hello HertzBeat Community: This is a call for vote to release Apache HertzBeat (incubating) version release-1.6.0-RC1. @@ -403,7 +403,7 @@ Thanks! > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" @@ -439,7 +439,7 @@ ChunJin Mu > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hello Incubator Community: This is a call for a vote to release Apache HertzBeat (incubating) version 1.6.0-RC1. @@ -492,7 +492,7 @@ ChunJin Mu > `Send to`:
> `Body`: -``` +```text Thanks everyone for review and vote, 72H passed. I'll announce the vote result soon. Best, @@ -505,7 +505,7 @@ Chunjin Mu > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hi Incubator Community, The vote to release Apache HertzBeat (incubating) 1.6.0-rc4 has passed with 3 +1 binding and no +0 or -1 votes. @@ -560,13 +560,13 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http - 发版标题: -``` +```text v1.6.0 ``` - 描述: -``` +```text xxx release note: xxx ``` @@ -582,7 +582,7 @@ release note: xxx > `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: -``` +```text Hi Community, We are glad to announce the release of Apache HertzBeat (incubating) 1.6.0. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md index f931dc556b8..f3cb4afad25 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/community/new_committer_process.md @@ -287,7 +287,7 @@ Best Wishes! 当通过PMC内部投票后,PMC会向您索要个人邮箱,之后您会收到一封邮件,邮件会指引您进行下一步操作,内容为: -``` +```text Hello xxxx, The HertzBeat Project Management Committee (PPMC) @@ -336,7 +336,7 @@ establishing you as a committer. 如果您接受邀请,请回复该邮件,记住要**回复全部**,回复内容为: -``` +```text hi, i accept. Thanks for invitaion. ``` @@ -406,7 +406,7 @@ hi, i accept. Thanks for invitaion. 正文: - ``` + ```text Hello Apache, I am willing contribute to the ASF. The attachment is my ICLA information. My Github account is : https://github.com/xxxx. Thanks ! diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md index e266fa96feb..88a20e4ab97 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/iotdb.md @@ -17,7 +17,7 @@ keywords: [开源监控系统, 开源数据库监控, IoTDB数据库监控] 1. 
metric 采集默认是关闭的,需要先到 `conf/iotdb-metric.yml` 中修改参数打开后重启 server - ``` + ```text # 是否启动监控模块,默认为false enableMetric: true diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md index 162262ab8cd..58ed7e3fcf3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/kubernetes.md @@ -29,7 +29,7 @@ keywords: [开源监控系统, 开源Kubernetes监控] ### 方式二 -``` +```shell kubectl create serviceaccount cluster-admin kubectl create clusterrolebinding cluster-admin-manual --clusterrole=cluster-admin --serviceaccount=default:cluster-admin diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md index 0b3cf704589..f95da705d58 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nacos.md @@ -14,7 +14,7 @@ keywords: [开源监控系统, 中间件监控, Nacos分布式监控] 1. 按照[部署文档](https://nacos.io/zh-cn/docs/deployment.html)搭建好Nacos集群。 2. 
配置application.properties文件,暴露metrics数据。 - ``` + ```properties management.endpoints.web.exposure.include=* ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md index e5aed34ba3f..c34681ba9c6 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/redis_cluster.md @@ -11,7 +11,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 *redis.config* - ``` + ```properties port 6379 cluster-enabled yes cluster-config-file nodes.conf @@ -93,7 +93,7 @@ keywords: [开源监控系统, 开源数据库监控, RedisCluster数据库监 docker network inspect hertzbeat-redis-cluste ``` - ``` + ```json "Containers": { "187b879f73c473b3cbb82ff95f668e65af46115ddaa27f3ff1a712332b981531": { ... diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md index 1a6b02b45b2..4c3bd7b3e76 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/time_expression.md @@ -11,7 +11,7 @@ HertzBeat支持使用表达式计算监控采集时的相对时间,支持更 ### 语法 -``` +```shell ${FORMATTER [{ + | - } ]} ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md index 6c30086e1b8..4e70646d1dc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/custom-config.md @@ -36,7 +36,7 @@ sidebar_label: 常见参数配置 1.2 腾讯云短信创建正文模板(template-id) - ``` + ```text 监控:{1},告警级别:{2}。内容:{3} ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md 
b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md index ff2f22d3adc..e1c91187530 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/docker-compose-deploy.md @@ -28,13 +28,13 @@ sidebar_label: Docker Compose方式安装 - 解压脚本包 - ``` + ```shell tar zxvf apache-hertzbeat-1.6.0-incubating-docker-compose.tar.gz ``` - 进入解压目录, 选择 `HertzBeat + PostgreSQL + VictoriaMetrics` 一键部署 - ``` + ```shell cd apache-hertzbeat-1.6.0-incubating-docker-compose cd hertzbeat-postgresql-victoria-metrics ``` @@ -43,7 +43,7 @@ sidebar_label: Docker Compose方式安装 > 在 `hertzbeat-postgresql-victoria-metrics` 目录下执行以下命令 - ``` + ```shell docker-compose up -d ``` @@ -51,7 +51,7 @@ sidebar_label: Docker Compose方式安装 > 查看各个容器的运行状态,up 为正常运行状态 - ``` + ```shell docker-compose ps ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md index 1f24f70f3e5..7c40e2a8255 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/greptime-init.md @@ -15,8 +15,6 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### 通过Docker方式安装GreptimeDB -> 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) - 1. 
下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md index 1c97c2ccfb9..12dc86fd662 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/influxdb-init.md @@ -23,8 +23,6 @@ InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海 ### 2. 通过Docker方式安装InfluxDB -> 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) - 1. 下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md index f2ce6087dc5..af9e6531d34 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/iotdb-init.md @@ -24,7 +24,7 @@ Apache IoTDB是一体化收集、存储、管理与分析物联网时序数据 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md index e05bfde2a29..1fcdad7e8a1 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/mysql-change.md @@ -16,14 +16,14 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后请于终端检查Docker版本输出是否正常。 - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. 
Docker安装MYSQl - ``` + ```shell $ docker run -d --name mysql \ -p 3306:3306 \ -v /opt/data:/var/lib/mysql \ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md index bd758bc1502..fd7d1e55ff3 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/package-deploy.md @@ -124,7 +124,6 @@ HertzBeat Collector 是一个轻量级的数据采集器,用于采集并将数 java version "17.0.9" Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) - ``` 2. 按照流程部署,访问 无界面 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md index ee447e1be7d..c8c84be2815 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/tdengine-init.md @@ -17,8 +17,6 @@ TDengine是一款开源物联网时序型数据库,我们用其存储采集到 ### 通过Docker方式安装TDengine -> 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) - 1. 下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 @@ -56,7 +54,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c > 建议您修改密码。TDengine默认的账户密码是 root/taosdata > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: - ``` + ```shell root@tdengine-server:~/TDengine-server# taos Welcome to the TDengine shell from Linux, Client Version Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. @@ -85,7 +83,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c 5. 
退出TDengine CLI - ``` + ```text 输入 q 或 quit 或 exit 回车 ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md index 24cb43f7b24..ffd99ca6d8d 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/start/victoria-metrics-init.md @@ -15,9 +15,7 @@ VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决 > 如果您已有VictoriaMetrics环境,可直接跳到YML配置那一步。 -### 通过Docker方式安装VictoriaMetrics - -> 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) +### 通过Docker方式安装VictoriaMetrics 1. 下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md index 56e9b125e5f..c2574f03001 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md @@ -12,7 +12,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 -``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ sidebar_label: 教程一:适配一款HTTP协议监控 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: -``` +```json { "msg": null, "code": 0, diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md index 451e2b3a540..9ae2ba22c3b 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-ssh.md @@ -29,7 +29,7 @@ SHELL脚本查询回来的数据字段和我们需要的指标映射,就能获 `hostname; uptime | awk -F "," '{print $1}'` 终端响应的数据为: -``` +```shell tombook 14:00:15 up 72 days ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md index a2b602f7844..80dc3f36077 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/advanced/extend-tutorial.md @@ -12,7 +12,7 @@ sidebar_label: 教程案例 > 很多场景我们需要对提供的 HTTP API 接口进行监控,获取接口返回的指标值。这篇文章我们通过http自定义协议来解析我们常见的http接口响应结构,获取返回体中的字段作为指标数据。 -``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ sidebar_label: 教程案例 如上,通常我们的后台API接口会设计这这样一个通用返回。hertzbeat系统的后台也是如此,我们今天就用hertzbeat的 API 做样例,新增适配一款新的监控类型 **hertzbeat**,监控采集它的系统摘要统计API `http://localhost:1157/api/summary`, 其响应数据为: -``` +```json { "msg": null, "code": 0, diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md index d3e6a1aa780..dea85bc8aae 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -585,7 +585,3 @@ public void process(String input) { - - - - -``` - -``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md index 5402bc696aa..419d0ad3466 100644 --- 
a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/community/how-to-release.md @@ -252,7 +252,7 @@ for i in *.tar.gz; do echo $i; sha512sum $i > $i.sha512 ; done > 最终文件列表如下 -``` +```properties apache-hertzbeat-1.6.0-incubating-src.tar.gz apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512 @@ -347,7 +347,7 @@ svn commit -m "release for HertzBeat 1.6.0-RC1" > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: -``` +```text Hello HertzBeat Community: This is a call for vote to release Apache HertzBeat (incubating) version release-1.6.0-RC1. @@ -403,7 +403,7 @@ Thanks! > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" @@ -439,7 +439,7 @@ ChunJin Mu > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hello Incubator Community: This is a call for a vote to release Apache HertzBeat (incubating) version 1.6.0-RC1. @@ -492,7 +492,7 @@ ChunJin Mu > `Send to`:
> `Body`: -``` +```text Thanks everyone for review and vote, 72H passed. I'll announce the vote result soon. Best, @@ -505,7 +505,7 @@ Chunjin Mu > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hi Incubator Community, The vote to release Apache HertzBeat (incubating) 1.6.0-rc4 has passed with 3 +1 binding and no +0 or -1 votes. @@ -560,13 +560,13 @@ svn mv https://dist.apache.org/repos/dist/dev/incubator/hertzbeat/1.6.0-RC1 http - 发版标题: -``` +```text v1.6.0 ``` - 描述: -``` +```text xxx release note: xxx ``` @@ -582,7 +582,7 @@ release note: xxx > `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: -``` +```text Hi Community, We are glad to announce the release of Apache HertzBeat (incubating) 1.6.0. diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md index 1a6b02b45b2..4c3bd7b3e76 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/help/time_expression.md @@ -11,7 +11,7 @@ HertzBeat支持使用表达式计算监控采集时的相对时间,支持更 ### 语法 -``` +```shell ${FORMATTER [{ + | - } ]} ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md index 6c30086e1b8..4e70646d1dc 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/custom-config.md @@ -36,7 +36,7 @@ sidebar_label: 常见参数配置 1.2 腾讯云短信创建正文模板(template-id) - ``` + ```text 监控:{1},告警级别:{2}。内容:{3} ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md index d8a86a88acd..ae036840725 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/greptime-init.md @@ -15,8 +15,6 @@ It's designed to work on infrastructure of the cloud era, and users benefit from ### 通过Docker方式安装GreptimeDB -> 可参考官方网站[安装教程](https://docs.greptime.com/getting-started/overview) -> 1. 
下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md index bbba8a1df89..bd4390fc35e 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/influxdb-init.md @@ -23,8 +23,6 @@ InfluxDB是一个由InfluxData开发的开源时序型数据库,专注于海 ### 2. 通过Docker方式安装InfluxDB -> 可参考官方网站[安装教程](https://hub.docker.com/_/influxdb) - 1. 下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md index 874de9a0328..7df3aa4b001 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/mysql-change.md @@ -16,14 +16,14 @@ MYSQL是一款值得信赖的关系型数据库,Apache HertzBeat (incubating) Docker 的安装请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后请于终端检查Docker版本输出是否正常。 - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. Docker安装MYSQl - ``` + ```shell $ docker run -d --name mysql \ -p 3306:3306 \ -v /opt/data:/var/lib/mysql \ diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md index 39744f8ef23..344c42801a0 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/package-deploy.md @@ -12,7 +12,7 @@ sidebar_label: 安装包方式部署 2. 
配置HertzBeat的配置文件(可选) 解压安装包到主机 eg: /opt/hertzbeat - ``` + ```shell $ tar zxvf hertzbeat-xx.tar.gz or $ unzip -o hertzbeat-xx.zip @@ -33,7 +33,7 @@ sidebar_label: 安装包方式部署 4. 部署启动 执行位于安装目录hertzbeat/bin/下的启动脚本 startup.sh, windows环境下为 startup.bat - ``` + ```shell ./startup.sh ``` @@ -76,7 +76,6 @@ sidebar_label: 安装包方式部署 java version "17.0.9" Java(TM) SE Runtime Environment 17.0.9 (build 17.0.9+8-LTS-237) Java HotSpot(TM) 64-Bit Server VM 17.0.9 (build 17.0.9+8-LTS-237, mixed mode) - ``` 2. **按照流程部署,访问 无界面** diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md index e3a6b66a41d..e3c2a933a23 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/postgresql-change.md @@ -16,14 +16,14 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. Docker安装 PostgreSQL - ``` + ```shell docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` @@ -35,7 +35,7 @@ PostgreSQL是一个功能强大,开源的关系型数据库管理系统(RDBM 1. 
进入 PostgreSQL 或使用客户端连接 PostgreSQL 服务 - ``` + ```shell su - postgres psql ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md index 813eb94ad1a..4ba4c97659a 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/tdengine-init.md @@ -17,8 +17,6 @@ TDengine是一款开源物联网时序型数据库,我们用其存储采集到 ### 通过Docker方式安装TDengine -> 可参考官方网站[安装教程](https://docs.taosdata.com/get-started/docker/) - 1. 下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 @@ -47,7 +45,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c 1. 进入数据库Docker容器 - ``` + ```shell docker exec -it tdengine /bin/bash ``` @@ -56,7 +54,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c > 建议您修改密码。TDengine默认的账户密码是 root/taosdata > 进入容器后,执行 `taos` 命令进入TDengine CLI , 如下: - ``` + ```shell root@tdengine-server:~/TDengine-server# taos Welcome to the TDengine shell from Linux, Client Version Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. @@ -69,7 +67,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c 执行创建数据库命令 - ``` + ```shell taos> show databases; taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` @@ -78,14 +76,14 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c 4. 查看hertzbeat数据库是否成功创建 - ``` + ```shell taos> show databases; taos> use hertzbeat; ``` 5. 
退出TDengine CLI - ``` + ```text 输入 q 或 quit 或 exit 回车 ``` diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md index c38e59b3198..4fda51312d7 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/version-v1.5.x/start/victoria-metrics-init.md @@ -17,8 +17,6 @@ VictoriaMetrics,是一个快速高效、经济并且可扩展的监控解决 ### 通过Docker方式安装VictoriaMetrics -> 可参考官方网站[安装教程](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) - 1. 下载安装Docker环境 Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 安装完毕后终端查看Docker版本是否正常输出。 diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md index 0d1a7112bb9..c941befb396 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-http-example-hertzbeat.md @@ -12,7 +12,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. -``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. 
Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: -``` +```json { "msg": null, "code": 0, diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md b/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md index 3a5486c394b..a4dc46d1dc1 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-ssh.md @@ -29,7 +29,7 @@ Then the query script of the two Metrics in hertzbeat is(Use `;` Connect them to `hostname; uptime | awk -F "," '{print $1}'` The data responded by the terminal is: -``` +```text tombook 14:00:15 up 72 days ``` diff --git a/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md b/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md index c9759063fa5..fbf4f6ea743 100644 --- a/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md +++ b/home/versioned_docs/version-v1.5.x/advanced/extend-tutorial.md @@ -12,7 +12,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz > In many scenarios, we need to monitor the provided HTTP API interface and obtain the index value returned by the interface. In this article, we use the http custom protocol to parse our common http interface response structure, and obtain the fields in the returned body as metric data. -``` +```json { "code": 200, "msg": "success", @@ -24,7 +24,7 @@ Before reading this tutorial, we hope that you are familiar with how to customiz As above, usually our background API interface will design such a general return. The same is true for the background of the hertzbeat system. 
Today, we will use the hertzbeat API as an example, add a new monitoring type **hertzbeat**, and monitor and collect its system summary statistics API `http://localhost:1157/api/summary`, the response data is: -``` +```json { "msg": null, "code": 0, diff --git a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md index 342c176673e..ef27d0d5f21 100644 --- a/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md +++ b/home/versioned_docs/version-v1.5.x/community/code-style-and-quality-guide.md @@ -585,7 +585,3 @@ public void process(String input) { - - - - -``` - -``` diff --git a/home/versioned_docs/version-v1.5.x/community/how-to-release.md b/home/versioned_docs/version-v1.5.x/community/how-to-release.md index 46ab8a2bc86..8c862805475 100644 --- a/home/versioned_docs/version-v1.5.x/community/how-to-release.md +++ b/home/versioned_docs/version-v1.5.x/community/how-to-release.md @@ -250,7 +250,7 @@ for i in *.tar.gz; do echo $i; sha512sum $i > $i.sha512 ; done > The final file list is as follows -``` +```properties apache-hertzbeat-1.6.0-incubating-src.tar.gz apache-hertzbeat-1.6.0-incubating-src.tar.gz.asc apache-hertzbeat-1.6.0-incubating-src.tar.gz.sha512 @@ -347,7 +347,7 @@ Send a voting email in the community requires at least three `+1` and no `-1`. > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0 rc1
> `Body`: -``` +```text Hello HertzBeat Community: This is a call for vote to release Apache HertzBeat (incubating) version release-1.6.0-RC1. @@ -403,7 +403,7 @@ After 72 hours, the voting results will be counted, and the voting result email > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Dear HertzBeat community, Thanks for your review and vote for "Release Apache HertzBeat (incubating) 1.6.0-rc1" @@ -439,7 +439,7 @@ Send a voting email in the incubator community requires at least three `+1` and > `Title`: [VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hello Incubator Community: This is a call for a vote to release Apache HertzBeat (incubating) version 1.6.0-RC1. @@ -492,7 +492,7 @@ If there is no -1 after 72 hours, reply to the email as follows > `Send to`:
> `Body`: -``` +```text Thanks everyone for review and vote, 72H passed. I'll announce the vote result soon. Best, @@ -505,7 +505,7 @@ Then the voting results will be counted, and the voting result email will be sen > `Title`: [RESULT][VOTE] Release Apache HertzBeat (incubating) 1.6.0-rc1
> `Body`: -``` +```text Hi Incubator Community, The vote to release Apache HertzBeat (incubating) 1.6.0-rc4 has passed with 3 +1 binding and no +0 or -1 votes. @@ -560,13 +560,13 @@ Then enter Release Title and Describe - Release Title: -``` +```text v1.6.0 ``` - Describe: -``` +```text xxx release note: xxx ``` @@ -582,7 +582,7 @@ The rename the release-1.6.0-rc1 branch to release-1.6.0. > `Title`: [ANNOUNCE] Apache HertzBeat (incubating) 1.6.0 released
> `Body`: -``` +```text Hi Community, We are glad to announce the release of Apache HertzBeat (incubating) 1.6.0. diff --git a/home/versioned_docs/version-v1.5.x/help/time_expression.md b/home/versioned_docs/version-v1.5.x/help/time_expression.md index 2f0711c4cf9..07eea14a81d 100644 --- a/home/versioned_docs/version-v1.5.x/help/time_expression.md +++ b/home/versioned_docs/version-v1.5.x/help/time_expression.md @@ -11,7 +11,7 @@ HertzBeat supports using expressions to calculate relative time during monitorin ### Syntax -``` +```shell ${FORMATTER [{ + | - } ]} ``` diff --git a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md index 255216c9aff..bd385b6c67f 100644 --- a/home/versioned_docs/version-v1.5.x/start/docker-deploy.md +++ b/home/versioned_docs/version-v1.5.x/start/docker-deploy.md @@ -10,7 +10,7 @@ sidebar_label: Install via Docker Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` diff --git a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md index 9cd1157db22..52a5fcf9043 100644 --- a/home/versioned_docs/version-v1.5.x/start/iotdb-init.md +++ b/home/versioned_docs/version-v1.5.x/start/iotdb-init.md @@ -24,7 +24,7 @@ Apache IoTDB is a software system that integrates the collection, storage, manag Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). After the installation you can check if the Docker version normally output at the terminal. 
- ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` diff --git a/home/versioned_docs/version-v1.5.x/start/mysql-change.md b/home/versioned_docs/version-v1.5.x/start/mysql-change.md index e78d414af5a..f3dfed7861f 100644 --- a/home/versioned_docs/version-v1.5.x/start/mysql-change.md +++ b/home/versioned_docs/version-v1.5.x/start/mysql-change.md @@ -14,14 +14,14 @@ MYSQL is a reliable relational database. In addition to default built-in H2 data For Docker installation, please refer to the [Docker official documentation](https://docs.docker.com/get-docker/). After the installation, please verify in the terminal that the Docker version can be printed normally. - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. Install MYSQl with Docker - ``` + ```shell $ docker run -d --name mysql \ -p 3306:3306 \ -v /opt/data:/var/lib/mysql \ diff --git a/home/versioned_docs/version-v1.5.x/start/postgresql-change.md b/home/versioned_docs/version-v1.5.x/start/postgresql-change.md index 26eff086cb6..e9cd31cd408 100644 --- a/home/versioned_docs/version-v1.5.x/start/postgresql-change.md +++ b/home/versioned_docs/version-v1.5.x/start/postgresql-change.md @@ -14,14 +14,14 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/)。 After the installation you can check if the Docker version normally output at the terminal. - ``` + ```shell $ docker -v Docker version 20.10.12, build e91ed57 ``` 2. Install PostgreSQL with Docker - ``` + ```shell docker run -d --name postgresql -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=123456 -e TZ=Asia/Shanghai postgresql:15 ``` @@ -33,7 +33,7 @@ PostgreSQL is a RDBMS emphasizing extensibility and SQL compliance. In addition 1. 
Enter postgreSQL or use the client to connect postgreSQL service - ``` + ```shell su - postgres psql ``` diff --git a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md index e2d6e21936a..99c88825369 100644 --- a/home/versioned_docs/version-v1.5.x/start/tdengine-init.md +++ b/home/versioned_docs/version-v1.5.x/start/tdengine-init.md @@ -46,14 +46,14 @@ After the installation you can check if the Docker version normally output at th 1. Enter database Docker container - ``` + ```shell docker exec -it tdengine /bin/bash ``` 2. Create database named hertzbeat After entering the container,execute `taos` command as follows: - ``` + ```shell root@tdengine-server:~/TDengine-server# taos Welcome to the TDengine shell from Linux, Client Version Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. @@ -62,7 +62,7 @@ After the installation you can check if the Docker version normally output at th execute commands to create database - ``` + ```shell taos> show databases; taos> CREATE DATABASE hertzbeat KEEP 90 DURATION 10 BUFFER 16; ``` @@ -72,7 +72,7 @@ After the installation you can check if the Docker version normally output at th 3. Check if hertzbeat database has been created success - ``` + ```shell taos> show databases; taos> use hertzbeat; ``` diff --git a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md index 9d1d1f76f53..a593e794dd7 100644 --- a/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md +++ b/home/versioned_docs/version-v1.5.x/start/victoria-metrics-init.md @@ -17,8 +17,6 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t ### Install VictoriaMetrics via Docker -> Refer to the official website [installation tutorial](https://docs.victoriametrics.com/Quick-Start.html#how-to-install) - 1. 
Download and install Docker environment Docker tools download refer to [Docker official document](https://docs.docker.com/get-docker/). After the installation you can check if the Docker version normally output at the terminal. diff --git a/package.json b/package.json new file mode 100644 index 00000000000..5d3eff22d75 --- /dev/null +++ b/package.json @@ -0,0 +1,5 @@ +{ + "devDependencies": { + "markdownlint": "^0.34.0" + } +} From 77f725cf425788ab166f404e5ca150e01e67e3a9 Mon Sep 17 00:00:00 2001 From: Jast Date: Mon, 26 Aug 2024 17:03:56 +0800 Subject: [PATCH 233/257] [fixbug]Fixbug kafka version display (#2609) --- manager/src/main/resources/define/app-kafka.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manager/src/main/resources/define/app-kafka.yml b/manager/src/main/resources/define/app-kafka.yml index 2616b5aa835..9b6eebfee5b 100644 --- a/manager/src/main/resources/define/app-kafka.yml +++ b/manager/src/main/resources/define/app-kafka.yml @@ -110,7 +110,7 @@ metrics: username: ^_^username^_^ password: ^_^password^_^ # jmx mbean object name - objectName: kafka.server:type=app-info,id=0 + objectName: kafka.server:type=app-info,id=* # jmx url url: ^_^url^_^ From 5eef247a5b50938dd29160a537caca42967ef677 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Mon, 26 Aug 2024 19:13:16 +0800 Subject: [PATCH 234/257] [improve] enable and fix test src checkstyle (#2610) Signed-off-by: tomsun28 Co-authored-by: shown --- .../AlertConvergeControllerTest.java | 131 +- .../AlertConvergesControllerTest.java | 96 +- .../controller/AlertDefineControllerTest.java | 20 +- .../AlertDefinesControllerTest.java | 20 +- .../controller/AlertReportControllerTest.java | 32 +- .../AlertSilenceControllerTest.java | 128 +- .../controller/AlertsControllerTest.java | 22 +- .../alert/reduce/AlarmCommonReduceTest.java | 141 +- .../alert/reduce/AlarmConvergeReduceTest.java | 100 +- .../alert/reduce/AlarmSilenceReduceTest.java | 232 ++- .../service/AlertConvergeServiceTest.java | 
90 +- .../AlertDefineExcelImExportServiceTest.java | 240 ++- .../AlertDefineJsonImExportServiceTest.java | 157 +- .../AlertDefineYamlImExportServiceTest.java | 162 +- .../service/AlertSilenceServiceTest.java | 114 +- .../collect/dns/DnsCollectImplTest.java | 2 - .../collect/ftp/FtpCollectImplTest.java | 4 +- .../collect/icmp/IcmpCollectImplTest.java | 6 +- .../collect/imap/ImapCollectImplTest.java | 18 +- .../memcached/MemcachedCollectImplTest.java | 8 +- .../nebulagraph/NgqlCollectImplTest.java | 50 +- .../redis/RedisClusterCollectImplTest.java | 6 +- .../redis/RedisSingleCollectImplTest.java | 14 +- .../collect/telnet/TelnetCollectImplTest.java | 4 +- .../dispatch/CommonDispatcherTest.java | 15 +- .../dispatch/MetricsCollectorQueueTest.java | 6 +- .../collector/dispatch/WorkerPoolTest.java | 6 +- .../dispatch/entrance/CollectServerTest.java | 148 +- .../internal/CollectJobServiceTest.java | 21 +- .../dispatch/export/NettyDataQueueTest.java | 39 +- .../dispatch/unit/DataSizeConvertTest.java | 65 +- .../collector/util/JsonPathParserTest.java | 78 +- .../impl/InMemoryCommonDataQueueTest.java | 116 +- .../queue/impl/KafkaCommonDataQueueTest.java | 264 ++- .../queue/impl/RedisCommonDataQueueTest.java | 168 +- .../serialize/AlertDeserializerTest.java | 88 +- .../common/serialize/AlertSerializerTest.java | 106 +- .../KafkaMetricsDataDeserializerTest.java | 92 +- .../KafkaMetricsDataSerializerTest.java | 93 +- .../common/support/CommonThreadPoolTest.java | 98 +- .../ResourceBundleUtf8ControlTest.java | 66 +- .../support/SpringContextHolderTest.java | 12 +- .../vaild/EmailParamValidatorTest.java | 71 +- .../support/vaild/HostParamValidatorTest.java | 73 +- .../vaild/PhoneNumParamValidatorTest.java | 61 +- .../hertzbeat/common/util/FileUtilTest.java | 4 +- .../util/IntervalExpressionUtilTest.java | 61 +- .../common/util/IpDomainUtilTest.java | 17 +- .../hertzbeat/common/util/JexlTest.java | 80 +- .../hertzbeat/common/util/LruHashMapTest.java | 3 +- 
.../hertzbeat/common/util/MapCapUtilTest.java | 25 +- .../common/util/ProtoJsonUtilTest.java | 10 +- .../common/util/ResourceBundleUtilTest.java | 25 +- .../hertzbeat/common/util/StrBufferTest.java | 179 +- .../hertzbeat/common/util/StrUtilTest.java | 60 +- .../common/util/TimePeriodUtilTest.java | 142 +- .../common/util/TimeZoneUtilTest.java | 76 +- .../common/util/entity/PersonTest.java | 1630 +++++++++-------- .../util/prometheus/PrometheusUtilTest.java | 280 ++- .../apache/hertzbeat/manager/ManagerTest.java | 1 + .../alerter/DispatcherAlarmTest.java | 16 +- .../impl/DbAlertStoreHandlerImplTest.java | 167 +- .../impl/EmailAlertNotifyHandlerImplTest.java | 14 +- .../GotifyAlertNotifyHandlerImplTest.java | 157 +- .../ServerChanAlertNotifyHandlerImplTest.java | 157 +- .../impl/SlackAlertNotifyHandlerImplTest.java | 1 + .../impl/SmsAlertNotifyHandlerImplTest.java | 118 +- ...TelegramBotAlertNotifyHandlerImplTest.java | 1 + .../WeChatAppAlertNotifyHandlerImplTest.java | 2 +- .../WeComAppAlertNotifyHandlerImplTest.java | 180 +- .../WebHookAlertNotifyHandlerImplTest.java | 23 +- .../listener/TimeZoneListenerTest.java | 53 +- .../manager/controller/AiControllerTest.java | 68 +- .../manager/controller/AppControllerTest.java | 6 +- .../controller/CollectorControllerTest.java | 10 +- .../GeneralConfigControllerTest.java | 93 +- .../controller/MetricsControllerTest.java | 56 +- .../controller/MonitorControllerTest.java | 18 +- .../controller/MonitorsControllerTest.java | 34 +- .../NoticeConfigControllerTest.java | 19 +- .../controller/PluginControllerTest.java | 54 +- .../controller/StatusPageControllerTest.java | 375 ++-- .../StatusPagePublicControllerTest.java | 119 +- .../controller/SummaryControllerTest.java | 4 +- .../manager/controller/TagControllerTest.java | 4 +- .../manager/dao/CollectorDaoTest.java | 8 +- .../manager/scheduler/ConsistentHashTest.java | 12 +- .../manager/service/AccountServiceTest.java | 222 ++- .../manager/service/AiServiceFactoryTest.java | 117 
+- .../manager/service/AppServiceTest.java | 4 +- .../service/AvailableAlertDefineInitTest.java | 85 +- .../manager/service/CollectorServiceTest.java | 14 +- .../manager/service/ConfigServiceTest.java | 5 +- .../service/ExcelImExportServiceTest.java | 139 +- .../service/JsonImExportServiceTest.java | 93 +- .../service/MailGeneralConfigServiceTest.java | 39 +- .../manager/service/MonitorServiceTest.java | 38 +- .../service/ObjectStoreConfigServiceTest.java | 79 +- .../service/ObsObjectStoreServiceTest.java | 155 +- .../manager/service/PluginServiceTest.java | 2 +- .../service/SmsGeneralConfigServiceTest.java | 55 +- .../service/StatusPageServiceTest.java | 149 +- .../SystemGeneralConfigServiceTest.java | 49 +- .../service/SystemSecretServiceTest.java | 41 +- .../manager/service/TagServiceTest.java | 2 +- .../service/TemplateConfigServiceTest.java | 81 +- .../service/YamlImExportServiceTest.java | 115 +- .../push/controller/PushControllerTest.java | 75 +- .../controller/PushGatewayControllerTest.java | 71 +- .../push/service/PushGatewayServiceTest.java | 88 +- .../push/service/PushServiceTest.java | 127 +- script/checkstyle/checkstyle.xml | 2 +- script/checkstyle/suppressions.xml | 3 - .../controller/MetricsDataControllerTest.java | 2 +- .../service/MetricsDataServiceTest.java | 10 +- .../service/WarehouseServiceTest.java | 66 +- .../store/MemoryDataStorageTest.java | 11 +- .../store/MetricsDataRedisCodecTest.java | 17 +- .../warehouse/store/RedisDataStorageTest.java | 8 +- 119 files changed, 4891 insertions(+), 4918 deletions(-) diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java index 6ccf349559c..64aca2bd273 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergeControllerTest.java @@ -17,6 
+17,16 @@ package org.apache.hertzbeat.alert.controller; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; import org.apache.hertzbeat.alert.service.AlertConvergeService; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.alerter.AlertConverge; @@ -30,17 +40,6 @@ import org.springframework.http.MediaType; import org.springframework.test.web.servlet.MockMvc; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.when; -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; -import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; -import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; -import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; - /** * test case for {@link AlertConvergeController} */ @@ -48,78 +47,78 @@ @ExtendWith(MockitoExtension.class) public class AlertConvergeControllerTest { - private MockMvc mockMvc; + private 
MockMvc mockMvc; - @Mock - private AlertConvergeService alertConvergeService; + @Mock + private AlertConvergeService alertConvergeService; - private AlertConverge alertConverge; + private AlertConverge alertConverge; - @InjectMocks - private AlertConvergeController alertConvergeController; + @InjectMocks + private AlertConvergeController alertConvergeController; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - this.mockMvc = standaloneSetup(alertConvergeController).build(); + this.mockMvc = standaloneSetup(alertConvergeController).build(); - alertConverge = AlertConverge.builder() - .name("test") - .creator("admin") - .modifier("admin") - .id(1L) - .build(); - } + alertConverge = AlertConverge.builder() + .name("test") + .creator("admin") + .modifier("admin") + .id(1L) + .build(); + } - @Test - void testAddNewAlertConverge() throws Exception { + @Test + void testAddNewAlertConverge() throws Exception { - doNothing().when(alertConvergeService).validate(any(AlertConverge.class), eq(false)); - doNothing().when(alertConvergeService).addAlertConverge(any(AlertConverge.class)); + doNothing().when(alertConvergeService).validate(any(AlertConverge.class), eq(false)); + doNothing().when(alertConvergeService).addAlertConverge(any(AlertConverge.class)); - mockMvc.perform(post("/api/alert/converge") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(alertConverge)) - ).andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) - .andExpect(jsonPath("$.msg").value("Add success")); - } + mockMvc.perform(post("/api/alert/converge") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertConverge)) + ).andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Add success")); + } - @Test - void testModifyAlertConverge() throws Exception { + @Test + void testModifyAlertConverge() throws Exception { - 
doNothing().when(alertConvergeService).validate(any(AlertConverge.class), eq(true)); - doNothing().when(alertConvergeService).modifyAlertConverge(any(AlertConverge.class)); + doNothing().when(alertConvergeService).validate(any(AlertConverge.class), eq(true)); + doNothing().when(alertConvergeService).modifyAlertConverge(any(AlertConverge.class)); - mockMvc.perform(put("/api/alert/converge") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(alertConverge)) - ).andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) - .andExpect(jsonPath("$.msg").value("Modify success")); - } + mockMvc.perform(put("/api/alert/converge") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertConverge)) + ).andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) + .andExpect(jsonPath("$.msg").value("Modify success")); + } - @Test - void testGetAlertConvergeExists() throws Exception { + @Test + void testGetAlertConvergeExists() throws Exception { - when(alertConvergeService.getAlertConverge(1L)).thenReturn(alertConverge); + when(alertConvergeService.getAlertConverge(1L)).thenReturn(alertConverge); - mockMvc.perform(get("/api/alert/converge/{id}", 1L) - .accept(MediaType.APPLICATION_JSON)) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.data.id").value(alertConverge.getId())); - } + mockMvc.perform(get("/api/alert/converge/{id}", 1L) + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.data.id").value(alertConverge.getId())); + } - @Test - void testGetAlertConvergeNotExists() throws Exception { + @Test + void testGetAlertConvergeNotExists() throws Exception { - when(alertConvergeService.getAlertConverge(1L)).thenReturn(null); + when(alertConvergeService.getAlertConverge(1L)).thenReturn(null); - mockMvc.perform(get("/api/alert/converge/{id}", 1L) - .accept(MediaType.APPLICATION_JSON)) - 
.andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.MONITOR_NOT_EXIST_CODE)) - .andExpect(jsonPath("$.msg").value("AlertConverge not exist.")); - } + mockMvc.perform(get("/api/alert/converge/{id}", 1L) + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.MONITOR_NOT_EXIST_CODE)) + .andExpect(jsonPath("$.msg").value("AlertConverge not exist.")); + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java index 3eb5eb97eab..dd52237962e 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertConvergesControllerTest.java @@ -53,68 +53,68 @@ @ExtendWith(MockitoExtension.class) class AlertConvergesControllerTest { - private MockMvc mockMvc; + private MockMvc mockMvc; - @Mock - private AlertConvergeService alertConvergeService; + @Mock + private AlertConvergeService alertConvergeService; - @InjectMocks - private AlertConvergesController alertConvergesController; + @InjectMocks + private AlertConvergesController alertConvergesController; - private List alertConvergeList; + private List alertConvergeList; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - this.mockMvc = standaloneSetup(alertConvergesController).build(); + this.mockMvc = standaloneSetup(alertConvergesController).build(); - AlertConverge alertConverge1 = AlertConverge.builder() - .name("Converge1") - .id(1L) - .build(); + AlertConverge alertConverge1 = AlertConverge.builder() + .name("Converge1") + .id(1L) + .build(); - AlertConverge alertConverge2 = AlertConverge.builder() - .name("Converge2") - .id(2L) - .build(); + AlertConverge alertConverge2 = AlertConverge.builder() + .name("Converge2") + .id(2L) + .build(); - 
alertConvergeList = Arrays.asList(alertConverge1, alertConverge2); - } + alertConvergeList = Arrays.asList(alertConverge1, alertConverge2); + } - @Test - void testGetAlertConverges() throws Exception { + @Test + void testGetAlertConverges() throws Exception { - Page alertConvergePage = new PageImpl<>( - alertConvergeList, - PageRequest.of(0, 8, Sort.by("id").descending()), - alertConvergeList.size() - ); + Page alertConvergePage = new PageImpl<>( + alertConvergeList, + PageRequest.of(0, 8, Sort.by("id").descending()), + alertConvergeList.size() + ); - when(alertConvergeService.getAlertConverges(null, null, "id", "desc", 0, 8)).thenReturn(alertConvergePage); + when(alertConvergeService.getAlertConverges(null, null, "id", "desc", 0, 8)).thenReturn(alertConvergePage); - mockMvc.perform(get("/api/alert/converges") - .param("pageIndex", "0") - .param("pageSize", "8") - .param("sort", "id") - .param("order", "desc") - .accept(MediaType.APPLICATION_JSON)) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.data.content[0].id").value(1)) - .andExpect(jsonPath("$.data.content[0].name").value("Converge1")) - .andExpect(jsonPath("$.data.content[1].id").value(2)) - .andExpect(jsonPath("$.data.content[1].name").value("Converge2")); - } + mockMvc.perform(get("/api/alert/converges") + .param("pageIndex", "0") + .param("pageSize", "8") + .param("sort", "id") + .param("order", "desc") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.data.content[0].id").value(1)) + .andExpect(jsonPath("$.data.content[0].name").value("Converge1")) + .andExpect(jsonPath("$.data.content[1].id").value(2)) + .andExpect(jsonPath("$.data.content[1].name").value("Converge2")); + } - @Test - void testDeleteAlertDefines() throws Exception { + @Test + void testDeleteAlertDefines() throws Exception { - doNothing().when(alertConvergeService).deleteAlertConverges(eq(new HashSet<>(Arrays.asList(1L, 2L)))); + 
doNothing().when(alertConvergeService).deleteAlertConverges(eq(new HashSet<>(Arrays.asList(1L, 2L)))); - mockMvc.perform(delete("/api/alert/converges") - .param("ids", "1,2") - .accept(MediaType.APPLICATION_JSON)) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); - } + mockMvc.perform(delete("/api/alert/converges") + .param("ids", "1,2") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefineControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefineControllerTest.java index fa2fe11c615..95a3637f265 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefineControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefineControllerTest.java @@ -97,8 +97,8 @@ void setUp() { void addNewAlertDefine() throws Exception { // Simulate the client sending a request to the server mockMvc.perform(MockMvcRequestBuilders.post("/api/alert/define") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(this.alertDefine))) + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(this.alertDefine))) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andReturn(); @@ -107,8 +107,8 @@ void addNewAlertDefine() throws Exception { @Test void modifyAlertDefine() throws Exception { mockMvc.perform(MockMvcRequestBuilders.put("/api/alert/define") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(this.alertDefine))) + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(this.alertDefine))) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andReturn(); @@ -121,7 +121,7 @@ void getAlertDefine() throws Exception 
{ .thenReturn(this.alertDefine); mockMvc.perform(MockMvcRequestBuilders.get("/api/alert/define/" + this.alertDefine.getId()) - .contentType(MediaType.APPLICATION_JSON)) + .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andExpect(jsonPath("$.data.id").value(alertDefine.getId())) @@ -138,8 +138,8 @@ void getAlertDefine() throws Exception { @Test void deleteAlertDefine() throws Exception { mockMvc.perform(MockMvcRequestBuilders.delete("/api/alert/define/" + this.alertDefine.getId()) - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(this.alertDefine))) + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(this.alertDefine))) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andReturn(); @@ -148,8 +148,8 @@ void deleteAlertDefine() throws Exception { @Test void applyAlertDefineMonitorsBind() throws Exception { mockMvc.perform(MockMvcRequestBuilders.post("/api/alert/define/" + this.alertDefine.getId() + "/monitors") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(this.alertDefineMonitorBinds))) + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(this.alertDefineMonitorBinds))) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andReturn(); @@ -161,7 +161,7 @@ void getAlertDefineMonitorsBind() throws Exception { .thenReturn(this.alertDefineMonitorBinds); mockMvc.perform(MockMvcRequestBuilders.get("/api/alert/define/" + this.alertDefine.getId() + "/monitors") - .contentType(MediaType.APPLICATION_JSON)) + .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andExpect(jsonPath("$.data[0].id").value(alertDefineMonitorBinds.get(0).getId())) diff --git 
a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java index 7f9ef2c25f6..0af84fba1fc 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertDefinesControllerTest.java @@ -68,7 +68,7 @@ class AlertDefinesControllerTest { String order = "asc"; Integer pageIndex = 1; Integer pageSize = 7; - + // Parameter collection Map content = new HashMap<>(); @@ -114,13 +114,13 @@ void getAlertDefines() throws Exception { Mockito.when(alertDefineService.getAlertDefines(null, null, null, "id", "desc", 1, 10)).thenReturn(new PageImpl<>(Collections.singletonList(define))); mockMvc.perform(MockMvcRequestBuilders.get( - "/api/alert/defines") - .param("ids", ids.toString().substring(1, ids.toString().length() - 1)) - .param("priority", priority.toString()) - .param("sort", sort) - .param("order", order) - .param("pageIndex", pageIndex.toString()) - .param("pageSize", pageSize.toString())) + "/api/alert/defines") + .param("ids", ids.toString().substring(1, ids.toString().length() - 1)) + .param("priority", priority.toString()) + .param("sort", sort) + .param("order", order) + .param("pageIndex", pageIndex.toString()) + .param("pageSize", pageSize.toString())) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andExpect(jsonPath("$.data.content").value(new ArrayList<>())) @@ -142,8 +142,8 @@ void getAlertDefines() throws Exception { @Test void deleteAlertDefines() throws Exception { this.mockMvc.perform(MockMvcRequestBuilders.delete("/api/alert/defines") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(ids))) + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(ids))) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) 
CommonConstants.SUCCESS_CODE)) .andReturn(); diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertReportControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertReportControllerTest.java index cb7f4e04588..e2dc1bf3ff8 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertReportControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertReportControllerTest.java @@ -41,20 +41,20 @@ */ @ExtendWith(MockitoExtension.class) class AlertReportControllerTest { - + private MockMvc mockMvc; @Mock private AlertService alertService; - + @InjectMocks private AlertReportController alertReportController; - + @BeforeEach void setUp() { this.mockMvc = MockMvcBuilders.standaloneSetup(alertReportController).build(); } - + @Test void addNewAlertReportTencent() throws Exception { TenCloudAlertReport.Dimensions dimensions = new TenCloudAlertReport.Dimensions(); @@ -86,14 +86,14 @@ void addNewAlertReportTencent() throws Exception { alarmPolicyInfo.setConditions(conditions); TenCloudAlertReport report = TenCloudAlertReport.builder() - .sessionId("123") - .alarmStatus("1") - .alarmType("event") - .durationTime(2) - .firstOccurTime("2023-08-14 11:11:11") - .alarmObjInfo(alarmObjInfo) - .alarmPolicyInfo(alarmPolicyInfo) - .build(); + .sessionId("123") + .alarmStatus("1") + .alarmType("event") + .durationTime(2) + .firstOccurTime("2023-08-14 11:11:11") + .alarmObjInfo(alarmObjInfo) + .alarmPolicyInfo(alarmPolicyInfo) + .build(); mockMvc.perform( MockMvcRequestBuilders .post("/api/alerts/report/tencloud") @@ -105,16 +105,16 @@ void addNewAlertReportTencent() throws Exception { .andExpect(content().json("{\"data\":null,\"msg\":\"Add report success\",\"code\":0}")) .andReturn(); } - + @Test void addNewAlertReport() throws Exception { GeneralCloudAlertReport generalCloudAlertReport = new GeneralCloudAlertReport(); generalCloudAlertReport.setAlertDateTime("2023-02-22T07:27:15.404000000Z"); 
mockMvc.perform(MockMvcRequestBuilders - .post("/api/alerts/report") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(generalCloudAlertReport)) + .post("/api/alerts/report") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(generalCloudAlertReport)) ) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java index b31415f275a..2ddd2f236c9 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertSilenceControllerTest.java @@ -17,13 +17,22 @@ package org.apache.hertzbeat.alert.controller; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; import org.apache.hertzbeat.alert.service.AlertSilenceService; import org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.alerter.AlertSilence; import org.apache.hertzbeat.common.util.JsonUtil; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; - import org.junit.jupiter.api.extension.ExtendWith; import 
org.mockito.InjectMocks; import org.mockito.Mock; @@ -31,17 +40,6 @@ import org.springframework.http.MediaType; import org.springframework.test.web.servlet.MockMvc; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.when; -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; -import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; -import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; -import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; -import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup; - /** * tes case for {@link AlertSilenceController} */ @@ -49,76 +47,76 @@ @ExtendWith(MockitoExtension.class) class AlertSilenceControllerTest { - private MockMvc mockMvc; + private MockMvc mockMvc; - @Mock - private AlertSilenceService alertSilenceService; + @Mock + private AlertSilenceService alertSilenceService; - private AlertSilence alertSilence; + private AlertSilence alertSilence; - @InjectMocks - private AlertSilenceController alertSilenceController; + @InjectMocks + private AlertSilenceController alertSilenceController; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - this.mockMvc = standaloneSetup(alertSilenceController).build(); + this.mockMvc = standaloneSetup(alertSilenceController).build(); - alertSilence = AlertSilence.builder() - .id(1L) - .name("Test Silence") - .type((byte) 1) - .build(); - } + alertSilence = AlertSilence.builder() + .id(1L) + .name("Test Silence") + .type((byte) 1) + .build(); + } - @Test - void testAddNewAlertSilence() throws Exception { + @Test + void testAddNewAlertSilence() throws Exception { - 
doNothing().when(alertSilenceService).validate(any(AlertSilence.class), eq(false)); - doNothing().when(alertSilenceService).addAlertSilence(any(AlertSilence.class)); + doNothing().when(alertSilenceService).validate(any(AlertSilence.class), eq(false)); + doNothing().when(alertSilenceService).addAlertSilence(any(AlertSilence.class)); - mockMvc.perform(post("/api/alert/silence") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(alertSilence))) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); - } + mockMvc.perform(post("/api/alert/silence") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertSilence))) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } - @Test - void testModifyAlertSilence() throws Exception { + @Test + void testModifyAlertSilence() throws Exception { - doNothing().when(alertSilenceService).validate(any(AlertSilence.class), eq(true)); - doNothing().when(alertSilenceService).modifyAlertSilence(any(AlertSilence.class)); + doNothing().when(alertSilenceService).validate(any(AlertSilence.class), eq(true)); + doNothing().when(alertSilenceService).modifyAlertSilence(any(AlertSilence.class)); - mockMvc.perform(put("/api/alert/silence") - .contentType(MediaType.APPLICATION_JSON) - .content(JsonUtil.toJson(alertSilence))) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); - } + mockMvc.perform(put("/api/alert/silence") + .contentType(MediaType.APPLICATION_JSON) + .content(JsonUtil.toJson(alertSilence))) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)); + } - @Test - void testGetAlertSilence() throws Exception { + @Test + void testGetAlertSilence() throws Exception { - when(alertSilenceService.getAlertSilence(1L)).thenReturn(alertSilence); + 
when(alertSilenceService.getAlertSilence(1L)).thenReturn(alertSilence); - mockMvc.perform(get("/api/alert/silence/1") - .accept(MediaType.APPLICATION_JSON)) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.data.id").value(1)) - .andExpect(jsonPath("$.data.name").value("Test Silence")); - } + mockMvc.perform(get("/api/alert/silence/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.data.id").value(1)) + .andExpect(jsonPath("$.data.name").value("Test Silence")); + } - @Test - void testGetAlertSilenceNotExists() throws Exception { + @Test + void testGetAlertSilenceNotExists() throws Exception { - when(alertSilenceService.getAlertSilence(1L)).thenReturn(null); + when(alertSilenceService.getAlertSilence(1L)).thenReturn(null); - mockMvc.perform(get("/api/alert/silence/1") - .accept(MediaType.APPLICATION_JSON)) - .andExpect(status().isOk()) - .andExpect(jsonPath("$.code").value((int) CommonConstants.MONITOR_NOT_EXIST_CODE)) - .andExpect(jsonPath("$.msg").value("AlertSilence not exist.")); - } + mockMvc.perform(get("/api/alert/silence/1") + .accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.code").value((int) CommonConstants.MONITOR_NOT_EXIST_CODE)) + .andExpect(jsonPath("$.msg").value("AlertSilence not exist.")); + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java index e1044206617..69e330dedf5 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/controller/AlertsControllerTest.java @@ -87,17 +87,17 @@ void getAlerts() throws Exception { .thenReturn(alertPage); mockMvc.perform(MockMvcRequestBuilders - .get("/api/alerts") - .param("ids", ids.stream().map(String::valueOf).collect(Collectors.joining(","))) - .param("monitorId", 
String.valueOf(monitorId)) - .param("priority", String.valueOf(priority)) - .param("status", String.valueOf(status)) - .param("content", content) - .param("sort", sortField) - .param("order", orderType) - .param("pageIndex", String.valueOf(pageIndex)) - .param("pageSize", String.valueOf(pageSize)) - .accept(MediaType.APPLICATION_JSON)) + .get("/api/alerts") + .param("ids", ids.stream().map(String::valueOf).collect(Collectors.joining(","))) + .param("monitorId", String.valueOf(monitorId)) + .param("priority", String.valueOf(priority)) + .param("status", String.valueOf(status)) + .param("content", content) + .param("sort", sortField) + .param("order", orderType) + .param("pageIndex", String.valueOf(pageIndex)) + .param("pageSize", String.valueOf(pageSize)) + .accept(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andExpect(jsonPath("$.code").value((int) CommonConstants.SUCCESS_CODE)) .andExpect(jsonPath("$.data.content.length()").value(1)) diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java index 9b7f77deef1..43e0890deb5 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmCommonReduceTest.java @@ -17,30 +17,28 @@ package org.apache.hertzbeat.alert.reduce; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.Collections; import java.util.HashMap; import java.util.Map; - import org.apache.hertzbeat.alert.dao.AlertMonitorDao; import 
org.apache.hertzbeat.common.constants.CommonConstants; import org.apache.hertzbeat.common.entity.alerter.Alert; -import org.apache.hertzbeat.common.queue.CommonDataQueue; import org.apache.hertzbeat.common.entity.manager.Tag; +import org.apache.hertzbeat.common.queue.CommonDataQueue; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * test case for {@link AlarmCommonReduce} */ @@ -48,88 +46,89 @@ @ExtendWith(MockitoExtension.class) class AlarmCommonReduceTest { - @Mock - private AlarmSilenceReduce alarmSilenceReduce; + @Mock + private AlarmSilenceReduce alarmSilenceReduce; + + @Mock + private AlarmConvergeReduce alarmConvergeReduce; - @Mock - private AlarmConvergeReduce alarmConvergeReduce; + @Mock + private CommonDataQueue dataQueue; - @Mock - private CommonDataQueue dataQueue; + @Mock + private AlertMonitorDao alertMonitorDao; - @Mock - private AlertMonitorDao alertMonitorDao; + private AlarmCommonReduce alarmCommonReduce; - private AlarmCommonReduce alarmCommonReduce; + private Alert testAlert; - private Alert testAlert; + @BeforeEach + void setUp() { - @BeforeEach - void setUp() { + testAlert = Alert.builder().build(); + alarmCommonReduce = new AlarmCommonReduce( + alarmSilenceReduce, + alarmConvergeReduce, + dataQueue, + alertMonitorDao + ); + } - testAlert = Alert.builder().build(); - alarmCommonReduce = new AlarmCommonReduce( - alarmSilenceReduce, - alarmConvergeReduce, - dataQueue, - alertMonitorDao - ); - } + 
@Test + void testReduceAndSendAlarmNoMonitorId() { - @Test - void testReduceAndSendAlarmNoMonitorId() { + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); + when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(true); - when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); - when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(true); + alarmCommonReduce.reduceAndSendAlarm(testAlert); - alarmCommonReduce.reduceAndSendAlarm(testAlert); + verify(dataQueue).sendAlertsData(testAlert); + verify(alertMonitorDao, never()).findMonitorIdBindTags(anyLong()); + } - verify(dataQueue).sendAlertsData(testAlert); - verify(alertMonitorDao, never()).findMonitorIdBindTags(anyLong()); - } - @Test - void testReduceAndSendAlarmWithMonitorId() { + @Test + void testReduceAndSendAlarmWithMonitorId() { - Map tags = new HashMap<>(); - tags.put(CommonConstants.TAG_MONITOR_ID, "123"); - testAlert.setTags(tags); + Map tags = new HashMap<>(); + tags.put(CommonConstants.TAG_MONITOR_ID, "123"); + testAlert.setTags(tags); - doReturn(Collections.singletonList( - Tag.builder() - .name("newTag") - .tagValue("tagValue") - .build()) - ).when(alertMonitorDao).findMonitorIdBindTags(123L); - when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); - when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(true); + doReturn(Collections.singletonList( + Tag.builder() + .name("newTag") + .tagValue("tagValue") + .build()) + ).when(alertMonitorDao).findMonitorIdBindTags(123L); + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); + when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(true); - alarmCommonReduce.reduceAndSendAlarm(testAlert); + alarmCommonReduce.reduceAndSendAlarm(testAlert); - assertTrue(testAlert.getTags().containsKey("newTag")); - assertEquals("tagValue", testAlert.getTags().get("newTag")); - verify(dataQueue).sendAlertsData(testAlert); - } + assertTrue(testAlert.getTags().containsKey("newTag")); + 
assertEquals("tagValue", testAlert.getTags().get("newTag")); + verify(dataQueue).sendAlertsData(testAlert); + } - @Test - void testReduceAndSendAlarmConvergeFilterFail() { + @Test + void testReduceAndSendAlarmConvergeFilterFail() { - when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(false); + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(false); - alarmCommonReduce.reduceAndSendAlarm(testAlert); + alarmCommonReduce.reduceAndSendAlarm(testAlert); - verify(dataQueue, never()).sendAlertsData(testAlert); - verify(alarmSilenceReduce, never()).filterSilence(any(Alert.class)); - } + verify(dataQueue, never()).sendAlertsData(testAlert); + verify(alarmSilenceReduce, never()).filterSilence(any(Alert.class)); + } - @Test - void testReduceAndSendAlarmSilenceFilterFail() { + @Test + void testReduceAndSendAlarmSilenceFilterFail() { - when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); - when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(false); + when(alarmConvergeReduce.filterConverge(testAlert)).thenReturn(true); + when(alarmSilenceReduce.filterSilence(testAlert)).thenReturn(false); - alarmCommonReduce.reduceAndSendAlarm(testAlert); + alarmCommonReduce.reduceAndSendAlarm(testAlert); - verify(dataQueue, never()).sendAlertsData(testAlert); - } + verify(dataQueue, never()).sendAlertsData(testAlert); + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java index 2e06c3aacdd..b304e6035b7 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmConvergeReduceTest.java @@ -17,9 +17,12 @@ package org.apache.hertzbeat.alert.reduce; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.verify; +import 
static org.mockito.Mockito.when; import java.util.Collections; import java.util.HashMap; - import org.apache.hertzbeat.alert.dao.AlertConvergeDao; import org.apache.hertzbeat.common.cache.CacheFactory; import org.apache.hertzbeat.common.cache.CommonCacheService; @@ -33,11 +36,6 @@ import org.mockito.MockedStatic; import org.mockito.junit.jupiter.MockitoExtension; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mockStatic; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * test case for {@link AlarmConvergeReduce} */ @@ -45,70 +43,70 @@ @ExtendWith(MockitoExtension.class) class AlarmConvergeReduceTest { - @Mock - private AlertConvergeDao alertConvergeDao; + @Mock + private AlertConvergeDao alertConvergeDao; - @Mock - private CommonCacheService convergeCache; + @Mock + private CommonCacheService convergeCache; - private AlarmConvergeReduce alarmConvergeReduce; + private AlarmConvergeReduce alarmConvergeReduce; - private Alert testAlert; + private Alert testAlert; - private MockedStatic cacheFactoryMockedStatic; + private MockedStatic cacheFactoryMockedStatic; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - testAlert = Alert.builder() - .tags(new HashMap<>()) - .status(CommonConstants.ALERT_STATUS_CODE_SOLVED) - .build(); + testAlert = Alert.builder() + .tags(new HashMap<>()) + .status(CommonConstants.ALERT_STATUS_CODE_SOLVED) + .build(); - cacheFactoryMockedStatic = mockStatic(CacheFactory.class); - cacheFactoryMockedStatic.when(CacheFactory::getAlertConvergeCache).thenReturn(convergeCache); + cacheFactoryMockedStatic = mockStatic(CacheFactory.class); + cacheFactoryMockedStatic.when(CacheFactory::getAlertConvergeCache).thenReturn(convergeCache); - alarmConvergeReduce = new AlarmConvergeReduce(alertConvergeDao); - } + alarmConvergeReduce = new AlarmConvergeReduce(alertConvergeDao); + } - @AfterEach - void tearDown() { + @AfterEach + void tearDown() { - if 
(cacheFactoryMockedStatic != null) { - cacheFactoryMockedStatic.close(); - } - } + if (cacheFactoryMockedStatic != null) { + cacheFactoryMockedStatic.close(); + } + } - @Test - void testFilterConverge_RestoredAlert() { + @Test + void testFilterConverge_RestoredAlert() { - testAlert.setStatus(CommonConstants.ALERT_STATUS_CODE_RESTORED); - boolean result = alarmConvergeReduce.filterConverge(testAlert); + testAlert.setStatus(CommonConstants.ALERT_STATUS_CODE_RESTORED); + boolean result = alarmConvergeReduce.filterConverge(testAlert); - assertTrue(result); - } + assertTrue(result); + } - @Test - void testFilterConverge_IgnoreTag() { + @Test + void testFilterConverge_IgnoreTag() { - testAlert.getTags().put(CommonConstants.IGNORE, "true"); - boolean result = alarmConvergeReduce.filterConverge(testAlert); + testAlert.getTags().put(CommonConstants.IGNORE, "true"); + boolean result = alarmConvergeReduce.filterConverge(testAlert); - assertTrue(result); - } + assertTrue(result); + } - @Test - void testFilterConvergeNoConverge() { + @Test + void testFilterConvergeNoConverge() { - when(convergeCache.get(CommonConstants.CACHE_ALERT_CONVERGE)).thenReturn(null); - when(alertConvergeDao.findAll()).thenReturn(Collections.emptyList()); + when(convergeCache.get(CommonConstants.CACHE_ALERT_CONVERGE)).thenReturn(null); + when(alertConvergeDao.findAll()).thenReturn(Collections.emptyList()); - boolean result = alarmConvergeReduce.filterConverge(testAlert); + boolean result = alarmConvergeReduce.filterConverge(testAlert); - assertTrue(result); - verify(convergeCache).get(CommonConstants.CACHE_ALERT_CONVERGE); - verify(alertConvergeDao).findAll(); - verify(convergeCache).put(CommonConstants.CACHE_ALERT_CONVERGE, Collections.emptyList()); - } + assertTrue(result); + verify(convergeCache).get(CommonConstants.CACHE_ALERT_CONVERGE); + verify(alertConvergeDao).findAll(); + verify(convergeCache).put(CommonConstants.CACHE_ALERT_CONVERGE, Collections.emptyList()); + } } diff --git 
a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java index 5f18ac52c07..079fb49ffde 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/reduce/AlarmSilenceReduceTest.java @@ -17,11 +17,21 @@ package org.apache.hertzbeat.alert.reduce; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.time.LocalDateTime; import java.time.ZoneId; import java.util.Collections; import java.util.HashMap; - import org.apache.hertzbeat.alert.dao.AlertSilenceDao; import org.apache.hertzbeat.common.cache.CacheFactory; import org.apache.hertzbeat.common.cache.CommonCacheService; @@ -36,153 +46,141 @@ import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mockStatic; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * test case for {@link AlarmSilenceReduce} */ class AlarmSilenceReduceTest { - 
@Mock - private AlertSilenceDao alertSilenceDao; + @Mock + private AlertSilenceDao alertSilenceDao; - @Mock - private CommonCacheService silenceCache; + @Mock + private CommonCacheService silenceCache; - private AlarmSilenceReduce alarmSilenceReduce; + private AlarmSilenceReduce alarmSilenceReduce; - private MockedStatic cacheFactoryMockedStatic; + private MockedStatic cacheFactoryMockedStatic; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - MockitoAnnotations.openMocks(this); + MockitoAnnotations.openMocks(this); - cacheFactoryMockedStatic = mockStatic(CacheFactory.class); - cacheFactoryMockedStatic.when(CacheFactory::getAlertSilenceCache).thenReturn(silenceCache); + cacheFactoryMockedStatic = mockStatic(CacheFactory.class); + cacheFactoryMockedStatic.when(CacheFactory::getAlertSilenceCache).thenReturn(silenceCache); - // inject dao object. - alarmSilenceReduce = new AlarmSilenceReduce(alertSilenceDao); - } + // inject dao object. + alarmSilenceReduce = new AlarmSilenceReduce(alertSilenceDao); + } - @Test - void testFilterSilenceNull() { + @Test + void testFilterSilenceNull() { - // when cache get result is null, exec db logic. - when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(null); - doReturn(Collections.emptyList()).when(alertSilenceDao).findAll(); + // when cache get result is null, exec db logic. 
+ when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(null); + doReturn(Collections.emptyList()).when(alertSilenceDao).findAll(); - Alert alert = Alert.builder() - .tags(new HashMap<>()) - .priority((byte) 1) - .build(); + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + .build(); - boolean result = alarmSilenceReduce.filterSilence(alert); + boolean result = alarmSilenceReduce.filterSilence(alert); - assertTrue(result); - verify(alertSilenceDao, times(1)).findAll(); - verify(silenceCache, times(1)).put(eq(CommonConstants.CACHE_ALERT_SILENCE), any()); - } + assertTrue(result); + verify(alertSilenceDao, times(1)).findAll(); + verify(silenceCache, times(1)).put(eq(CommonConstants.CACHE_ALERT_SILENCE), any()); + } - @Test - void testFilterSilenceOnce() { + @Test + void testFilterSilenceOnce() { - AlertSilence alertSilence = AlertSilence.builder() - .enable(Boolean.TRUE) - .matchAll(Boolean.TRUE) - .type((byte) 0) - .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) - .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) - .times(0) - .build(); + AlertSilence alertSilence = AlertSilence.builder() + .enable(Boolean.TRUE) + .matchAll(Boolean.TRUE) + .type((byte) 0) + .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) + .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) + .times(0) + .build(); - when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); - doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); + when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); + doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); - Alert alert = Alert.builder() - .tags(new HashMap<>()) - .priority((byte) 1) - .build(); + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + 
.build(); - boolean result = alarmSilenceReduce.filterSilence(alert); + boolean result = alarmSilenceReduce.filterSilence(alert); - assertFalse(result); - verify(alertSilenceDao, times(1)).save(alertSilence); - assertEquals(1, alertSilence.getTimes()); - } + assertFalse(result); + verify(alertSilenceDao, times(1)).save(alertSilence); + assertEquals(1, alertSilence.getTimes()); + } - @Test - void testFilterSilenceCyc() { + @Test + void testFilterSilenceCyc() { - AlertSilence alertSilence = AlertSilence.builder() - .enable(Boolean.TRUE) - .matchAll(Boolean.TRUE) - .type((byte) 1) // cyc time - .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) - .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) - .times(0) - .days(Collections.singletonList((byte) LocalDateTime.now().getDayOfWeek().getValue())) - .build(); + AlertSilence alertSilence = AlertSilence.builder() + .enable(Boolean.TRUE) + .matchAll(Boolean.TRUE) + .type((byte) 1) // cyc time + .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) + .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) + .times(0) + .days(Collections.singletonList((byte) LocalDateTime.now().getDayOfWeek().getValue())) + .build(); - when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); - doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); + when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); + doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); - Alert alert = Alert.builder() - .tags(new HashMap<>()) - .priority((byte) 1) - .build(); + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + .build(); - boolean result = alarmSilenceReduce.filterSilence(alert); + boolean result = alarmSilenceReduce.filterSilence(alert); - assertFalse(result); - verify(alertSilenceDao, 
times(1)).save(alertSilence); - assertEquals(1, alertSilence.getTimes()); - } + assertFalse(result); + verify(alertSilenceDao, times(1)).save(alertSilence); + assertEquals(1, alertSilence.getTimes()); + } - @Test - void testFilterSilenceNoMatch() { + @Test + void testFilterSilenceNoMatch() { - AlertSilence alertSilence = AlertSilence.builder() - .enable(Boolean.TRUE) - .matchAll(Boolean.TRUE) - .type((byte) 0) - .tags(Collections.singletonList(new TagItem("non-matching-tag", "value"))) - .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) - .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) - .times(0) - .build(); - - when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); - doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); - - Alert alert = Alert.builder() - .tags(new HashMap<>()) - .priority((byte) 1) - .build(); - - boolean result = alarmSilenceReduce.filterSilence(alert); - - assertTrue(result); - verify(alertSilenceDao, never()).save(any()); - } - - @AfterEach - public void tearDown() { - - if (cacheFactoryMockedStatic != null) { - cacheFactoryMockedStatic.close(); - } - } + AlertSilence alertSilence = AlertSilence.builder() + .enable(Boolean.TRUE) + .matchAll(Boolean.TRUE) + .type((byte) 0) + .tags(Collections.singletonList(new TagItem("non-matching-tag", "value"))) + .periodEnd(LocalDateTime.now().atZone(ZoneId.systemDefault()).minusHours(1)) + .periodStart(LocalDateTime.now().atZone(ZoneId.systemDefault()).plusHours(1)) + .times(0) + .build(); + + when(silenceCache.get(CommonConstants.CACHE_ALERT_SILENCE)).thenReturn(Collections.singletonList(alertSilence)); + doReturn(alertSilence).when(alertSilenceDao).save(alertSilence); + + Alert alert = Alert.builder() + .tags(new HashMap<>()) + .priority((byte) 1) + .build(); + + boolean result = alarmSilenceReduce.filterSilence(alert); + + assertTrue(result); + verify(alertSilenceDao, 
never()).save(any()); + } + + @AfterEach + public void tearDown() { + + if (cacheFactoryMockedStatic != null) { + cacheFactoryMockedStatic.close(); + } + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java index e01b716ab24..712f618eb07 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertConvergeServiceTest.java @@ -46,67 +46,67 @@ @ExtendWith(MockitoExtension.class) class AlertConvergeServiceTest { - @Mock - private AlertConvergeDao alertConvergeDao; + @Mock + private AlertConvergeDao alertConvergeDao; - @InjectMocks - private AlertConvergeServiceImpl alertConvergeService; + @InjectMocks + private AlertConvergeServiceImpl alertConvergeService; - @Test - public void testAddAlertConverge() { + @Test + public void testAddAlertConverge() { - AlertConverge alertConverge = new AlertConverge(); - alertConvergeService.addAlertConverge(alertConverge); + AlertConverge alertConverge = new AlertConverge(); + alertConvergeService.addAlertConverge(alertConverge); - verify(alertConvergeDao, times(1)).save(alertConverge); - } + verify(alertConvergeDao, times(1)).save(alertConverge); + } - @Test - public void testModifyAlertConverge() { + @Test + public void testModifyAlertConverge() { - AlertConverge alertConverge = new AlertConverge(); - alertConvergeService.modifyAlertConverge(alertConverge); + AlertConverge alertConverge = new AlertConverge(); + alertConvergeService.modifyAlertConverge(alertConverge); - verify(alertConvergeDao, times(1)).save(alertConverge); - } + verify(alertConvergeDao, times(1)).save(alertConverge); + } - @Test - public void testGetAlertConverge() { + @Test + public void testGetAlertConverge() { - long convergeId = 1L; - AlertConverge alertConverge = new AlertConverge(); - 
when(alertConvergeDao.findById(convergeId)).thenReturn(Optional.of(alertConverge)); - AlertConverge result = alertConvergeService.getAlertConverge(convergeId); + long convergeId = 1L; + AlertConverge alertConverge = new AlertConverge(); + when(alertConvergeDao.findById(convergeId)).thenReturn(Optional.of(alertConverge)); + AlertConverge result = alertConvergeService.getAlertConverge(convergeId); - verify(alertConvergeDao, times(1)).findById(convergeId); - assertEquals(alertConverge, result); - } + verify(alertConvergeDao, times(1)).findById(convergeId); + assertEquals(alertConverge, result); + } - @Test - public void testDeleteAlertConverges() { + @Test + public void testDeleteAlertConverges() { - Set convergeIds = Set.of(1L, 2L, 3L); - alertConvergeService.deleteAlertConverges(convergeIds); + Set convergeIds = Set.of(1L, 2L, 3L); + alertConvergeService.deleteAlertConverges(convergeIds); - verify(alertConvergeDao, times(1)).deleteAlertConvergesByIdIn(convergeIds); - } + verify(alertConvergeDao, times(1)).deleteAlertConvergesByIdIn(convergeIds); + } - @Test - public void testGetAlertConverges() { + @Test + public void testGetAlertConverges() { - Page page = new PageImpl<>(Collections.emptyList()); - when(alertConvergeDao.findAll( - any(Specification.class), - any(Pageable.class)) - ).thenReturn(page); + Page page = new PageImpl<>(Collections.emptyList()); + when(alertConvergeDao.findAll( + any(Specification.class), + any(Pageable.class)) + ).thenReturn(page); - Page result = alertConvergeService.getAlertConverges(null, null, "id", "desc", 1, 10); + Page result = alertConvergeService.getAlertConverges(null, null, "id", "desc", 1, 10); - verify(alertConvergeDao, times(1)).findAll( - any(Specification.class), - any(PageRequest.class) - ); - assertEquals(page, result); - } + verify(alertConvergeDao, times(1)).findAll( + any(Specification.class), + any(PageRequest.class) + ); + assertEquals(page, result); + } } diff --git 
a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineExcelImExportServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineExcelImExportServiceTest.java index 9f20caab3e0..deb9090b705 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineExcelImExportServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineExcelImExportServiceTest.java @@ -17,12 +17,13 @@ package org.apache.hertzbeat.alert.service; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hertzbeat.alert.dto.AlertDefineDTO; import org.apache.hertzbeat.alert.dto.ExportAlertDefineDTO; import org.apache.hertzbeat.alert.service.impl.AlertDefineExcelImExportServiceImpl; @@ -38,9 +39,6 @@ import org.mockito.InjectMocks; import org.mockito.junit.jupiter.MockitoExtension; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - /** * test case for {@link AlertDefineExcelImExportServiceImpl} */ @@ -48,122 +46,122 @@ @ExtendWith(MockitoExtension.class) public class AlertDefineExcelImExportServiceTest { - @InjectMocks - private AlertDefineExcelImExportServiceImpl alertDefineExcelImExportService; - - private Workbook workbook; - private Sheet sheet; - - @BeforeEach - public void setUp() throws IOException { - - Workbook initialWorkbook = WorkbookFactory.create(true); - Sheet initialSheet = initialWorkbook.createSheet(); - Row headerRow = initialSheet.createRow(0); - String[] headers = {"app", "metric", "field", "preset", "expr", "priority", "times", "tags", "enable", "recoverNotice", "template"}; - for (int i = 0; i < headers.length; i++) { - Cell cell = headerRow.createCell(i); - 
cell.setCellValue(headers[i]); - } - - Row row = initialSheet.createRow(1); - row.createCell(0).setCellValue("app1"); - row.createCell(1).setCellValue("metric1"); - row.createCell(2).setCellValue("field1"); - row.createCell(3).setCellValue(true); - row.createCell(4).setCellValue("expr1"); - row.createCell(5).setCellValue(1); - row.createCell(6).setCellValue(10); - row.createCell(7).setCellValue("[{\"name\":\"tag1\",\"value\":\"value1\"}]"); - row.createCell(8).setCellValue(true); - row.createCell(9).setCellValue(true); - row.createCell(10).setCellValue("template1"); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(toByteArray(initialWorkbook)); - - workbook = WorkbookFactory.create(inputStream); - sheet = workbook.getSheetAt(0); - } - - @Test - public void testParseImport() throws IOException { - - try (ByteArrayInputStream inputStream = new ByteArrayInputStream(toByteArray(workbook))) { - List result = alertDefineExcelImExportService.parseImport(inputStream); - - assertEquals(1, result.size()); - AlertDefineDTO alertDefineDTO = result.get(0).getAlertDefine(); - assertEquals("app1", alertDefineDTO.getApp()); - assertEquals("metric1", alertDefineDTO.getMetric()); - assertEquals("field1", alertDefineDTO.getField()); - assertTrue(alertDefineDTO.getPreset()); - assertEquals("expr1", alertDefineDTO.getExpr()); - assertEquals(10, alertDefineDTO.getTimes()); - assertEquals(1, alertDefineDTO.getTags().size()); - assertEquals("tag1", alertDefineDTO.getTags().get(0).getName()); - assertEquals("value1", alertDefineDTO.getTags().get(0).getValue()); - assertTrue(alertDefineDTO.getEnable()); - assertTrue(alertDefineDTO.getRecoverNotice()); - assertEquals("template1", alertDefineDTO.getTemplate()); - } - } - - @Test - public void testWriteOs() throws IOException { - - List exportAlertDefineList = new ArrayList<>(); - ExportAlertDefineDTO exportAlertDefineDTO = new ExportAlertDefineDTO(); - AlertDefineDTO alertDefineDTO = new AlertDefineDTO(); - 
alertDefineDTO.setApp("app1"); - alertDefineDTO.setMetric("metric1"); - alertDefineDTO.setField("field1"); - alertDefineDTO.setPreset(true); - alertDefineDTO.setExpr("expr1"); - alertDefineDTO.setPriority((byte) 1); - alertDefineDTO.setTimes(10); - List tags = new ArrayList<>(); - TagItem tagItem = new TagItem(); - tagItem.setName("tag1"); - tagItem.setValue("value1"); - tags.add(tagItem); - alertDefineDTO.setTags(tags); - alertDefineDTO.setEnable(true); - alertDefineDTO.setRecoverNotice(true); - alertDefineDTO.setTemplate("template1"); - exportAlertDefineDTO.setAlertDefine(alertDefineDTO); - exportAlertDefineList.add(exportAlertDefineDTO); - - try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { - alertDefineExcelImExportService.writeOs(exportAlertDefineList, outputStream); - - try (Workbook resultWorkbook = WorkbookFactory.create(new ByteArrayInputStream(outputStream.toByteArray()))) { - Sheet resultSheet = resultWorkbook.getSheetAt(0); - Row headerRow = resultSheet.getRow(0); - assertEquals("app", headerRow.getCell(0).getStringCellValue()); - assertEquals("metric", headerRow.getCell(1).getStringCellValue()); - - Row dataRow = resultSheet.getRow(1); - assertEquals("app1", dataRow.getCell(0).getStringCellValue()); - assertEquals("metric1", dataRow.getCell(1).getStringCellValue()); - assertEquals("field1", dataRow.getCell(2).getStringCellValue()); - assertTrue(dataRow.getCell(3).getBooleanCellValue()); - assertEquals("expr1", dataRow.getCell(4).getStringCellValue()); - assertEquals(1, (int) dataRow.getCell(5).getNumericCellValue()); - assertEquals(10, (int) dataRow.getCell(6).getNumericCellValue()); - assertEquals("[{\"name\":\"tag1\",\"value\":\"value1\"}]", dataRow.getCell(7).getStringCellValue()); - assertTrue(dataRow.getCell(8).getBooleanCellValue()); - assertTrue(dataRow.getCell(9).getBooleanCellValue()); - assertEquals("template1", dataRow.getCell(10).getStringCellValue()); - } - } - } - - private byte[] toByteArray(Workbook workbook) 
throws IOException { - - try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { - workbook.write(bos); - return bos.toByteArray(); - } - } + @InjectMocks + private AlertDefineExcelImExportServiceImpl alertDefineExcelImExportService; + + private Workbook workbook; + private Sheet sheet; + + @BeforeEach + public void setUp() throws IOException { + + Workbook initialWorkbook = WorkbookFactory.create(true); + Sheet initialSheet = initialWorkbook.createSheet(); + Row headerRow = initialSheet.createRow(0); + String[] headers = {"app", "metric", "field", "preset", "expr", "priority", "times", "tags", "enable", "recoverNotice", "template"}; + for (int i = 0; i < headers.length; i++) { + Cell cell = headerRow.createCell(i); + cell.setCellValue(headers[i]); + } + + Row row = initialSheet.createRow(1); + row.createCell(0).setCellValue("app1"); + row.createCell(1).setCellValue("metric1"); + row.createCell(2).setCellValue("field1"); + row.createCell(3).setCellValue(true); + row.createCell(4).setCellValue("expr1"); + row.createCell(5).setCellValue(1); + row.createCell(6).setCellValue(10); + row.createCell(7).setCellValue("[{\"name\":\"tag1\",\"value\":\"value1\"}]"); + row.createCell(8).setCellValue(true); + row.createCell(9).setCellValue(true); + row.createCell(10).setCellValue("template1"); + + ByteArrayInputStream inputStream = new ByteArrayInputStream(toByteArray(initialWorkbook)); + + workbook = WorkbookFactory.create(inputStream); + sheet = workbook.getSheetAt(0); + } + + @Test + public void testParseImport() throws IOException { + + try (ByteArrayInputStream inputStream = new ByteArrayInputStream(toByteArray(workbook))) { + List result = alertDefineExcelImExportService.parseImport(inputStream); + + assertEquals(1, result.size()); + AlertDefineDTO alertDefineDTO = result.get(0).getAlertDefine(); + assertEquals("app1", alertDefineDTO.getApp()); + assertEquals("metric1", alertDefineDTO.getMetric()); + assertEquals("field1", alertDefineDTO.getField()); + 
assertTrue(alertDefineDTO.getPreset()); + assertEquals("expr1", alertDefineDTO.getExpr()); + assertEquals(10, alertDefineDTO.getTimes()); + assertEquals(1, alertDefineDTO.getTags().size()); + assertEquals("tag1", alertDefineDTO.getTags().get(0).getName()); + assertEquals("value1", alertDefineDTO.getTags().get(0).getValue()); + assertTrue(alertDefineDTO.getEnable()); + assertTrue(alertDefineDTO.getRecoverNotice()); + assertEquals("template1", alertDefineDTO.getTemplate()); + } + } + + @Test + public void testWriteOs() throws IOException { + + List exportAlertDefineList = new ArrayList<>(); + ExportAlertDefineDTO exportAlertDefineDTO = new ExportAlertDefineDTO(); + AlertDefineDTO alertDefineDTO = new AlertDefineDTO(); + alertDefineDTO.setApp("app1"); + alertDefineDTO.setMetric("metric1"); + alertDefineDTO.setField("field1"); + alertDefineDTO.setPreset(true); + alertDefineDTO.setExpr("expr1"); + alertDefineDTO.setPriority((byte) 1); + alertDefineDTO.setTimes(10); + List tags = new ArrayList<>(); + TagItem tagItem = new TagItem(); + tagItem.setName("tag1"); + tagItem.setValue("value1"); + tags.add(tagItem); + alertDefineDTO.setTags(tags); + alertDefineDTO.setEnable(true); + alertDefineDTO.setRecoverNotice(true); + alertDefineDTO.setTemplate("template1"); + exportAlertDefineDTO.setAlertDefine(alertDefineDTO); + exportAlertDefineList.add(exportAlertDefineDTO); + + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { + alertDefineExcelImExportService.writeOs(exportAlertDefineList, outputStream); + + try (Workbook resultWorkbook = WorkbookFactory.create(new ByteArrayInputStream(outputStream.toByteArray()))) { + Sheet resultSheet = resultWorkbook.getSheetAt(0); + Row headerRow = resultSheet.getRow(0); + assertEquals("app", headerRow.getCell(0).getStringCellValue()); + assertEquals("metric", headerRow.getCell(1).getStringCellValue()); + + Row dataRow = resultSheet.getRow(1); + assertEquals("app1", dataRow.getCell(0).getStringCellValue()); + 
assertEquals("metric1", dataRow.getCell(1).getStringCellValue()); + assertEquals("field1", dataRow.getCell(2).getStringCellValue()); + assertTrue(dataRow.getCell(3).getBooleanCellValue()); + assertEquals("expr1", dataRow.getCell(4).getStringCellValue()); + assertEquals(1, (int) dataRow.getCell(5).getNumericCellValue()); + assertEquals(10, (int) dataRow.getCell(6).getNumericCellValue()); + assertEquals("[{\"name\":\"tag1\",\"value\":\"value1\"}]", dataRow.getCell(7).getStringCellValue()); + assertTrue(dataRow.getCell(8).getBooleanCellValue()); + assertTrue(dataRow.getCell(9).getBooleanCellValue()); + assertEquals("template1", dataRow.getCell(10).getStringCellValue()); + } + } + } + + private byte[] toByteArray(Workbook workbook) throws IOException { + + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + workbook.write(bos); + return bos.toByteArray(); + } + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java index 6f690083d75..925c0e70c49 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineJsonImExportServiceTest.java @@ -17,15 +17,23 @@ package org.apache.hertzbeat.alert.service; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; import 
java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.List; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.hertzbeat.alert.dto.AlertDefineDTO; import org.apache.hertzbeat.alert.dto.ExportAlertDefineDTO; import org.apache.hertzbeat.alert.service.impl.AlertDefineJsonImExportServiceImpl; @@ -36,16 +44,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * test case for {@link AlertDefineJsonImExportServiceImpl} */ @@ -53,95 +51,98 @@ @ExtendWith(MockitoExtension.class) class AlertDefineJsonImExportServiceTest { - @Mock - private ObjectMapper objectMapper; + @Mock + private ObjectMapper objectMapper; - @InjectMocks - private AlertDefineJsonImExportServiceImpl service; + @InjectMocks + private AlertDefineJsonImExportServiceImpl service; - private static final String JSON_DATA = "[{\"alertDefine\":{\"app\":\"App1\",\"metric\":\"Metric1\",\"field\":\"Field1\",\"preset\":true,\"expr\":\"Expr1\",\"priority\":1,\"times\":1,\"tags\":[],\"enable\":true,\"recoverNotice\":true,\"template\":\"Template1\"}}]"; + @SuppressWarnings("checkstyle:OperatorWrap") + private static final String JSON_DATA = "[{\"alertDefine\":{\"app\":\"App1\",\"metric\":\"Metric1\"," + + "\"field\":\"Field1\",\"preset\":true,\"expr\":\"Expr1\",\"priority\":1,\"times\":1,\"tags\":[]," + + 
"\"enable\":true,\"recoverNotice\":true,\"template\":\"Template1\"}}]"; - private InputStream inputStream; - private List alertDefineList; + private InputStream inputStream; + private List alertDefineList; - @BeforeEach - public void setup() { + @BeforeEach + public void setup() { - inputStream = new ByteArrayInputStream(JSON_DATA.getBytes()); + inputStream = new ByteArrayInputStream(JSON_DATA.getBytes()); - AlertDefineDTO alertDefine = new AlertDefineDTO(); - alertDefine.setApp("App1"); - alertDefine.setMetric("Metric1"); - alertDefine.setField("Field1"); - alertDefine.setPreset(true); - alertDefine.setExpr("Expr1"); - alertDefine.setPriority((byte) 1); - alertDefine.setTimes(1); - alertDefine.setTags(List.of()); - alertDefine.setEnable(true); - alertDefine.setRecoverNotice(true); - alertDefine.setTemplate("Template1"); + AlertDefineDTO alertDefine = new AlertDefineDTO(); + alertDefine.setApp("App1"); + alertDefine.setMetric("Metric1"); + alertDefine.setField("Field1"); + alertDefine.setPreset(true); + alertDefine.setExpr("Expr1"); + alertDefine.setPriority((byte) 1); + alertDefine.setTimes(1); + alertDefine.setTags(List.of()); + alertDefine.setEnable(true); + alertDefine.setRecoverNotice(true); + alertDefine.setTemplate("Template1"); - ExportAlertDefineDTO exportAlertDefine = new ExportAlertDefineDTO(); - exportAlertDefine.setAlertDefine(alertDefine); + ExportAlertDefineDTO exportAlertDefine = new ExportAlertDefineDTO(); + exportAlertDefine.setAlertDefine(alertDefine); - alertDefineList = List.of(exportAlertDefine); - } + alertDefineList = List.of(exportAlertDefine); + } - @Test - void testParseImport() throws IOException { + @Test + void testParseImport() throws IOException { - when(objectMapper.readValue( - any(InputStream.class), - any(TypeReference.class)) - ).thenReturn(alertDefineList); + when(objectMapper.readValue( + any(InputStream.class), + any(TypeReference.class)) + ).thenReturn(alertDefineList); - List result = service.parseImport(inputStream); + 
List result = service.parseImport(inputStream); - assertNotNull(result); - assertEquals(1, result.size()); - assertEquals(alertDefineList, result); - verify(objectMapper, times(1)).readValue(any(InputStream.class), any(TypeReference.class)); - } + assertNotNull(result); + assertEquals(1, result.size()); + assertEquals(alertDefineList, result); + verify(objectMapper, times(1)).readValue(any(InputStream.class), any(TypeReference.class)); + } - @Test - void testParseImportFailed() throws IOException { + @Test + void testParseImportFailed() throws IOException { - when(objectMapper.readValue( - any(InputStream.class), - any(TypeReference.class)) - ).thenThrow(new IOException("Test Exception")); + when(objectMapper.readValue( + any(InputStream.class), + any(TypeReference.class)) + ).thenThrow(new IOException("Test Exception")); - RuntimeException exception = assertThrows(RuntimeException.class, () -> service.parseImport(inputStream)); + RuntimeException exception = assertThrows(RuntimeException.class, () -> service.parseImport(inputStream)); - assertEquals("import alertDefine failed", exception.getMessage()); - verify(objectMapper, times(1)).readValue(any(InputStream.class), any(TypeReference.class)); - } + assertEquals("import alertDefine failed", exception.getMessage()); + verify(objectMapper, times(1)).readValue(any(InputStream.class), any(TypeReference.class)); + } - @Test - void testWriteOs() throws IOException { + @Test + void testWriteOs() throws IOException { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - service.writeOs(alertDefineList, outputStream); + service.writeOs(alertDefineList, outputStream); - verify(objectMapper, times(1)).writeValue(any(OutputStream.class), eq(alertDefineList)); - } + verify(objectMapper, times(1)).writeValue(any(OutputStream.class), eq(alertDefineList)); + } - @Test - void testWriteOsFailed() throws IOException { + @Test + void 
testWriteOsFailed() throws IOException { - doThrow(new IOException("Test Exception")).when(objectMapper).writeValue(any(OutputStream.class), any()); + doThrow(new IOException("Test Exception")).when(objectMapper).writeValue(any(OutputStream.class), any()); - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - RuntimeException exception = assertThrows( - RuntimeException.class, - () -> service.writeOs(alertDefineList, outputStream) - ); + RuntimeException exception = assertThrows( + RuntimeException.class, + () -> service.writeOs(alertDefineList, outputStream) + ); - assertEquals("export alertDefine failed", exception.getMessage()); - verify(objectMapper, times(1)).writeValue(any(OutputStream.class), eq(alertDefineList)); - } + assertEquals("export alertDefine failed", exception.getMessage()); + verify(objectMapper, times(1)).writeValue(any(OutputStream.class), eq(alertDefineList)); + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java index 840da444fc5..fe030c79c0b 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertDefineYamlImExportServiceTest.java @@ -17,6 +17,16 @@ package org.apache.hertzbeat.alert.service; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import 
static org.mockito.Mockito.when; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -24,7 +34,6 @@ import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.util.List; - import org.apache.hertzbeat.alert.dto.AlertDefineDTO; import org.apache.hertzbeat.alert.dto.ExportAlertDefineDTO; import org.apache.hertzbeat.alert.service.impl.AlertDefineYamlImExportServiceImpl; @@ -36,17 +45,6 @@ import org.mockito.junit.jupiter.MockitoExtension; import org.yaml.snakeyaml.Yaml; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - /** * test case for {@link AlertDefineYamlImExportServiceImpl} */ @@ -54,10 +52,10 @@ @ExtendWith(MockitoExtension.class) class AlertDefineYamlImExportServiceTest { - @InjectMocks - private AlertDefineYamlImExportServiceImpl service; + @InjectMocks + private AlertDefineYamlImExportServiceImpl service; - private static final String YAML_DATA = + private static final String YAML_DATA = """ - alertDefine: app: App1 @@ -73,94 +71,94 @@ class AlertDefineYamlImExportServiceTest { template: Template1 """; - private InputStream inputStream; - private List alertDefineList; + private InputStream inputStream; + private List alertDefineList; - @BeforeEach - public void setup() { + @BeforeEach + public void setup() { - inputStream = new ByteArrayInputStream(YAML_DATA.getBytes(StandardCharsets.UTF_8)); + inputStream = new ByteArrayInputStream(YAML_DATA.getBytes(StandardCharsets.UTF_8)); - AlertDefineDTO alertDefine = new 
AlertDefineDTO(); - alertDefine.setApp("App1"); - alertDefine.setMetric("Metric1"); - alertDefine.setField("Field1"); - alertDefine.setPreset(true); - alertDefine.setExpr("Expr1"); - alertDefine.setPriority((byte) 1); - alertDefine.setTimes(1); - alertDefine.setTags(List.of()); - alertDefine.setEnable(true); - alertDefine.setRecoverNotice(true); - alertDefine.setTemplate("Template1"); + AlertDefineDTO alertDefine = new AlertDefineDTO(); + alertDefine.setApp("App1"); + alertDefine.setMetric("Metric1"); + alertDefine.setField("Field1"); + alertDefine.setPreset(true); + alertDefine.setExpr("Expr1"); + alertDefine.setPriority((byte) 1); + alertDefine.setTimes(1); + alertDefine.setTags(List.of()); + alertDefine.setEnable(true); + alertDefine.setRecoverNotice(true); + alertDefine.setTemplate("Template1"); - ExportAlertDefineDTO exportAlertDefine = new ExportAlertDefineDTO(); - exportAlertDefine.setAlertDefine(alertDefine); + ExportAlertDefineDTO exportAlertDefine = new ExportAlertDefineDTO(); + exportAlertDefine.setAlertDefine(alertDefine); - alertDefineList = List.of(exportAlertDefine); - } + alertDefineList = List.of(exportAlertDefine); + } - @Test - void testParseImport() { + @Test + void testParseImport() { - List result = service.parseImport(inputStream); + List result = service.parseImport(inputStream); - assertNotNull(result); - assertEquals(1, result.size()); + assertNotNull(result); + assertEquals(1, result.size()); - InputStream inputStream = new ByteArrayInputStream(JsonUtil.toJson(alertDefineList) - .getBytes(StandardCharsets.UTF_8)); - Yaml yaml = new Yaml(); + InputStream inputStream = new ByteArrayInputStream(JsonUtil.toJson(alertDefineList) + .getBytes(StandardCharsets.UTF_8)); + Yaml yaml = new Yaml(); - assertEquals(yaml.load(inputStream), result); - } + assertEquals(yaml.load(inputStream), result); + } - @Test - void testParseImportFailed() { + @Test + void testParseImportFailed() { - InputStream faultyInputStream = mock(InputStream.class); - try { - 
when(faultyInputStream.read( - any(byte[].class), - anyInt(), anyInt()) - ).thenThrow(new IOException("Test Exception")); + InputStream faultyInputStream = mock(InputStream.class); + try { + when(faultyInputStream.read( + any(byte[].class), + anyInt(), anyInt()) + ).thenThrow(new IOException("Test Exception")); - RuntimeException exception = assertThrows( - RuntimeException.class, - () -> service.parseImport(faultyInputStream) - ); - assertEquals("java.io.IOException: Test Exception", exception.getMessage()); - } catch (IOException e) { + RuntimeException exception = assertThrows( + RuntimeException.class, + () -> service.parseImport(faultyInputStream) + ); + assertEquals("java.io.IOException: Test Exception", exception.getMessage()); + } catch (IOException e) { - fail("Mocking IOException failed"); - } - } + fail("Mocking IOException failed"); + } + } - @Test - void testWriteOs() { + @Test + void testWriteOs() { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - service.writeOs(alertDefineList, outputStream); - String yamlOutput = outputStream.toString(StandardCharsets.UTF_8); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + service.writeOs(alertDefineList, outputStream); + String yamlOutput = outputStream.toString(StandardCharsets.UTF_8); - assertTrue(yamlOutput.contains("app: App1")); - assertTrue(yamlOutput.contains("metric: Metric1")); - } + assertTrue(yamlOutput.contains("app: App1")); + assertTrue(yamlOutput.contains("metric: Metric1")); + } - @Test - void testWriteOsFailed() { + @Test + void testWriteOsFailed() { - OutputStream faultyOutputStream = mock(OutputStream.class); + OutputStream faultyOutputStream = mock(OutputStream.class); - try { - doThrow(new IOException("Test Exception")).when(faultyOutputStream).write(any(byte[].class), anyInt(), anyInt()); + try { + doThrow(new IOException("Test Exception")).when(faultyOutputStream).write(any(byte[].class), anyInt(), anyInt()); - RuntimeException exception = 
assertThrows(RuntimeException.class, () -> service.writeOs(alertDefineList, faultyOutputStream)); - assertEquals("java.io.IOException: Test Exception", exception.getMessage()); - } catch (IOException e) { + RuntimeException exception = assertThrows(RuntimeException.class, () -> service.writeOs(alertDefineList, faultyOutputStream)); + assertEquals("java.io.IOException: Test Exception", exception.getMessage()); + } catch (IOException e) { - fail("Mocking IOException failed"); - } - } + fail("Mocking IOException failed"); + } + } } diff --git a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java index 2915b287592..24b059ac6ab 100644 --- a/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java +++ b/alerter/src/test/java/org/apache/hertzbeat/alert/service/AlertSilenceServiceTest.java @@ -45,83 +45,83 @@ class AlertSilenceServiceTest { - @Mock - private AlertSilenceDao alertSilenceDao; + @Mock + private AlertSilenceDao alertSilenceDao; - @InjectMocks - private AlertSilenceServiceImpl alertSilenceService; + @InjectMocks + private AlertSilenceServiceImpl alertSilenceService; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - MockitoAnnotations.openMocks(this); + MockitoAnnotations.openMocks(this); - alertSilenceDao.save(AlertSilence - .builder() - .id(1L) - .type((byte) 1) - .build() - ); + alertSilenceDao.save(AlertSilence + .builder() + .id(1L) + .type((byte) 1) + .build() + ); - assertNotNull(alertSilenceDao.findAll()); - } + assertNotNull(alertSilenceDao.findAll()); + } - @Test - void testValidate() { + @Test + void testValidate() { - AlertSilence alertSilence = new AlertSilence(); - alertSilence.setType((byte) 1); + AlertSilence alertSilence = new AlertSilence(); + alertSilence.setType((byte) 1); - alertSilenceService.validate(alertSilence, false); + alertSilenceService.validate(alertSilence, false); - 
assertNotNull(alertSilence.getDays()); - assertEquals(7, alertSilence.getDays().size()); - } + assertNotNull(alertSilence.getDays()); + assertEquals(7, alertSilence.getDays().size()); + } - @Test - void testAddAlertSilence() { + @Test + void testAddAlertSilence() { - AlertSilence alertSilence = new AlertSilence(); - when(alertSilenceDao.save(any(AlertSilence.class))).thenReturn(alertSilence); + AlertSilence alertSilence = new AlertSilence(); + when(alertSilenceDao.save(any(AlertSilence.class))).thenReturn(alertSilence); - assertDoesNotThrow(() -> alertSilenceService.addAlertSilence(alertSilence)); - verify(alertSilenceDao, times(1)).save(alertSilence); - } + assertDoesNotThrow(() -> alertSilenceService.addAlertSilence(alertSilence)); + verify(alertSilenceDao, times(1)).save(alertSilence); + } - @Test - void testModifyAlertSilence() { - AlertSilence alertSilence = new AlertSilence(); - when(alertSilenceDao.save(any(AlertSilence.class))).thenReturn(alertSilence); + @Test + void testModifyAlertSilence() { + AlertSilence alertSilence = new AlertSilence(); + when(alertSilenceDao.save(any(AlertSilence.class))).thenReturn(alertSilence); - assertDoesNotThrow(() -> alertSilenceService.modifyAlertSilence(alertSilence)); - verify(alertSilenceDao, times(1)).save(alertSilence); - } + assertDoesNotThrow(() -> alertSilenceService.modifyAlertSilence(alertSilence)); + verify(alertSilenceDao, times(1)).save(alertSilence); + } - @Test - void testGetAlertSilence() { - AlertSilence alertSilence = new AlertSilence(); - when(alertSilenceDao.findById(anyLong())).thenReturn(Optional.of(alertSilence)); + @Test + void testGetAlertSilence() { + AlertSilence alertSilence = new AlertSilence(); + when(alertSilenceDao.findById(anyLong())).thenReturn(Optional.of(alertSilence)); - AlertSilence result = alertSilenceService.getAlertSilence(1L); - assertNotNull(result); - verify(alertSilenceDao, times(1)).findById(1L); - } + AlertSilence result = alertSilenceService.getAlertSilence(1L); + 
assertNotNull(result); + verify(alertSilenceDao, times(1)).findById(1L); + } - @Test - void testGetAlertSilences() { - when(alertSilenceDao.findAll(any(Specification.class), any(PageRequest.class))).thenReturn(Page.empty()); - assertDoesNotThrow(() -> alertSilenceService.getAlertSilences(null, null, "id", "desc", 1, 10)); - verify(alertSilenceDao, times(1)).findAll(any(Specification.class), any(PageRequest.class)); + @Test + void testGetAlertSilences() { + when(alertSilenceDao.findAll(any(Specification.class), any(PageRequest.class))).thenReturn(Page.empty()); + assertDoesNotThrow(() -> alertSilenceService.getAlertSilences(null, null, "id", "desc", 1, 10)); + verify(alertSilenceDao, times(1)).findAll(any(Specification.class), any(PageRequest.class)); - assertNotNull(alertSilenceService.getAlertSilences(null, null, "id", "desc", 1, 10)); - } + assertNotNull(alertSilenceService.getAlertSilences(null, null, "id", "desc", 1, 10)); + } - @Test - void testDeleteAlertSilences() { + @Test + void testDeleteAlertSilences() { - alertSilenceDao.deleteAlertSilencesByIdIn(Set.of(1L)); + alertSilenceDao.deleteAlertSilencesByIdIn(Set.of(1L)); - verify(alertSilenceDao, times(1)).deleteAlertSilencesByIdIn(Set.of(1L)); - } + verify(alertSilenceDao, times(1)).deleteAlertSilencesByIdIn(Set.of(1L)); + } } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/dns/DnsCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/dns/DnsCollectImplTest.java index 6ba1b09f3a5..e362c1890e9 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/dns/DnsCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/dns/DnsCollectImplTest.java @@ -19,7 +19,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; - import java.util.Collections; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.job.protocol.DnsProtocol; @@ -30,7 +29,6 @@ /** * Test 
case for {@link DnsCollectImpl} - * */ public class DnsCollectImplTest { private DnsProtocol dnsProtocol; diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/ftp/FtpCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/ftp/FtpCollectImplTest.java index 2cd0a13c726..f4737bd49c9 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/ftp/FtpCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/ftp/FtpCollectImplTest.java @@ -60,7 +60,7 @@ void testPreCheck() { metrics.setName("server"); metrics.setFtp(ftpProtocol); metrics.setAliasFields(aliasField); - assertThrows(IllegalArgumentException.class, ()-> ftpCollectImpl.preCheck(metrics)); + assertThrows(IllegalArgumentException.class, () -> ftpCollectImpl.preCheck(metrics)); } @@ -144,4 +144,4 @@ void testAnonymousCollect() throws IOException { } -} \ No newline at end of file +} diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java index 5e85a4777f0..aaab0949f03 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImplTest.java @@ -62,9 +62,9 @@ class IcmpCollectImplTest { @BeforeEach void setUp() { icmpProtocol = IcmpProtocol.builder() - .host("127.0.0.1") - .timeout("3000") - .build(); + .host("127.0.0.1") + .timeout("3000") + .build(); List aliasField = new ArrayList<>(); aliasField.add("responseTime"); metrics = new Metrics(); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java index a05b311c634..2e2b7a59db5 100644 --- 
a/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/imap/ImapCollectImplTest.java @@ -78,15 +78,15 @@ void preCheck() { void enableSslCollect() { String response = "* STATUS \"testFolder\" (MESSAGES 3 RECENT 2 UNSEEN 1)"; MockedConstruction mocked = Mockito.mockConstruction(IMAPSClient.class, - (imapsClient, context) -> { - Mockito.doNothing().when(imapsClient).connect(Mockito.anyString(), Mockito.anyInt()); - Mockito.doAnswer(invocationOnMock -> true).when(imapsClient).login(Mockito.anyString(), Mockito.anyString()); - Mockito.doAnswer(invocationOnMock -> true).when(imapsClient).isConnected(); - Mockito.when(imapsClient.sendCommand(Mockito.anyString())).thenReturn(0); - Mockito.when(imapsClient.getReplyString()).thenReturn(response); - Mockito.doAnswer(invocationOnMock -> true).when(imapsClient).logout(); - Mockito.doNothing().when(imapsClient).disconnect(); - }); + (imapsClient, context) -> { + Mockito.doNothing().when(imapsClient).connect(Mockito.anyString(), Mockito.anyInt()); + Mockito.doAnswer(invocationOnMock -> true).when(imapsClient).login(Mockito.anyString(), Mockito.anyString()); + Mockito.doAnswer(invocationOnMock -> true).when(imapsClient).isConnected(); + Mockito.when(imapsClient.sendCommand(Mockito.anyString())).thenReturn(0); + Mockito.when(imapsClient.getReplyString()).thenReturn(response); + Mockito.doAnswer(invocationOnMock -> true).when(imapsClient).logout(); + Mockito.doNothing().when(imapsClient).disconnect(); + }); imapCollect.preCheck(metrics); imapCollect.collect(builder, 1L, "testIMAP", metrics); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java index 8756a878086..401eb22627b 100644 --- 
a/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/memcached/MemcachedCollectImplTest.java @@ -79,10 +79,10 @@ void testPreCheck() { void testCollectCmdResponse() { String httpResponse = """ - STAT pid 1 - STAT uptime 2 - END - """; + STAT pid 1 + STAT uptime 2 + END + """; OutputStream outputStreamMock = Mockito.mock(OutputStream.class); byte[] responseBytes = httpResponse.getBytes(StandardCharsets.UTF_8); InputStream inputStream = new ByteArrayInputStream(responseBytes); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImplTest.java index 25f6050a333..98c2c978e1d 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/nebulagraph/NgqlCollectImplTest.java @@ -50,11 +50,11 @@ class NgqlCollectImplTest { @BeforeEach public void init() { ngqlProtocol = NgqlProtocol.builder() - .host("127.0.0.1") - .port("9669") - .password("123456") - .username("root") - .timeout("60000").build(); + .host("127.0.0.1") + .port("9669") + .password("123456") + .username("root") + .timeout("60000").build(); } @Test @@ -74,11 +74,11 @@ void testOneRowCollect() { result.add(data); MockedConstruction mocked = - Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { - Mockito.doNothing().when(template).closeSessionAndPool(); - Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); - Mockito.when(template.executeCommand(ngql)).thenReturn(result); - }); + Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { + Mockito.doNothing().when(template).closeSessionAndPool(); + Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); + 
Mockito.when(template.executeCommand(ngql)).thenReturn(result); + }); Metrics metrics = new Metrics(); metrics.setNgql(ngqlProtocol); @@ -108,11 +108,11 @@ void testFilterCountCollect() { result.add(data); } MockedConstruction mocked = - Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { - Mockito.doNothing().when(template).closeSessionAndPool(); - Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); - Mockito.when(template.executeCommand("SHOW HOSTS")).thenReturn(result); - }); + Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { + Mockito.doNothing().when(template).closeSessionAndPool(); + Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); + Mockito.when(template.executeCommand("SHOW HOSTS")).thenReturn(result); + }); Metrics metrics = new Metrics(); metrics.setNgql(ngqlProtocol); @@ -141,11 +141,11 @@ void testMultiRowCollect() { result.add(data); } MockedConstruction mocked = - Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { - Mockito.doNothing().when(template).closeSessionAndPool(); - Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); - Mockito.when(template.executeCommand(command)).thenReturn(result); - }); + Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { + Mockito.doNothing().when(template).closeSessionAndPool(); + Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); + Mockito.when(template.executeCommand(command)).thenReturn(result); + }); Metrics metrics = new Metrics(); metrics.setNgql(ngqlProtocol); @@ -179,11 +179,11 @@ void testColumnsCollect() { result.add(data); } MockedConstruction mocked = - Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { - Mockito.doNothing().when(template).closeSessionAndPool(); - Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); - Mockito.when(template.executeCommand(command)).thenReturn(result); - }); + 
Mockito.mockConstruction(NebulaTemplate.class, (template, context) -> { + Mockito.doNothing().when(template).closeSessionAndPool(); + Mockito.when(template.initSession(ngqlProtocol)).thenReturn(true); + Mockito.when(template.executeCommand(command)).thenReturn(result); + }); Metrics metrics = new Metrics(); metrics.setNgql(ngqlProtocol); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java index df41c165f51..3d44e884f2b 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisClusterCollectImplTest.java @@ -45,7 +45,7 @@ */ @ExtendWith(MockitoExtension.class) public class RedisClusterCollectImplTest { - + @InjectMocks private RedisCommonCollectImpl redisClusterCollect; @@ -70,7 +70,7 @@ void setDown() { } @Test - void testCollect(){ + void testCollect() { RedisProtocol redisProtocol = RedisProtocol.builder() .host("127.0.0.1") .port("6379") @@ -111,7 +111,7 @@ void testCollect(){ metrics.setFields(fields); - Mockito.mockStatic(RedisClusterClient.class).when(()->RedisClusterClient.create(Mockito.any(ClientResources.class), + Mockito.mockStatic(RedisClusterClient.class).when(() -> RedisClusterClient.create(Mockito.any(ClientResources.class), Mockito.any(RedisURI.class))).thenReturn(client); Mockito.when(client.connect()).thenReturn(connection); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java index be6a259667b..31646b9caea 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java +++ 
b/collector/src/test/java/org/apache/hertzbeat/collector/collect/redis/RedisSingleCollectImplTest.java @@ -70,7 +70,7 @@ void setUp() { .pattern("1") .build(); } - + @AfterEach void setDown() { connection.close(); @@ -84,10 +84,10 @@ void getInstance() { @Test void collect() { String info = """ - # CPU - used_cpu_sys:0.544635 - used_cpu_user:0.330690 - """; + # CPU + used_cpu_sys:0.544635 + used_cpu_user:0.330690 + """; CollectRep.MetricsData.Builder builder = CollectRep.MetricsData.newBuilder(); List aliasField = new ArrayList<>(); aliasField.add("used_cpu_sys"); @@ -99,7 +99,7 @@ void collect() { MockedStatic clientMockedStatic = Mockito.mockStatic(RedisClient.class); clientMockedStatic.when(() -> RedisClient.create(Mockito.any(ClientResources.class), Mockito.any(RedisURI.class))) - .thenReturn(client); + .thenReturn(client); Mockito.when(client.connect()).thenReturn(connection); Mockito.when(connection.sync()).thenReturn(cmd); Mockito.when(cmd.info(metrics.getName())).thenReturn(info); @@ -142,7 +142,7 @@ void testCollect() { MockedStatic clientMockedStatic = Mockito.mockStatic(RedisClient.class); clientMockedStatic.when(() -> RedisClient.create(Mockito.any(ClientResources.class), Mockito.any(RedisURI.class))) - .thenReturn(client); + .thenReturn(client); Mockito.when(client.connect()).thenReturn(connection); Mockito.when(connection.sync()).thenReturn(cmd); diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImplTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImplTest.java index 78f3c9832e8..1b4f8854165 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImplTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/collect/telnet/TelnetCollectImplTest.java @@ -46,7 +46,7 @@ class TelnetCollectImplTest { private TelnetCollectImpl telnetCollect; @Test - void testCollectWithEquals(){ + void testCollectWithEquals() { 
CollectRep.MetricsData.Builder builder = CollectRep.MetricsData.newBuilder(); TelnetProtocol telnetProtocol = TelnetProtocol.builder() .timeout("10") @@ -99,7 +99,7 @@ void testCollectWithEquals(){ } @Test - void testCollectWithTab(){ + void testCollectWithTab() { CollectRep.MetricsData.Builder builder = CollectRep.MetricsData.newBuilder(); TelnetProtocol telnetProtocol = TelnetProtocol.builder() .timeout("10") diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/CommonDispatcherTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/CommonDispatcherTest.java index d67c44c3aa0..6db544ba0d2 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/CommonDispatcherTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/CommonDispatcherTest.java @@ -17,16 +17,21 @@ package org.apache.hertzbeat.collector.dispatch; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.List; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - -import org.apache.hertzbeat.common.entity.job.Job; import org.apache.hertzbeat.collector.dispatch.entrance.internal.CollectJobService; import org.apache.hertzbeat.collector.dispatch.timer.Timeout; import org.apache.hertzbeat.collector.dispatch.timer.TimerDispatch; import org.apache.hertzbeat.collector.dispatch.timer.WheelTimerTask; +import org.apache.hertzbeat.common.entity.job.Job; import org.apache.hertzbeat.common.entity.job.Metrics; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.queue.CommonDataQueue; @@ -35,12 +40,6 @@ import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static 
org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; /** * Test case for {@link CommonDispatcher} diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java index ee7181828c5..7298abbadd7 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/MetricsCollectorQueueTest.java @@ -17,12 +17,12 @@ package org.apache.hertzbeat.collector.dispatch; -import java.util.concurrent.locks.ReentrantLock; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; +import java.util.concurrent.locks.ReentrantLock; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; /** * Test case for {@link MetricsCollectorQueue} diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/WorkerPoolTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/WorkerPoolTest.java index a42583f365b..61a59d450d1 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/WorkerPoolTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/WorkerPoolTest.java @@ -17,13 +17,13 @@ package org.apache.hertzbeat.collector.dispatch; -import java.util.concurrent.RejectedExecutionException; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static 
org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import java.util.concurrent.RejectedExecutionException; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; /** * Test case for {@link WorkerPool} diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/CollectServerTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/CollectServerTest.java index 6e1367923fa..62c42125c27 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/CollectServerTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/CollectServerTest.java @@ -17,9 +17,14 @@ package org.apache.hertzbeat.collector.dispatch.entrance; -import java.util.concurrent.ScheduledExecutorService; - +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import io.netty.channel.Channel; +import java.util.concurrent.ScheduledExecutorService; import org.apache.hertzbeat.collector.dispatch.CollectorInfoProperties; import org.apache.hertzbeat.collector.dispatch.DispatchProperties; import org.apache.hertzbeat.collector.dispatch.entrance.internal.CollectJobService; @@ -34,13 +39,6 @@ import org.mockito.junit.jupiter.MockitoExtension; import org.springframework.test.util.ReflectionTestUtils; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * test case for {@link CollectServer} */ @@ -48,100 +46,100 @@ @ExtendWith(MockitoExtension.class) class 
CollectServerTest { - @Mock - private CollectJobService collectJobService; + @Mock + private CollectJobService collectJobService; - @Mock - private TimerDispatch timerDispatch; + @Mock + private TimerDispatch timerDispatch; - @Mock - private DispatchProperties properties; + @Mock + private DispatchProperties properties; - @Mock - private DispatchProperties.EntranceProperties entranceProperties; + @Mock + private DispatchProperties.EntranceProperties entranceProperties; - @Mock - private DispatchProperties.EntranceProperties.NettyProperties nettyProperties; + @Mock + private DispatchProperties.EntranceProperties.NettyProperties nettyProperties; - @Mock - private CommonThreadPool threadPool; + @Mock + private CommonThreadPool threadPool; - @Mock - private CollectorInfoProperties infoProperties; + @Mock + private CollectorInfoProperties infoProperties; - private CollectServer collectServer; + private CollectServer collectServer; - private CollectServer.CollectNettyEventListener collectNettyEventListener; + private CollectServer.CollectNettyEventListener collectNettyEventListener; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - when(nettyProperties.getManagerHost()).thenReturn("127.0.0.1"); - when(nettyProperties.getManagerPort()).thenReturn(8080); - when(entranceProperties.getNetty()).thenReturn(nettyProperties); - when(properties.getEntrance()).thenReturn(entranceProperties); + when(nettyProperties.getManagerHost()).thenReturn("127.0.0.1"); + when(nettyProperties.getManagerPort()).thenReturn(8080); + when(entranceProperties.getNetty()).thenReturn(nettyProperties); + when(properties.getEntrance()).thenReturn(entranceProperties); - collectServer = new CollectServer(collectJobService, timerDispatch, properties, threadPool, infoProperties); - collectNettyEventListener = collectServer.new CollectNettyEventListener(); - } + collectServer = new CollectServer(collectJobService, timerDispatch, properties, threadPool, infoProperties); + 
collectNettyEventListener = collectServer.new CollectNettyEventListener(); + } - @Test - void testRun() throws Exception { + @Test + void testRun() throws Exception { - RemotingClient remotingClient = mock(RemotingClient.class); - ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); + RemotingClient remotingClient = mock(RemotingClient.class); + ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); - collectServer.run(); + collectServer.run(); - verify(remotingClient, times(1)).start(); - } + verify(remotingClient, times(1)).start(); + } - @Test - void testShutdown() { + @Test + void testShutdown() { - RemotingClient remotingClient = mock(RemotingClient.class); - ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); - ReflectionTestUtils.setField(collectServer, "scheduledExecutor", mock(ScheduledExecutorService.class)); + RemotingClient remotingClient = mock(RemotingClient.class); + ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); + ReflectionTestUtils.setField(collectServer, "scheduledExecutor", mock(ScheduledExecutorService.class)); - collectServer.shutdown(); + collectServer.shutdown(); - ScheduledExecutorService scheduledExecutor = (ScheduledExecutorService) ReflectionTestUtils.getField(collectServer, "scheduledExecutor"); - verify((scheduledExecutor), times(1)).shutdownNow(); - verify(remotingClient, times(1)).shutdown(); - } + ScheduledExecutorService scheduledExecutor = (ScheduledExecutorService) ReflectionTestUtils.getField(collectServer, "scheduledExecutor"); + verify((scheduledExecutor), times(1)).shutdownNow(); + verify(remotingClient, times(1)).shutdown(); + } - @Test - void testSendMsg() { + @Test + void testSendMsg() { - RemotingClient remotingClient = mock(RemotingClient.class); - ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); - ClusterMsg.Message message = mock(ClusterMsg.Message.class); + RemotingClient 
remotingClient = mock(RemotingClient.class); + ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); + ClusterMsg.Message message = mock(ClusterMsg.Message.class); - collectServer.sendMsg(message); + collectServer.sendMsg(message); - verify(remotingClient, times(1)).sendMsg(message); - } + verify(remotingClient, times(1)).sendMsg(message); + } - @Test - void testOnChannelActive() { + @Test + void testOnChannelActive() { - RemotingClient remotingClient = mock(RemotingClient.class); - ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); + RemotingClient remotingClient = mock(RemotingClient.class); + ReflectionTestUtils.setField(collectServer, "remotingClient", remotingClient); - Channel channel = mock(Channel.class); - when(collectJobService.getCollectorIdentity()).thenReturn("collector1"); - when(collectJobService.getCollectorMode()).thenReturn("mode1"); - when(infoProperties.getIp()).thenReturn("127.0.0.1"); - when(infoProperties.getVersion()).thenReturn("1.0"); + Channel channel = mock(Channel.class); + when(collectJobService.getCollectorIdentity()).thenReturn("collector1"); + when(collectJobService.getCollectorMode()).thenReturn("mode1"); + when(infoProperties.getIp()).thenReturn("127.0.0.1"); + when(infoProperties.getVersion()).thenReturn("1.0"); - collectNettyEventListener.onChannelActive(channel); + collectNettyEventListener.onChannelActive(channel); - verify(timerDispatch, times(1)).goOnline(); - verify(remotingClient, times(1)).sendMsg(any(ClusterMsg.Message.class)); + verify(timerDispatch, times(1)).goOnline(); + verify(remotingClient, times(1)).sendMsg(any(ClusterMsg.Message.class)); - ScheduledExecutorService scheduledExecutor = - (ScheduledExecutorService) ReflectionTestUtils.getField(collectServer, "scheduledExecutor"); - assertNotNull(scheduledExecutor); - } + ScheduledExecutorService scheduledExecutor = + (ScheduledExecutorService) ReflectionTestUtils.getField(collectServer, "scheduledExecutor"); + 
assertNotNull(scheduledExecutor); + } } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobServiceTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobServiceTest.java index ca802b654fa..47557983ac2 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobServiceTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/entrance/internal/CollectJobServiceTest.java @@ -17,6 +17,16 @@ package org.apache.hertzbeat.collector.dispatch.entrance.internal; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.List; import java.util.concurrent.CountDownLatch; import org.apache.hertzbeat.collector.dispatch.DispatchProperties; @@ -32,17 +42,6 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * Test case for {@link CollectJobService} */ diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueueTest.java 
b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueueTest.java index f1334160368..9c2318ab86a 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueueTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/export/NettyDataQueueTest.java @@ -17,6 +17,8 @@ package org.apache.hertzbeat.collector.dispatch.export; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import org.apache.hertzbeat.collector.dispatch.entrance.internal.CollectJobService; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.junit.jupiter.api.BeforeEach; @@ -25,37 +27,34 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - /** * test for {@link NettyDataQueue} */ class NettyDataQueueTest { - @Mock - private CollectJobService collectJobService; + @Mock + private CollectJobService collectJobService; - @InjectMocks - private NettyDataQueue nettyDataQueue; + @InjectMocks + private NettyDataQueue nettyDataQueue; - @BeforeEach - public void setUp() { + @BeforeEach + public void setUp() { - MockitoAnnotations.openMocks(this); - } + MockitoAnnotations.openMocks(this); + } - @Test - void testSendMetricsData() { + @Test + void testSendMetricsData() { - CollectRep.MetricsData metricsData = CollectRep.MetricsData - .newBuilder() - .setMetrics("test") - .build(); - nettyDataQueue.sendMetricsData(metricsData); + CollectRep.MetricsData metricsData = CollectRep.MetricsData + .newBuilder() + .setMetrics("test") + .build(); + nettyDataQueue.sendMetricsData(metricsData); - verify(collectJobService, times(1)).sendAsyncCollectData(metricsData); - } + verify(collectJobService, times(1)).sendAsyncCollectData(metricsData); + } } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/unit/DataSizeConvertTest.java 
b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/unit/DataSizeConvertTest.java index 52b33c176c3..5bfe3a6ab8a 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/unit/DataSizeConvertTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/dispatch/unit/DataSizeConvertTest.java @@ -17,14 +17,13 @@ package org.apache.hertzbeat.collector.dispatch.unit; -import org.apache.hertzbeat.collector.dispatch.unit.impl.DataSizeConvert; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.apache.hertzbeat.collector.dispatch.unit.impl.DataSizeConvert; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; /** * Test case for {@link DataSizeConvert} @@ -32,39 +31,39 @@ class DataSizeConvertTest { - private DataSizeConvert convert; + private DataSizeConvert convert; - @BeforeEach - void setUp() { - this.convert = new DataSizeConvert(); - } + @BeforeEach + void setUp() { + this.convert = new DataSizeConvert(); + } - @Test - void testConvert() { + @Test + void testConvert() { - assertEquals("1", convert.convert("1024", "B", "KB")); - assertEquals("1024", convert.convert("1024", "B", "B")); - assertEquals("1", convert.convert("1024", "B", "kb")); - assertEquals("1", convert.convert("1024", "b", "kb")); - assertEquals("1", convert.convert("1024", "KB", "MB")); - assertEquals("1", convert.convert("1024", "MB", "GB")); - assertNull(convert.convert("", "B", "KB")); - assertNull(convert.convert("1024", "INVALID", "KB")); - assertNull(convert.convert("1024", "B", "INVALID")); + assertEquals("1", convert.convert("1024", "B", "KB")); + assertEquals("1024", convert.convert("1024", "B", "B")); + assertEquals("1", convert.convert("1024", "B", 
"kb")); + assertEquals("1", convert.convert("1024", "b", "kb")); + assertEquals("1", convert.convert("1024", "KB", "MB")); + assertEquals("1", convert.convert("1024", "MB", "GB")); + assertNull(convert.convert("", "B", "KB")); + assertNull(convert.convert("1024", "INVALID", "KB")); + assertNull(convert.convert("1024", "B", "INVALID")); - } + } - @Test - void testCheckUnit() { - assertTrue(convert.checkUnit("KB")); - assertTrue(convert.checkUnit("kb")); - assertTrue(convert.checkUnit("Kb")); - assertTrue(convert.checkUnit("kB")); - assertTrue(convert.checkUnit("MB")); - assertTrue(convert.checkUnit("GB")); - assertFalse(convert.checkUnit("INVALID")); - assertFalse(convert.checkUnit(null)); - assertFalse(convert.checkUnit("")); - } + @Test + void testCheckUnit() { + assertTrue(convert.checkUnit("KB")); + assertTrue(convert.checkUnit("kb")); + assertTrue(convert.checkUnit("Kb")); + assertTrue(convert.checkUnit("kB")); + assertTrue(convert.checkUnit("MB")); + assertTrue(convert.checkUnit("GB")); + assertFalse(convert.checkUnit("INVALID")); + assertFalse(convert.checkUnit(null)); + assertFalse(convert.checkUnit("")); + } } diff --git a/collector/src/test/java/org/apache/hertzbeat/collector/util/JsonPathParserTest.java b/collector/src/test/java/org/apache/hertzbeat/collector/util/JsonPathParserTest.java index 8472376fab7..e9beb6e7dfc 100644 --- a/collector/src/test/java/org/apache/hertzbeat/collector/util/JsonPathParserTest.java +++ b/collector/src/test/java/org/apache/hertzbeat/collector/util/JsonPathParserTest.java @@ -32,47 +32,47 @@ class JsonPathParserTest { private static final String JSON_ARRAY = "[{'name': 'tom', 'speed': '433'},{'name': 'lili', 'speed': '543'}]"; public static final String JSON_OBJECT = """ - { - "store": { - "book": [ - { - "category": "reference", - "author": "Nigel Rees", - "title": "Sayings of the Century", - "price": 8.95 - }, - { - "category": "fiction", - "author": "Evelyn Waugh", - "title": "Sword of Honour", - "price": 12.99 - }, - { - 
"category": "fiction", - "author": "Herman Melville", - "title": "Moby Dick", - "isbn": "0-553-21311-3", - "price": 8.99 - }, - { - "category": "fiction", - "author": "J. R. R. Tolkien", - "title": "The Lord of the Rings", - "isbn": "0-395-19395-8", - "price": 22.99 + { + "store": { + "book": [ + { + "category": "reference", + "author": "Nigel Rees", + "title": "Sayings of the Century", + "price": 8.95 + }, + { + "category": "fiction", + "author": "Evelyn Waugh", + "title": "Sword of Honour", + "price": 12.99 + }, + { + "category": "fiction", + "author": "Herman Melville", + "title": "Moby Dick", + "isbn": "0-553-21311-3", + "price": 8.99 + }, + { + "category": "fiction", + "author": "J. R. R. Tolkien", + "title": "The Lord of the Rings", + "isbn": "0-395-19395-8", + "price": 22.99 + } + ], + "bicycle": { + "color": "red", + "price": 19.95, + "gears": [23, 50], + "extra": {"x": 0}, + "escape": "Esc\\b\\f\\n\\r\\t\\u002A", + "nullValue": null + } } - ], - "bicycle": { - "color": "red", - "price": 19.95, - "gears": [23, 50], - "extra": {"x": 0}, - "escape": "Esc\\b\\f\\n\\r\\t\\u002A", - "nullValue": null } - } - } - """; + """; @Test void parseContentWithJsonPath() { diff --git a/common/src/test/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueueTest.java b/common/src/test/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueueTest.java index 059f883dede..b55f29465a6 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueueTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/queue/impl/InMemoryCommonDataQueueTest.java @@ -17,101 +17,99 @@ package org.apache.hertzbeat.common.queue.impl; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import java.util.Map; - import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.junit.jupiter.api.BeforeEach; 
import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; - /** * Test case for {@link InMemoryCommonDataQueue} */ class InMemoryCommonDataQueueTest { - private InMemoryCommonDataQueue queue; + private InMemoryCommonDataQueue queue; - @BeforeEach - void setUp() { - queue = new InMemoryCommonDataQueue(); - } + @BeforeEach + void setUp() { + queue = new InMemoryCommonDataQueue(); + } - @Test - void testAlertsData() throws InterruptedException { + @Test + void testAlertsData() throws InterruptedException { - var alert = new Alert(); + var alert = new Alert(); - queue.sendAlertsData(alert); - assertEquals(1, queue.getQueueSizeMetricsInfo().get("alertDataQueue")); + queue.sendAlertsData(alert); + assertEquals(1, queue.getQueueSizeMetricsInfo().get("alertDataQueue")); - var polledAlert = queue.pollAlertsData(); - assertEquals(0, queue.getQueueSizeMetricsInfo().get("alertDataQueue")); + var polledAlert = queue.pollAlertsData(); + assertEquals(0, queue.getQueueSizeMetricsInfo().get("alertDataQueue")); - assertNotNull(polledAlert); - assertEquals(alert, polledAlert); - } + assertNotNull(polledAlert); + assertEquals(alert, polledAlert); + } - @Test - void testMetricsData() throws InterruptedException { + @Test + void testMetricsData() throws InterruptedException { - var metricsData = CollectRep.MetricsData.newBuilder().build(); + var metricsData = CollectRep.MetricsData.newBuilder().build(); - queue.sendMetricsData(metricsData); + queue.sendMetricsData(metricsData); - CollectRep.MetricsData polledMetricsData = queue.pollMetricsDataToAlerter(); + CollectRep.MetricsData polledMetricsData = queue.pollMetricsDataToAlerter(); - assertNotNull(polledMetricsData); - assertEquals(metricsData, polledMetricsData); + assertNotNull(polledMetricsData); + assertEquals(metricsData, polledMetricsData); - polledMetricsData = queue.pollMetricsDataToPersistentStorage(); + polledMetricsData = 
queue.pollMetricsDataToPersistentStorage(); - assertNotNull(polledMetricsData); - assertEquals(metricsData, polledMetricsData); + assertNotNull(polledMetricsData); + assertEquals(metricsData, polledMetricsData); - polledMetricsData = queue.pollMetricsDataToRealTimeStorage(); + polledMetricsData = queue.pollMetricsDataToRealTimeStorage(); - assertNotNull(polledMetricsData); - assertEquals(metricsData, polledMetricsData); - } + assertNotNull(polledMetricsData); + assertEquals(metricsData, polledMetricsData); + } - @Test - void testGetQueueSizeMetricsInfo() { + @Test + void testGetQueueSizeMetricsInfo() { - Map metricsInfo = queue.getQueueSizeMetricsInfo(); + Map metricsInfo = queue.getQueueSizeMetricsInfo(); - assertEquals(0, metricsInfo.get("alertDataQueue")); - assertEquals(0, metricsInfo.get("metricsDataToAlertQueue")); - assertEquals(0, metricsInfo.get("metricsDataToPersistentStorageQueue")); - assertEquals(0, metricsInfo.get("metricsDataToMemoryStorageQueue")); + assertEquals(0, metricsInfo.get("alertDataQueue")); + assertEquals(0, metricsInfo.get("metricsDataToAlertQueue")); + assertEquals(0, metricsInfo.get("metricsDataToPersistentStorageQueue")); + assertEquals(0, metricsInfo.get("metricsDataToMemoryStorageQueue")); - queue.sendAlertsData(new Alert()); - queue.sendMetricsData(CollectRep.MetricsData.newBuilder().build()); + queue.sendAlertsData(new Alert()); + queue.sendMetricsData(CollectRep.MetricsData.newBuilder().build()); - metricsInfo = queue.getQueueSizeMetricsInfo(); + metricsInfo = queue.getQueueSizeMetricsInfo(); - assertEquals(1, metricsInfo.get("alertDataQueue")); - assertEquals(1, metricsInfo.get("metricsDataToAlertQueue")); - assertEquals(1, metricsInfo.get("metricsDataToPersistentStorageQueue")); - assertEquals(1, metricsInfo.get("metricsDataToMemoryStorageQueue")); - } + assertEquals(1, metricsInfo.get("alertDataQueue")); + assertEquals(1, metricsInfo.get("metricsDataToAlertQueue")); + assertEquals(1, 
metricsInfo.get("metricsDataToPersistentStorageQueue")); + assertEquals(1, metricsInfo.get("metricsDataToMemoryStorageQueue")); + } - @Test - void testDestroy() { + @Test + void testDestroy() { - queue.sendAlertsData(new Alert()); - queue.sendMetricsData(CollectRep.MetricsData.newBuilder().build()); + queue.sendAlertsData(new Alert()); + queue.sendMetricsData(CollectRep.MetricsData.newBuilder().build()); - queue.destroy(); + queue.destroy(); - Map metricsInfo = queue.getQueueSizeMetricsInfo(); + Map metricsInfo = queue.getQueueSizeMetricsInfo(); - assertEquals(0, metricsInfo.get("alertDataQueue")); - assertEquals(0, metricsInfo.get("metricsDataToAlertQueue")); - assertEquals(0, metricsInfo.get("metricsDataToPersistentStorageQueue")); - assertEquals(0, metricsInfo.get("metricsDataToMemoryStorageQueue")); - } + assertEquals(0, metricsInfo.get("alertDataQueue")); + assertEquals(0, metricsInfo.get("metricsDataToAlertQueue")); + assertEquals(0, metricsInfo.get("metricsDataToPersistentStorageQueue")); + assertEquals(0, metricsInfo.get("metricsDataToMemoryStorageQueue")); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueueTest.java b/common/src/test/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueueTest.java index f7bf8f0da4a..57c8ea4fcd4 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueueTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/queue/impl/KafkaCommonDataQueueTest.java @@ -17,10 +17,14 @@ package org.apache.hertzbeat.common.queue.impl; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.lang.reflect.Field; import java.time.Duration; import java.util.Collections; - import org.apache.hertzbeat.common.config.CommonProperties; import 
org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.hertzbeat.common.entity.message.CollectRep; @@ -34,141 +38,135 @@ import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * Test case for {@link KafkaCommonDataQueue} */ class KafkaCommonDataQueueTest { - private KafkaProducer metricsDataProducer; - private KafkaProducer alertDataProducer; - private KafkaConsumer alertDataConsumer; - private KafkaConsumer metricsDataToAlertConsumer; - private KafkaConsumer metricsDataToPersistentStorageConsumer; - private KafkaConsumer metricsDataToRealTimeStorageConsumer; - private CommonProperties.KafkaProperties kafkaProperties; - private KafkaCommonDataQueue kafkaCommonDataQueue; - - @BeforeEach - void setUp() throws Exception { - kafkaProperties = mock(CommonProperties.KafkaProperties.class); - when(kafkaProperties.getServers()).thenReturn("localhost:9092"); - when(kafkaProperties.getAlertsDataTopic()).thenReturn("alerts"); - when(kafkaProperties.getMetricsDataTopic()).thenReturn("metrics"); - - CommonProperties properties = mock(CommonProperties.class); - CommonProperties.DataQueueProperties queueProperties = mock(CommonProperties.DataQueueProperties.class); - when(properties.getQueue()).thenReturn(queueProperties); - when(queueProperties.getKafka()).thenReturn(kafkaProperties); - - metricsDataProducer = mock(KafkaProducer.class); - alertDataProducer = mock(KafkaProducer.class); - alertDataConsumer = mock(KafkaConsumer.class); - metricsDataToAlertConsumer = mock(KafkaConsumer.class); - metricsDataToPersistentStorageConsumer = mock(KafkaConsumer.class); - metricsDataToRealTimeStorageConsumer = mock(KafkaConsumer.class); - - kafkaCommonDataQueue = new KafkaCommonDataQueue(properties); - - 
setPrivateField(kafkaCommonDataQueue, "metricsDataProducer", metricsDataProducer); - setPrivateField(kafkaCommonDataQueue, "alertDataProducer", alertDataProducer); - setPrivateField(kafkaCommonDataQueue, "alertDataConsumer", alertDataConsumer); - setPrivateField(kafkaCommonDataQueue, "metricsDataToAlertConsumer", metricsDataToAlertConsumer); - setPrivateField(kafkaCommonDataQueue, "metricsDataToPersistentStorageConsumer", metricsDataToPersistentStorageConsumer); - setPrivateField(kafkaCommonDataQueue, "metricsDataToRealTimeStorageConsumer", metricsDataToRealTimeStorageConsumer); - } - - // Test use, set private field. - private void setPrivateField(Object target, String fieldName, Object value) throws Exception { - - Field field = target.getClass().getDeclaredField(fieldName); - field.setAccessible(true); - field.set(target, value); - } - - @Test - void testSendAlertsData() { - - Alert alert = new Alert(); - kafkaCommonDataQueue.sendAlertsData(alert); - - ArgumentCaptor> captor = ArgumentCaptor.forClass(ProducerRecord.class); - verify(alertDataProducer).send(captor.capture()); - - ProducerRecord record = captor.getValue(); - assertEquals("alerts", record.topic()); - assertEquals(alert, record.value()); - } - - @Test - void testPollAlertsData() throws InterruptedException { - - Alert alert = new Alert(); - ConsumerRecords records = new ConsumerRecords<>(Collections.emptyMap()); - when(alertDataConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); - - assertNull(kafkaCommonDataQueue.pollAlertsData()); - - records = new ConsumerRecords<>(Collections.singletonMap( - new TopicPartition("alerts", 0), - Collections.singletonList( - new ConsumerRecord<>("alerts", 0, 0L, 1L, alert) - ) - )); - when(alertDataConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); - - assertEquals(alert, kafkaCommonDataQueue.pollAlertsData()); - } - - @Test - void testSendMetricsData() { - - CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().build(); - 
kafkaCommonDataQueue.sendMetricsData(metricsData); - - ArgumentCaptor> captor = ArgumentCaptor.forClass(ProducerRecord.class); - verify(metricsDataProducer).send(captor.capture()); - - ProducerRecord record = captor.getValue(); - assertEquals("metrics", record.topic()); - assertEquals(metricsData, record.value()); - } - - @Test - void testPollMetricsDataToAlerter() throws InterruptedException { - - CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().build(); - ConsumerRecords records = new ConsumerRecords<>(Collections.emptyMap()); - when(metricsDataToAlertConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); - - assertNull(kafkaCommonDataQueue.pollMetricsDataToAlerter()); - - records = new ConsumerRecords<>(Collections.singletonMap( - new TopicPartition("metrics", 0), - Collections.singletonList( - new ConsumerRecord<>("metrics", 0, 0L, 1L, metricsData) - ) - )); - when(metricsDataToAlertConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); - - assertEquals(metricsData, kafkaCommonDataQueue.pollMetricsDataToAlerter()); - } - - @Test - void testDestroy() throws Exception { - - kafkaCommonDataQueue.destroy(); - - verify(metricsDataProducer).close(); - verify(alertDataProducer).close(); - verify(alertDataConsumer).close(); - verify(metricsDataToAlertConsumer).close(); - verify(metricsDataToPersistentStorageConsumer).close(); - verify(metricsDataToRealTimeStorageConsumer).close(); - } + private KafkaProducer metricsDataProducer; + private KafkaProducer alertDataProducer; + private KafkaConsumer alertDataConsumer; + private KafkaConsumer metricsDataToAlertConsumer; + private KafkaConsumer metricsDataToPersistentStorageConsumer; + private KafkaConsumer metricsDataToRealTimeStorageConsumer; + private CommonProperties.KafkaProperties kafkaProperties; + private KafkaCommonDataQueue kafkaCommonDataQueue; + + @BeforeEach + void setUp() throws Exception { + kafkaProperties = mock(CommonProperties.KafkaProperties.class); + 
when(kafkaProperties.getServers()).thenReturn("localhost:9092"); + when(kafkaProperties.getAlertsDataTopic()).thenReturn("alerts"); + when(kafkaProperties.getMetricsDataTopic()).thenReturn("metrics"); + + CommonProperties properties = mock(CommonProperties.class); + CommonProperties.DataQueueProperties queueProperties = mock(CommonProperties.DataQueueProperties.class); + when(properties.getQueue()).thenReturn(queueProperties); + when(queueProperties.getKafka()).thenReturn(kafkaProperties); + + metricsDataProducer = mock(KafkaProducer.class); + alertDataProducer = mock(KafkaProducer.class); + alertDataConsumer = mock(KafkaConsumer.class); + metricsDataToAlertConsumer = mock(KafkaConsumer.class); + metricsDataToPersistentStorageConsumer = mock(KafkaConsumer.class); + metricsDataToRealTimeStorageConsumer = mock(KafkaConsumer.class); + + kafkaCommonDataQueue = new KafkaCommonDataQueue(properties); + + setPrivateField(kafkaCommonDataQueue, "metricsDataProducer", metricsDataProducer); + setPrivateField(kafkaCommonDataQueue, "alertDataProducer", alertDataProducer); + setPrivateField(kafkaCommonDataQueue, "alertDataConsumer", alertDataConsumer); + setPrivateField(kafkaCommonDataQueue, "metricsDataToAlertConsumer", metricsDataToAlertConsumer); + setPrivateField(kafkaCommonDataQueue, "metricsDataToPersistentStorageConsumer", metricsDataToPersistentStorageConsumer); + setPrivateField(kafkaCommonDataQueue, "metricsDataToRealTimeStorageConsumer", metricsDataToRealTimeStorageConsumer); + } + + // Test use, set private field. 
+ private void setPrivateField(Object target, String fieldName, Object value) throws Exception { + + Field field = target.getClass().getDeclaredField(fieldName); + field.setAccessible(true); + field.set(target, value); + } + + @Test + void testSendAlertsData() { + + Alert alert = new Alert(); + kafkaCommonDataQueue.sendAlertsData(alert); + + ArgumentCaptor> captor = ArgumentCaptor.forClass(ProducerRecord.class); + verify(alertDataProducer).send(captor.capture()); + + ProducerRecord record = captor.getValue(); + assertEquals("alerts", record.topic()); + assertEquals(alert, record.value()); + } + + @Test + void testPollAlertsData() throws InterruptedException { + + Alert alert = new Alert(); + ConsumerRecords records = new ConsumerRecords<>(Collections.emptyMap()); + when(alertDataConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); + + assertNull(kafkaCommonDataQueue.pollAlertsData()); + + records = new ConsumerRecords<>(Collections.singletonMap( + new TopicPartition("alerts", 0), + Collections.singletonList( + new ConsumerRecord<>("alerts", 0, 0L, 1L, alert) + ) + )); + when(alertDataConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); + + assertEquals(alert, kafkaCommonDataQueue.pollAlertsData()); + } + + @Test + void testSendMetricsData() { + + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().build(); + kafkaCommonDataQueue.sendMetricsData(metricsData); + + ArgumentCaptor> captor = ArgumentCaptor.forClass(ProducerRecord.class); + verify(metricsDataProducer).send(captor.capture()); + + ProducerRecord record = captor.getValue(); + assertEquals("metrics", record.topic()); + assertEquals(metricsData, record.value()); + } + + @Test + void testPollMetricsDataToAlerter() throws InterruptedException { + + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().build(); + ConsumerRecords records = new ConsumerRecords<>(Collections.emptyMap()); + 
when(metricsDataToAlertConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); + + assertNull(kafkaCommonDataQueue.pollMetricsDataToAlerter()); + + records = new ConsumerRecords<>(Collections.singletonMap( + new TopicPartition("metrics", 0), + Collections.singletonList( + new ConsumerRecord<>("metrics", 0, 0L, 1L, metricsData) + ) + )); + when(metricsDataToAlertConsumer.poll(Duration.ofSeconds(1))).thenReturn(records); + + assertEquals(metricsData, kafkaCommonDataQueue.pollMetricsDataToAlerter()); + } + + @Test + void testDestroy() throws Exception { + + kafkaCommonDataQueue.destroy(); + + verify(metricsDataProducer).close(); + verify(alertDataProducer).close(); + verify(alertDataConsumer).close(); + verify(metricsDataToAlertConsumer).close(); + verify(metricsDataToPersistentStorageConsumer).close(); + verify(metricsDataToRealTimeStorageConsumer).close(); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueueTest.java b/common/src/test/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueueTest.java index 2344eca4f06..8df98643aae 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueueTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/queue/impl/RedisCommonDataQueueTest.java @@ -17,6 +17,13 @@ package org.apache.hertzbeat.common.queue.impl; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import com.fasterxml.jackson.databind.ObjectMapper; import io.lettuce.core.RedisClient; import io.lettuce.core.RedisURI; @@ -34,14 +41,6 @@ import org.mockito.MockitoAnnotations; import org.mockito.junit.jupiter.MockitoExtension; -import static org.junit.jupiter.api.Assertions.assertEquals; 
-import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.mockStatic; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - /** * test for {@link RedisCommonDataQueue} */ @@ -49,109 +48,112 @@ @ExtendWith(MockitoExtension.class) class RedisCommonDataQueueTest { - @Mock - private StatefulRedisConnection connection; + @Mock + private StatefulRedisConnection connection; - @Mock - private RedisCommands syncCommands; + @Mock + private RedisCommands syncCommands; - @Mock - private ObjectMapper objectMapper; + @Mock + private ObjectMapper objectMapper; - private RedisClient redisClient; + private RedisClient redisClient; - private CommonProperties commonProperties; + private CommonProperties commonProperties; - private CommonProperties.RedisProperties redisProperties; + private CommonProperties.RedisProperties redisProperties; - private RedisCommonDataQueue redisCommonDataQueue; + private RedisCommonDataQueue redisCommonDataQueue; - @BeforeEach - public void setUp() { + @BeforeEach + public void setUp() { - MockitoAnnotations.openMocks(this); + MockitoAnnotations.openMocks(this); - redisClient = mock(RedisClient.class); - commonProperties = mock(CommonProperties.class); - redisProperties = mock(CommonProperties.RedisProperties.class); - CommonProperties.DataQueueProperties dataQueueProperties = mock(CommonProperties.DataQueueProperties.class); + redisClient = mock(RedisClient.class); + commonProperties = mock(CommonProperties.class); + redisProperties = mock(CommonProperties.RedisProperties.class); + CommonProperties.DataQueueProperties dataQueueProperties = mock(CommonProperties.DataQueueProperties.class); - when(commonProperties.getQueue()).thenReturn(dataQueueProperties); - when(dataQueueProperties.getRedis()).thenReturn(redisProperties); + when(commonProperties.getQueue()).thenReturn(dataQueueProperties); + 
when(dataQueueProperties.getRedis()).thenReturn(redisProperties); - when(redisProperties.getRedisHost()).thenReturn("localhost"); - when(redisProperties.getRedisPort()).thenReturn(6379); - when(redisProperties.getMetricsDataQueueNameToAlerter()).thenReturn("metricsDataQueueToAlerter"); - when(redisProperties.getMetricsDataQueueNameToPersistentStorage()).thenReturn("metricsDataQueueToPersistentStorage"); - when(redisProperties.getMetricsDataQueueNameToRealTimeStorage()).thenReturn("metricsDataQueueToRealTimeStorage"); - when(redisProperties.getAlertsDataQueueName()).thenReturn("alertsDataQueue"); + when(redisProperties.getRedisHost()).thenReturn("localhost"); + when(redisProperties.getRedisPort()).thenReturn(6379); + when(redisProperties.getMetricsDataQueueNameToAlerter()).thenReturn("metricsDataQueueToAlerter"); + when(redisProperties.getMetricsDataQueueNameToPersistentStorage()).thenReturn("metricsDataQueueToPersistentStorage"); + when(redisProperties.getMetricsDataQueueNameToRealTimeStorage()).thenReturn("metricsDataQueueToRealTimeStorage"); + when(redisProperties.getAlertsDataQueueName()).thenReturn("alertsDataQueue"); - try (MockedStatic mockedRedisClient = mockStatic(RedisClient.class)) { + try (MockedStatic mockedRedisClient = mockStatic(RedisClient.class)) { - mockedRedisClient.when(() -> RedisClient.create( - any(RedisURI.class)) - ).thenReturn(redisClient); + mockedRedisClient.when(() -> RedisClient.create( + any(RedisURI.class)) + ).thenReturn(redisClient); - when(redisClient.connect()).thenReturn(connection); - when(connection.sync()).thenReturn(syncCommands); + when(redisClient.connect()).thenReturn(connection); + when(connection.sync()).thenReturn(syncCommands); - redisCommonDataQueue = new RedisCommonDataQueue(commonProperties); - } - } + redisCommonDataQueue = new RedisCommonDataQueue(commonProperties); + } + } - @Test - public void testPollAlertsData() throws Exception { + @Test + public void testPollAlertsData() throws Exception { - String 
alertJson = "{\"id\":\"1\",\"content\":\"Test Alert\"}"; - Alert expectedAlert = Alert.builder().id(1L).content("Test Alert").build(); + String alertJson = "{\"id\":\"1\",\"content\":\"Test Alert\"}"; + Alert expectedAlert = Alert.builder().id(1L).content("Test Alert").build(); - when(syncCommands.rpop(anyString())).thenReturn(alertJson); - Alert actualAlert = redisCommonDataQueue.pollAlertsData(); - assertEquals(expectedAlert, actualAlert); - } + when(syncCommands.rpop(anyString())).thenReturn(alertJson); + Alert actualAlert = redisCommonDataQueue.pollAlertsData(); + assertEquals(expectedAlert, actualAlert); + } - @Test - public void testPollMetricsDataToAlerter() throws Exception { + @Test + public void testPollMetricsDataToAlerter() throws Exception { - CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().setMetrics("test metrics").build(); - String metricsDataJson = "{\"metrics\":\"test metrics\"}"; + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().setMetrics("test metrics").build(); + String metricsDataJson = "{\"metrics\":\"test metrics\"}"; - when(syncCommands.rpop("metricsDataQueueToAlerter")).thenReturn(metricsDataJson); + when(syncCommands.rpop("metricsDataQueueToAlerter")).thenReturn(metricsDataJson); - CollectRep.MetricsData actualMetricsData = redisCommonDataQueue.pollMetricsDataToAlerter(); - assertEquals(metricsData, actualMetricsData); - } + CollectRep.MetricsData actualMetricsData = redisCommonDataQueue.pollMetricsDataToAlerter(); + assertEquals(metricsData, actualMetricsData); + } - @Test - public void testSendMetricsData() throws Exception { - CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().setMetrics("test metrics").build(); - String metricsDataJson = ProtoJsonUtil.toJsonStr(metricsData); + @Test + public void testSendMetricsData() throws Exception { + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder().setMetrics("test metrics").build(); + String 
metricsDataJson = ProtoJsonUtil.toJsonStr(metricsData); - redisCommonDataQueue.sendMetricsData(metricsData); + redisCommonDataQueue.sendMetricsData(metricsData); - verify(syncCommands).lpush("metricsDataQueueToAlerter", metricsDataJson); - verify(syncCommands).lpush("metricsDataQueueToPersistentStorage", metricsDataJson); - verify(syncCommands).lpush("metricsDataQueueToRealTimeStorage", metricsDataJson); - } + verify(syncCommands).lpush("metricsDataQueueToAlerter", metricsDataJson); + verify(syncCommands).lpush("metricsDataQueueToPersistentStorage", metricsDataJson); + verify(syncCommands).lpush("metricsDataQueueToRealTimeStorage", metricsDataJson); + } - @Test - public void testSendAlertsData() throws Exception { + @SuppressWarnings("checkstyle:OperatorWrap") + @Test + public void testSendAlertsData() throws Exception { - Alert alert = Alert.builder() - .content("test") - .build(); - String alertJson = "{\"id\":null,\"target\":null,\"alertDefineId\":null,\"priority\":0,\"content\":\"test\",\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes\":null,\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}"; - redisCommonDataQueue.sendAlertsData(alert); + Alert alert = Alert.builder() + .content("test") + .build(); + String alertJson = "{\"id\":null,\"target\":null,\"alertDefineId\":null,\"priority\":0,\"content\":\"test\"," + + "\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes\":null," + + "\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}"; + redisCommonDataQueue.sendAlertsData(alert); - verify(syncCommands).lpush("alertsDataQueue", alertJson); - } + verify(syncCommands).lpush("alertsDataQueue", alertJson); + } - @Test - public void testDestroy() { - redisCommonDataQueue.destroy(); + @Test + public void testDestroy() { + redisCommonDataQueue.destroy(); - verify(connection).close(); - verify(redisClient).shutdown(); - } 
+ verify(connection).close(); + verify(redisClient).shutdown(); + } -} \ No newline at end of file +} diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java index c90729ff5ed..21e71a98e43 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertDeserializerTest.java @@ -17,8 +17,8 @@ package org.apache.hertzbeat.common.serialize; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.Map; - import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.kafka.common.header.Headers; import org.junit.jupiter.api.BeforeEach; @@ -26,73 +26,71 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertEquals; - /** * test case for {@link AlertDeserializer} */ class AlertDeserializerTest { - private AlertDeserializer alertDeserializer; + private AlertDeserializer alertDeserializer; - @Mock - private Map configs; + @Mock + private Map configs; - @Mock - private Headers headers; + @Mock + private Headers headers; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - MockitoAnnotations.openMocks(this); + MockitoAnnotations.openMocks(this); - alertDeserializer = new AlertDeserializer(); - } + alertDeserializer = new AlertDeserializer(); + } - @Test - void testConfigure() { + @Test + void testConfigure() { - alertDeserializer.configure(configs, false); - } + alertDeserializer.configure(configs, false); + } - @Test - void testDeserializeWithBytes() { + @Test + void testDeserializeWithBytes() { - String json = "{\"target\":\"test\",\"content\":\"test\"}"; - byte[] bytes = json.getBytes(); - Alert expectedAlert = Alert.builder() - .content("test") - .target("test") - .build(); + String json = 
"{\"target\":\"test\",\"content\":\"test\"}"; + byte[] bytes = json.getBytes(); + Alert expectedAlert = Alert.builder() + .content("test") + .target("test") + .build(); - Alert actualAlert = alertDeserializer.deserialize("", bytes); + Alert actualAlert = alertDeserializer.deserialize("", bytes); - assertEquals(expectedAlert.getContent(), actualAlert.getContent()); - assertEquals(expectedAlert.getTarget(), actualAlert.getTarget()); - } + assertEquals(expectedAlert.getContent(), actualAlert.getContent()); + assertEquals(expectedAlert.getTarget(), actualAlert.getTarget()); + } - @Test - void testDeserializeWithHeaders() { + @Test + void testDeserializeWithHeaders() { - String topic = "alerts"; - byte[] data = "{\"target\":\"test\",\"content\":\"test\"}".getBytes(); + String topic = "alerts"; + byte[] data = "{\"target\":\"test\",\"content\":\"test\"}".getBytes(); - Alert expectedAlert = Alert.builder() - .content("test") - .target("test") - .build(); + Alert expectedAlert = Alert.builder() + .content("test") + .target("test") + .build(); - Alert actualAlert = alertDeserializer.deserialize(topic, headers, data); + Alert actualAlert = alertDeserializer.deserialize(topic, headers, data); - assertEquals(expectedAlert.getContent(), actualAlert.getContent()); - assertEquals(expectedAlert.getTarget(), actualAlert.getTarget()); - } + assertEquals(expectedAlert.getContent(), actualAlert.getContent()); + assertEquals(expectedAlert.getTarget(), actualAlert.getTarget()); + } - @Test - void testClose() { + @Test + void testClose() { - alertDeserializer.close(); - } + alertDeserializer.close(); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java index 48ebb9bfbb4..a1a8c46ed97 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java +++ 
b/common/src/test/java/org/apache/hertzbeat/common/serialize/AlertSerializerTest.java @@ -17,9 +17,12 @@ package org.apache.hertzbeat.common.serialize; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import java.util.Arrays; import java.util.Map; - import org.apache.hertzbeat.common.entity.alerter.Alert; import org.apache.kafka.common.header.Headers; import org.junit.jupiter.api.BeforeEach; @@ -27,82 +30,77 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; - /** * test case for {@link AlertSerializer} */ class AlertSerializerTest { - private AlertSerializer alertSerializer; + private AlertSerializer alertSerializer; - @Mock - private Map configs; + @Mock + private Map configs; - @Mock - private Headers headers; + @Mock + private Headers headers; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - MockitoAnnotations.openMocks(this); - alertSerializer = new AlertSerializer(); - } + MockitoAnnotations.openMocks(this); + alertSerializer = new AlertSerializer(); + } - @Test - void testConfigure() { + @Test + void testConfigure() { - alertSerializer.configure(configs, false); - } + alertSerializer.configure(configs, false); + } - @Test - void testSerializeWithAlert() { + @Test + void testSerializeWithAlert() { - Alert alert = Alert.builder() - .content("test") - .target("test") - .build(); - byte[] expectedJson = ("{\"id\":null,\"target\":\"test\",\"alertDefineId\":null,\"priority\":0,\"content\":" - + 
"\"test\",\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes" - + "\":null,\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}").getBytes(); + Alert alert = Alert.builder() + .content("test") + .target("test") + .build(); + byte[] expectedJson = ("{\"id\":null,\"target\":\"test\",\"alertDefineId\":null,\"priority\":0,\"content\":" + + "\"test\",\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes" + + "\":null,\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}").getBytes(); - byte[] bytes = alertSerializer.serialize("", alert); + byte[] bytes = alertSerializer.serialize("", alert); - assertNotNull(bytes); - assertEquals(Arrays.toString(expectedJson), Arrays.toString(bytes)); - } + assertNotNull(bytes); + assertEquals(Arrays.toString(expectedJson), Arrays.toString(bytes)); + } - @Test - void testSerializeWithNullAlert() { + @Test + void testSerializeWithNullAlert() { - byte[] bytes = alertSerializer.serialize("", null); - assertNull(bytes); - } + byte[] bytes = alertSerializer.serialize("", null); + assertNull(bytes); + } - @Test - void testSerializeWithHeaders() { + @Test + void testSerializeWithHeaders() { - Alert alert = Alert.builder() - .content("test") - .target("test") - .build(); - byte[] expectedBytes = ("{\"id\":null,\"target\":\"test\",\"alertDefineId\":null,\"priority\":0,\"content\":" - + "\"test\",\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes" - + "\":null,\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}").getBytes(); + Alert alert = Alert.builder() + .content("test") + .target("test") + .build(); + byte[] expectedBytes = ("{\"id\":null,\"target\":\"test\",\"alertDefineId\":null,\"priority\":0,\"content\":" + + "\"test\",\"status\":0,\"times\":null,\"firstAlarmTime\":null,\"lastAlarmTime\":null,\"triggerTimes" + + 
"\":null,\"tags\":null,\"creator\":null,\"modifier\":null,\"gmtCreate\":null,\"gmtUpdate\":null}").getBytes(); - byte[] bytes = alertSerializer.serialize("alerts", headers, alert); + byte[] bytes = alertSerializer.serialize("alerts", headers, alert); - assertArrayEquals(expectedBytes, bytes); - } + assertArrayEquals(expectedBytes, bytes); + } - @Test - void testClose() { + @Test + void testClose() { - alertSerializer.close(); - } + alertSerializer.close(); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java index f3da7ce171f..031c8120a34 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataDeserializerTest.java @@ -17,8 +17,9 @@ package org.apache.hertzbeat.common.serialize; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import java.util.Map; - import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.kafka.common.header.Headers; import org.junit.jupiter.api.BeforeEach; @@ -26,77 +27,74 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - /** * test case for {@link KafkaMetricsDataDeserializer} */ class KafkaMetricsDataDeserializerTest { - private KafkaMetricsDataDeserializer deserializer; + private KafkaMetricsDataDeserializer deserializer; - @Mock - private Map configs; + @Mock + private Map configs; - @Mock - private Headers headers; + @Mock + private Headers headers; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - MockitoAnnotations.openMocks(this); + MockitoAnnotations.openMocks(this); - deserializer = new 
KafkaMetricsDataDeserializer(); - } + deserializer = new KafkaMetricsDataDeserializer(); + } - @Test - void testConfigure() { + @Test + void testConfigure() { - deserializer.configure(configs, false); - } + deserializer.configure(configs, false); + } - @Test - void testDeserializeWithBytes() { + @Test + void testDeserializeWithBytes() { - CollectRep.MetricsData expectedMetricsData = CollectRep.MetricsData.newBuilder() - .setMetrics("someValue") - .setApp("linux") - .build(); - byte[] bytes = expectedMetricsData.toByteArray(); + CollectRep.MetricsData expectedMetricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] bytes = expectedMetricsData.toByteArray(); - CollectRep.MetricsData actualMetricsData = deserializer.deserialize("", bytes); + CollectRep.MetricsData actualMetricsData = deserializer.deserialize("", bytes); - assertEquals(expectedMetricsData, actualMetricsData); - } + assertEquals(expectedMetricsData, actualMetricsData); + } - @Test - void testDeserializeWithInvalidBytes() { + @Test + void testDeserializeWithInvalidBytes() { - byte[] invalidBytes = "invalid data".getBytes(); + byte[] invalidBytes = "invalid data".getBytes(); - assertThrows(RuntimeException.class, () -> deserializer.deserialize("", invalidBytes)); - } + assertThrows(RuntimeException.class, () -> deserializer.deserialize("", invalidBytes)); + } - @Test - void testDeserializeWithHeaders() { + @Test + void testDeserializeWithHeaders() { - CollectRep.MetricsData expectedMetricsData = CollectRep.MetricsData.newBuilder() - .setMetrics("someValue") - .setApp("linux") - .build(); - byte[] bytes = expectedMetricsData.toByteArray(); + CollectRep.MetricsData expectedMetricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] bytes = expectedMetricsData.toByteArray(); - CollectRep.MetricsData actualMetricsData = deserializer.deserialize("topic", headers, bytes); + CollectRep.MetricsData 
actualMetricsData = deserializer.deserialize("topic", headers, bytes); - assertEquals(expectedMetricsData, actualMetricsData); - } + assertEquals(expectedMetricsData, actualMetricsData); + } - @Test - void testClose() { + @Test + void testClose() { - deserializer.close(); - } + deserializer.close(); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java index 63d7a1dd1b5..f434774e8df 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/serialize/KafkaMetricsDataSerializerTest.java @@ -17,6 +17,9 @@ package org.apache.hertzbeat.common.serialize; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import java.util.Map; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.kafka.common.header.Headers; @@ -25,76 +28,72 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; - /** * test case for {@link KafkaMetricsDataSerializer} */ class KafkaMetricsDataSerializerTest { - private KafkaMetricsDataSerializer serializer; + private KafkaMetricsDataSerializer serializer; - @Mock - private Map configs; + @Mock + private Map configs; - @Mock - private Headers headers; + @Mock + private Headers headers; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - MockitoAnnotations.openMocks(this); + MockitoAnnotations.openMocks(this); - serializer = new KafkaMetricsDataSerializer(); - } + serializer = new KafkaMetricsDataSerializer(); + } - @Test - void 
testConfigure() { + @Test + void testConfigure() { - serializer.configure(configs, false); - } + serializer.configure(configs, false); + } - @Test - void testSerializeWithMetricsData() { + @Test + void testSerializeWithMetricsData() { - CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder() - .setMetrics("someValue") - .setApp("linux") - .build(); - byte[] bytes = serializer.serialize("", metricsData); + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] bytes = serializer.serialize("", metricsData); - assertNotNull(bytes); - assertArrayEquals(metricsData.toByteArray(), bytes); - } + assertNotNull(bytes); + assertArrayEquals(metricsData.toByteArray(), bytes); + } - @Test - void testSerializeWithNullMetricsData() { + @Test + void testSerializeWithNullMetricsData() { - byte[] bytes = serializer.serialize("", null); + byte[] bytes = serializer.serialize("", null); - assertNull(bytes); - } + assertNull(bytes); + } - @Test - void testSerializeWithHeaders() { + @Test + void testSerializeWithHeaders() { - CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder() - .setMetrics("someValue") - .setApp("linux") - .build(); - byte[] expectedBytes = metricsData.toByteArray(); - byte[] bytes = serializer.serialize("topic", headers, metricsData); + CollectRep.MetricsData metricsData = CollectRep.MetricsData.newBuilder() + .setMetrics("someValue") + .setApp("linux") + .build(); + byte[] expectedBytes = metricsData.toByteArray(); + byte[] bytes = serializer.serialize("topic", headers, metricsData); - assertArrayEquals(expectedBytes, bytes); - } + assertArrayEquals(expectedBytes, bytes); + } - @Test - void testClose() { + @Test + void testClose() { - serializer.close(); - } + serializer.close(); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/support/CommonThreadPoolTest.java 
b/common/src/test/java/org/apache/hertzbeat/common/support/CommonThreadPoolTest.java index 18bc3343596..211b3ed280b 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/support/CommonThreadPoolTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/support/CommonThreadPoolTest.java @@ -40,71 +40,71 @@ class CommonThreadPoolTest { - private CommonThreadPool commonThreadPool; + private CommonThreadPool commonThreadPool; - private ThreadPoolExecutor executorMock; + private ThreadPoolExecutor executorMock; - @BeforeEach - public void setUp() throws Exception { + @BeforeEach + public void setUp() throws Exception { - commonThreadPool = new CommonThreadPool(); + commonThreadPool = new CommonThreadPool(); - Field workerExecutorField = CommonThreadPool.class.getDeclaredField("workerExecutor"); - workerExecutorField.setAccessible(true); - executorMock = mock(ThreadPoolExecutor.class); - workerExecutorField.set(commonThreadPool, executorMock); - } + Field workerExecutorField = CommonThreadPool.class.getDeclaredField("workerExecutor"); + workerExecutorField.setAccessible(true); + executorMock = mock(ThreadPoolExecutor.class); + workerExecutorField.set(commonThreadPool, executorMock); + } - @Test - public void testExecuteTask() { + @Test + public void testExecuteTask() { - Runnable task = mock(Runnable.class); - commonThreadPool.execute(task); - verify(executorMock).execute(task); - } + Runnable task = mock(Runnable.class); + commonThreadPool.execute(task); + verify(executorMock).execute(task); + } - @Test - public void testExecuteTaskThrowsEX() { + @Test + public void testExecuteTaskThrowsEx() { - Runnable task = mock(Runnable.class); - doThrow(RejectedExecutionException.class).when(executorMock).execute(task); + Runnable task = mock(Runnable.class); + doThrow(RejectedExecutionException.class).when(executorMock).execute(task); - assertThrows( - RejectedExecutionException.class, - () -> commonThreadPool.execute(task) - ); - } + assertThrows( + 
RejectedExecutionException.class, + () -> commonThreadPool.execute(task) + ); + } - @Test - public void testDestroy() throws Exception { + @Test + public void testDestroy() throws Exception { - commonThreadPool.destroy(); - verify(executorMock).shutdownNow(); - } + commonThreadPool.destroy(); + verify(executorMock).shutdownNow(); + } - @Test - public void testDestroyWithNull() throws Exception { + @Test + public void testDestroyWithNull() throws Exception { - Field workerExecutorField = CommonThreadPool.class.getDeclaredField("workerExecutor"); - workerExecutorField.setAccessible(true); - workerExecutorField.set(commonThreadPool, null); + Field workerExecutorField = CommonThreadPool.class.getDeclaredField("workerExecutor"); + workerExecutorField.setAccessible(true); + workerExecutorField.set(commonThreadPool, null); - commonThreadPool.destroy(); - } + commonThreadPool.destroy(); + } - @Test - public void testInitialization() throws Exception { - CommonThreadPool pool = new CommonThreadPool(); + @Test + public void testInitialization() throws Exception { + CommonThreadPool pool = new CommonThreadPool(); - Field workerExecutorField = CommonThreadPool.class.getDeclaredField("workerExecutor"); - workerExecutorField.setAccessible(true); - ThreadPoolExecutor workerExecutor = (ThreadPoolExecutor) workerExecutorField.get(pool); + Field workerExecutorField = CommonThreadPool.class.getDeclaredField("workerExecutor"); + workerExecutorField.setAccessible(true); + ThreadPoolExecutor workerExecutor = (ThreadPoolExecutor) workerExecutorField.get(pool); - assertNotNull(workerExecutor); - assertEquals(2, workerExecutor.getCorePoolSize()); - assertEquals(Integer.MAX_VALUE, workerExecutor.getMaximumPoolSize()); - assertEquals(10, workerExecutor.getKeepAliveTime(TimeUnit.SECONDS)); - assertTrue(workerExecutor.getQueue() instanceof SynchronousQueue); - } + assertNotNull(workerExecutor); + assertEquals(2, workerExecutor.getCorePoolSize()); + assertEquals(Integer.MAX_VALUE, 
workerExecutor.getMaximumPoolSize()); + assertEquals(10, workerExecutor.getKeepAliveTime(TimeUnit.SECONDS)); + assertTrue(workerExecutor.getQueue() instanceof SynchronousQueue); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/support/ResourceBundleUtf8ControlTest.java b/common/src/test/java/org/apache/hertzbeat/common/support/ResourceBundleUtf8ControlTest.java index 43d4ecb2356..c30f5a29eb4 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/support/ResourceBundleUtf8ControlTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/support/ResourceBundleUtf8ControlTest.java @@ -17,59 +17,57 @@ package org.apache.hertzbeat.common.support; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import java.io.IOException; import java.util.Locale; import java.util.ResourceBundle; - import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; - /** * Test case for {@link ResourceBundleUtf8Control} */ class ResourceBundleUtf8ControlTest { - @Test - void testNewBundleWithPropertiesFormat() throws IllegalAccessException, InstantiationException, IOException { + @Test + void testNewBundleWithPropertiesFormat() throws IllegalAccessException, InstantiationException, IOException { ResourceBundle.Control control = new ResourceBundleUtf8Control(); - ClassLoader loader = getClass().getClassLoader(); - String baseName = "msg"; + ClassLoader loader = getClass().getClassLoader(); + String baseName = "msg"; - ResourceBundle bundle = control.newBundle(baseName, Locale.ENGLISH, "java.properties", loader, false); + ResourceBundle bundle = control.newBundle(baseName, Locale.ENGLISH, "java.properties", loader, false); assertNotNull(bundle); - assertEquals("Hello, 
World!", bundle.getString("hello")); + assertEquals("Hello, World!", bundle.getString("hello")); - bundle = control.newBundle(baseName, Locale.ROOT, "java.properties", loader, false); - assertNotNull(bundle); - assertEquals("你好", bundle.getString("hello")); - } + bundle = control.newBundle(baseName, Locale.ROOT, "java.properties", loader, false); + assertNotNull(bundle); + assertEquals("你好", bundle.getString("hello")); + } - @Test - void testNewBundleWithClassFormat() throws IllegalAccessException, InstantiationException, IOException { + @Test + void testNewBundleWithClassFormat() throws IllegalAccessException, InstantiationException, IOException { - ResourceBundle.Control control = new ResourceBundleUtf8Control(); - ClassLoader loader = getClass().getClassLoader(); - String baseName = "dummyClassBundle"; + ResourceBundle.Control control = new ResourceBundleUtf8Control(); + ClassLoader loader = getClass().getClassLoader(); + String baseName = "dummyClassBundle"; - ResourceBundle bundle = control.newBundle(baseName, Locale.ENGLISH, "java.class", loader, false); - //because not have an actual class, bundle should be null - assertNull(bundle); - } + ResourceBundle bundle = control.newBundle(baseName, Locale.ENGLISH, "java.class", loader, false); + //because not have an actual class, bundle should be null + assertNull(bundle); + } - @Test - void testReloading() throws IllegalAccessException, InstantiationException, IOException { - ResourceBundle.Control control = new ResourceBundleUtf8Control(); - ClassLoader loader = getClass().getClassLoader(); - String baseName = "msg"; + @Test + void testReloading() throws IllegalAccessException, InstantiationException, IOException { + ResourceBundle.Control control = new ResourceBundleUtf8Control(); + ClassLoader loader = getClass().getClassLoader(); + String baseName = "msg"; - // Test with reload flag - ResourceBundle bundle = control.newBundle(baseName, Locale.ENGLISH, "java.properties", loader, true); - assertNotNull(bundle); 
- assertEquals("Hello, World!", bundle.getString("hello")); - } + // Test with reload flag + ResourceBundle bundle = control.newBundle(baseName, Locale.ENGLISH, "java.properties", loader, true); + assertNotNull(bundle); + assertEquals("Hello, World!", bundle.getString("hello")); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/support/SpringContextHolderTest.java b/common/src/test/java/org/apache/hertzbeat/common/support/SpringContextHolderTest.java index e3b208b9694..d08213ec45a 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/support/SpringContextHolderTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/support/SpringContextHolderTest.java @@ -17,13 +17,6 @@ package org.apache.hertzbeat.common.support; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -import org.springframework.beans.BeansException; -import org.springframework.context.ApplicationContext; -import org.springframework.context.ConfigurableApplicationContext; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -32,6 +25,11 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.springframework.beans.BeansException; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ConfigurableApplicationContext; /** * Test case for {@link SpringContextHolder} diff --git a/common/src/test/java/org/apache/hertzbeat/common/support/vaild/EmailParamValidatorTest.java b/common/src/test/java/org/apache/hertzbeat/common/support/vaild/EmailParamValidatorTest.java index e0e512a677a..51f19aa96d8 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/support/vaild/EmailParamValidatorTest.java +++ 
b/common/src/test/java/org/apache/hertzbeat/common/support/vaild/EmailParamValidatorTest.java @@ -17,6 +17,10 @@ package org.apache.hertzbeat.common.support.vaild; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; import jakarta.validation.ConstraintValidatorContext; import org.apache.hertzbeat.common.support.valid.EmailParamValidator; import org.junit.jupiter.api.BeforeEach; @@ -25,60 +29,55 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - /** * Test case for {@link EmailParamValidator} */ class EmailParamValidatorTest { - @InjectMocks - private EmailParamValidator emailParamValidator; + @InjectMocks + private EmailParamValidator emailParamValidator; - @Mock - private ConstraintValidatorContext context; + @Mock + private ConstraintValidatorContext context; - @Mock - private ConstraintValidatorContext.ConstraintViolationBuilder constraintViolationBuilder; + @Mock + private ConstraintValidatorContext.ConstraintViolationBuilder constraintViolationBuilder; - @BeforeEach - public void setUp() { - MockitoAnnotations.initMocks(this); - when(context.buildConstraintViolationWithTemplate(any())).thenReturn(constraintViolationBuilder); - } + @BeforeEach + public void setUp() { + MockitoAnnotations.initMocks(this); + when(context.buildConstraintViolationWithTemplate(any())).thenReturn(constraintViolationBuilder); + } - @Test - public void testIsValid() { - boolean result = emailParamValidator.isValid(null, context); - assertFalse(result); + @Test + public void testIsValid() { + boolean result = emailParamValidator.isValid(null, context); + assertFalse(result); - result = 
emailParamValidator.isValid("123456345@qq.com", context); - assertTrue(result); + result = emailParamValidator.isValid("123456345@qq.com", context); + assertTrue(result); - result = emailParamValidator.isValid("", context); - assertFalse(result); + result = emailParamValidator.isValid("", context); + assertFalse(result); - result = emailParamValidator.isValid(" ", context); - assertFalse(result); + result = emailParamValidator.isValid(" ", context); + assertFalse(result); - result = emailParamValidator.isValid("test@example.com", context); - assertTrue(result); + result = emailParamValidator.isValid("test@example.com", context); + assertTrue(result); - result = emailParamValidator.isValid("test@example", context); - assertFalse(result); + result = emailParamValidator.isValid("test@example", context); + assertFalse(result); - result = emailParamValidator.isValid("test@sub.example.com", context); - assertTrue(result); + result = emailParamValidator.isValid("test@sub.example.com", context); + assertTrue(result); - String longEmail = "verylongemailaddress@subdomain.domain.example.com"; - result = emailParamValidator.isValid(longEmail, context); - assertTrue(result); - } + String longEmail = "verylongemailaddress@subdomain.domain.example.com"; + result = emailParamValidator.isValid(longEmail, context); + assertTrue(result); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/support/vaild/HostParamValidatorTest.java b/common/src/test/java/org/apache/hertzbeat/common/support/vaild/HostParamValidatorTest.java index 57d51eff86d..5b5669d986e 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/support/vaild/HostParamValidatorTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/support/vaild/HostParamValidatorTest.java @@ -17,6 +17,10 @@ package org.apache.hertzbeat.common.support.vaild; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static 
org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; import jakarta.validation.ConstraintValidatorContext; import org.apache.hertzbeat.common.support.valid.HostParamValidator; import org.junit.jupiter.api.BeforeEach; @@ -25,60 +29,55 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - /** * Test case for {@link HostParamValidator} */ class HostParamValidatorTest { - @InjectMocks - private HostParamValidator hostParamValidator; + @InjectMocks + private HostParamValidator hostParamValidator; - @Mock - private ConstraintValidatorContext context; + @Mock + private ConstraintValidatorContext context; - @Mock - private ConstraintValidatorContext.ConstraintViolationBuilder constraintViolationBuilder; + @Mock + private ConstraintValidatorContext.ConstraintViolationBuilder constraintViolationBuilder; - @BeforeEach - public void setUp() { - MockitoAnnotations.initMocks(this); - when(context.buildConstraintViolationWithTemplate(any())).thenReturn(constraintViolationBuilder); - } + @BeforeEach + public void setUp() { + MockitoAnnotations.initMocks(this); + when(context.buildConstraintViolationWithTemplate(any())).thenReturn(constraintViolationBuilder); + } - @Test - public void testIsValid() { - boolean result = hostParamValidator.isValid(null, context); - assertFalse(result); + @Test + public void testIsValid() { + boolean result = hostParamValidator.isValid(null, context); + assertFalse(result); - result = hostParamValidator.isValid("", context); - assertFalse(result); + result = hostParamValidator.isValid("", context); + assertFalse(result); - result = hostParamValidator.isValid(" ", context); - assertFalse(result); + result = hostParamValidator.isValid(" ", context); + assertFalse(result); - result = 
hostParamValidator.isValid("192.168.1.1", context); - assertTrue(result); + result = hostParamValidator.isValid("192.168.1.1", context); + assertTrue(result); - result = hostParamValidator.isValid("2001:0db8:85a3:0000:0000:8a2e:0370:7334", context); - assertTrue(result); + result = hostParamValidator.isValid("2001:0db8:85a3:0000:0000:8a2e:0370:7334", context); + assertTrue(result); - result = hostParamValidator.isValid("www.example.com", context); - assertTrue(result); + result = hostParamValidator.isValid("www.example.com", context); + assertTrue(result); - result = hostParamValidator.isValid("http://www.example.com", context); - assertTrue(result); + result = hostParamValidator.isValid("http://www.example.com", context); + assertTrue(result); - result = hostParamValidator.isValid("https://www.baidu.com", context); - assertTrue(result); + result = hostParamValidator.isValid("https://www.baidu.com", context); + assertTrue(result); - result = hostParamValidator.isValid("ht!tp://www.example.com", context); - assertFalse(result); - } + result = hostParamValidator.isValid("ht!tp://www.example.com", context); + assertFalse(result); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/support/vaild/PhoneNumParamValidatorTest.java b/common/src/test/java/org/apache/hertzbeat/common/support/vaild/PhoneNumParamValidatorTest.java index 6657770ddfc..13e21a02043 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/support/vaild/PhoneNumParamValidatorTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/support/vaild/PhoneNumParamValidatorTest.java @@ -17,6 +17,10 @@ package org.apache.hertzbeat.common.support.vaild; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; import jakarta.validation.ConstraintValidatorContext; import 
org.apache.hertzbeat.common.support.valid.PhoneNumParamValidator; import org.junit.jupiter.api.BeforeEach; @@ -25,51 +29,46 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - /** * Test case for {@link PhoneNumParamValidator} */ class PhoneNumParamValidatorTest { - @InjectMocks - private PhoneNumParamValidator phoneNumParamValidator; + @InjectMocks + private PhoneNumParamValidator phoneNumParamValidator; - @Mock - private ConstraintValidatorContext context; + @Mock + private ConstraintValidatorContext context; - @Mock - private ConstraintValidatorContext.ConstraintViolationBuilder constraintViolationBuilder; + @Mock + private ConstraintValidatorContext.ConstraintViolationBuilder constraintViolationBuilder; - @BeforeEach - public void setUp() { - MockitoAnnotations.initMocks(this); - when(context.buildConstraintViolationWithTemplate(any())).thenReturn(constraintViolationBuilder); - } + @BeforeEach + public void setUp() { + MockitoAnnotations.initMocks(this); + when(context.buildConstraintViolationWithTemplate(any())).thenReturn(constraintViolationBuilder); + } - @Test - public void testIsValid() { - boolean result = phoneNumParamValidator.isValid(null, context); - assertFalse(result); + @Test + public void testIsValid() { + boolean result = phoneNumParamValidator.isValid(null, context); + assertFalse(result); - result = phoneNumParamValidator.isValid("", context); - assertFalse(result); + result = phoneNumParamValidator.isValid("", context); + assertFalse(result); - result = phoneNumParamValidator.isValid("123456", context); - assertFalse(result); + result = phoneNumParamValidator.isValid("123456", context); + assertFalse(result); - result = phoneNumParamValidator.isValid("abc123", context); - assertFalse(result); + result = 
phoneNumParamValidator.isValid("abc123", context); + assertFalse(result); - result = phoneNumParamValidator.isValid("13900001234", context); - assertTrue(result); + result = phoneNumParamValidator.isValid("13900001234", context); + assertTrue(result); - result = phoneNumParamValidator.isValid("1234567890123456789", context); - assertFalse(result); - } + result = phoneNumParamValidator.isValid("1234567890123456789", context); + assertFalse(result); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java index a6a711bceda..b177216be7b 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/FileUtilTest.java @@ -17,15 +17,13 @@ package org.apache.hertzbeat.common.util; +import static org.junit.jupiter.api.Assertions.assertEquals; import org.apache.hertzbeat.common.constants.ExportFileConstants; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; - import org.springframework.http.MediaType; import org.springframework.mock.web.MockMultipartFile; -import static org.junit.jupiter.api.Assertions.assertEquals; - /** * test case for {@link FileUtil}. 
*/ diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/IntervalExpressionUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/IntervalExpressionUtilTest.java index 91c8e0314a3..b23d101077e 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/IntervalExpressionUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/IntervalExpressionUtilTest.java @@ -17,54 +17,53 @@ package org.apache.hertzbeat.common.util; -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.junit.jupiter.api.Test; /** * Test case for {@link IntervalExpressionUtil} */ class IntervalExpressionUtilTest { - @Test - public void testValidNumberIntervalExpress() { + @Test + public void testValidNumberIntervalExpress() { - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(5.0, null)); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(5.0, null)); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(null, "(3,7)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(null, "(3,7)")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "(3,7)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "(3,7)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "(3,7)")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "(3,7)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "(3,7)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "(3,7)")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "[3,7]")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "[3,7]")); - 
assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(2.0, "[3,7]")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(8.0, "[3,7]")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "[3,7]")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "[3,7]")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(2.0, "[3,7]")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(8.0, "[3,7]")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "[3,7)")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(6.9999, "[3,7)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "[3,7)")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "[3,7)")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(6.9999, "[3,7)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "[3,7)")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(4.0, "(3,7]")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "(3,7]")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "(3,7]")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(4.0, "(3,7]")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(7.0, "(3,7]")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(3.0, "(3,7]")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(-1000.0, "(-∞,5)")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(-1.0, "(-∞,5)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "(-∞,5)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(10.0, "(-∞,5)")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(-1000.0, "(-∞,5)")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(-1.0, "(-∞,5)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, 
"(-∞,5)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(10.0, "(-∞,5)")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(1000.0, "(5,+∞)")); - assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(10.0, "(5,+∞)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "(5,+∞)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(0.0, "(5,+∞)")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(1000.0, "(5,+∞)")); + assertTrue(IntervalExpressionUtil.validNumberIntervalExpress(10.0, "(5,+∞)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "(5,+∞)")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(0.0, "(5,+∞)")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "(3,7")); - assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "[3,7)3,7]")); - } + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "(3,7")); + assertFalse(IntervalExpressionUtil.validNumberIntervalExpress(5.0, "[3,7)3,7]")); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java index ac7bee3bf86..3545010ea37 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/IpDomainUtilTest.java @@ -17,6 +17,12 @@ package org.apache.hertzbeat.common.util; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.net.Inet4Address; import java.net.InetAddress; import java.net.NetworkInterface; @@ -28,13 +34,6 @@ import org.mockito.MockedStatic; import 
org.mockito.Mockito; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - /** * Test case for {@link IpDomainUtil} */ @@ -96,10 +95,10 @@ void testGetLocalhostIp() throws SocketException { } // no network interface - Enumeration noNetworkIFNetworkInterfaces = Collections.enumeration(Collections.emptyList()); + Enumeration noNetworkNetworkInterfaces = Collections.enumeration(Collections.emptyList()); try (MockedStatic mockedStaticNetworkInterface = Mockito.mockStatic(NetworkInterface.class)) { - mockedStaticNetworkInterface.when(NetworkInterface::getNetworkInterfaces).thenReturn(noNetworkIFNetworkInterfaces); + mockedStaticNetworkInterface.when(NetworkInterface::getNetworkInterfaces).thenReturn(noNetworkNetworkInterfaces); String localhostIp = IpDomainUtil.getLocalhostIp(); assertNull(localhostIp); diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/JexlTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/JexlTest.java index dd064042956..fa502f8f6b4 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/JexlTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/JexlTest.java @@ -39,7 +39,7 @@ public class JexlTest { private JexlBuilder jexlBuilder; - + @BeforeEach void setUp() { ClassLoader classLoader = new ClassLoader() { @@ -55,7 +55,7 @@ public String getName() { .features(features).strict(true).silent(false).stackOverflow(40); } - + @Test void testMultiExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -66,7 +66,7 @@ void testMultiExpression() { Object o = e.evaluate(context); Assertions.assertEquals(8, o); } - + @Test void testDivisionExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -77,7 +77,7 @@ void 
testDivisionExpression() { Object o = e.evaluate(context); Assertions.assertEquals(8, o); } - + @Test void testAdditionExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -93,7 +93,7 @@ void testAdditionExpression() { o = e.evaluate(context); Assertions.assertEquals("hello3.0", o); } - + @Test void testSubtractionExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -104,7 +104,7 @@ void testSubtractionExpression() { Object o = e.evaluate(context); Assertions.assertEquals(7, o); } - + @Test void testModulusExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -115,7 +115,7 @@ void testModulusExpression() { Object o = e.evaluate(context); Assertions.assertEquals(2, o); } - + @Test void testComplicatedExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -126,7 +126,7 @@ void testComplicatedExpression() { Object o = e.evaluate(context); Assertions.assertEquals(23, o); } - + @Test void testComplicatedExpressionWithParentheses() { JexlEngine jexl = jexlBuilder.create(); @@ -137,7 +137,7 @@ void testComplicatedExpressionWithParentheses() { Object o = e.evaluate(context); Assertions.assertEquals(23, o); } - + @Test void testComplicatedExpressionWithParenthesesAndSpaces() { JexlEngine jexl = jexlBuilder.create(); @@ -148,7 +148,7 @@ void testComplicatedExpressionWithParenthesesAndSpaces() { Object o = e.evaluate(context); Assertions.assertEquals(23, o); } - + @Test void testComplicatedSpecialVariableNameExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -162,7 +162,7 @@ void testComplicatedSpecialVariableNameExpression() { Object o = e.evaluate(context); Assertions.assertEquals(121.0, o); } - + @Test void testComplicatedSpecialVariableNameExpressionWithParenthesesAndSpaces() { JexlEngine jexl = jexlBuilder.create(); @@ -176,7 +176,7 @@ void testComplicatedSpecialVariableNameExpressionWithParenthesesAndSpaces() { Object o = e.evaluate(context); Assertions.assertEquals(121.0, o); } - + @Test void testVariableAssignment() { JexlEngine jexl = 
jexlBuilder.create(); @@ -186,7 +186,7 @@ void testVariableAssignment() { Object o = e.evaluate(context); Assertions.assertEquals(20.0, o); } - + @Test void testSpecialVariableNameWithoutSpacesExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -208,7 +208,7 @@ void testSpecialVariableNameWithSpacesExpression() { Object o = e.evaluate(context); Assertions.assertEquals(47.82608695652174, o); } - + @Test void testJudgmentExpression() { JexlEngine jexl = jexlBuilder.create(); @@ -219,7 +219,7 @@ void testJudgmentExpression() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testJudgmentExpressionWithParentheses() { JexlEngine jexl = jexlBuilder.create(); @@ -230,7 +230,7 @@ void testJudgmentExpressionWithParentheses() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testJudgmentExpressionWithParenthesesAndSpaces() { JexlEngine jexl = jexlBuilder.create(); @@ -241,7 +241,7 @@ void testJudgmentExpressionWithParenthesesAndSpaces() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testJudgmentExpressionWithAndOperator() { JexlEngine jexl = jexlBuilder.create(); @@ -253,7 +253,7 @@ void testJudgmentExpressionWithAndOperator() { Object o = e.evaluate(context); Assertions.assertFalse((Boolean) o); } - + @Test void testJudgmentExpressionWithOrOperator() { JexlEngine jexl = jexlBuilder.create(); @@ -265,7 +265,7 @@ void testJudgmentExpressionWithOrOperator() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testJudgmentExpressionWithNotOperator() { JexlEngine jexl = jexlBuilder.create(); @@ -275,7 +275,7 @@ void testJudgmentExpressionWithNotOperator() { Object o = e.evaluate(context); Assertions.assertFalse((Boolean) o); } - + @Test void testJudgmentExpressionWithNotOperatorAndParentheses() { JexlEngine jexl = jexlBuilder.create(); @@ -300,7 +300,7 @@ void testCustomFunction() { String result = (String) o; 
Assertions.assertTrue(result.endsWith("2")); } - + @Test void testZeroThrowException() { JexlEngine jexl = jexlBuilder.create(); @@ -310,7 +310,7 @@ void testZeroThrowException() { JexlExpression e = jexl.createExpression("x / y"); Assertions.assertThrows(JexlException.class, () -> e.evaluate(context)); } - + @Test void testNullThrowException() { JexlEngine jexl = jexlBuilder.create(); @@ -320,7 +320,7 @@ void testNullThrowException() { JexlExpression e = jexl.createExpression("x / y"); Assertions.assertThrows(JexlException.class, () -> e.evaluate(context)); } - + @Test void testEmptyStringThrowException() { JexlEngine jexl = jexlBuilder.create(); @@ -330,7 +330,7 @@ void testEmptyStringThrowException() { JexlExpression e = jexl.createExpression("x / y"); Assertions.assertThrows(JexlException.class, () -> e.evaluate(context)); } - + @Test void testEqualsFunction() { Map functions = Maps.newLinkedHashMap(); @@ -344,7 +344,7 @@ void testEqualsFunction() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testNotEqualsFunction() { Map functions = Maps.newLinkedHashMap(); @@ -372,7 +372,7 @@ void testContainsFunction() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testExistsFunction() { Map functions = Maps.newLinkedHashMap(); @@ -385,7 +385,7 @@ void testExistsFunction() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testExistsFunctionWithNull() { Map functions = Maps.newLinkedHashMap(); @@ -396,7 +396,7 @@ void testExistsFunctionWithNull() { JexlExpression e = jexl.createExpression("sys:exists(x)"); Assertions.assertThrows(JexlException.class, () -> e.evaluate(context)); } - + @Test void testExistsFunctionWithEmptyString() { Map functions = Maps.newLinkedHashMap(); @@ -435,7 +435,7 @@ void testMatchesFunction() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testMatchesFunctionWithNull() { Map functions = 
Maps.newLinkedHashMap(); @@ -448,7 +448,7 @@ void testMatchesFunctionWithNull() { Object o = e.evaluate(context); Assertions.assertFalse((Boolean) o); } - + @Test void testMatchesFunctionWithEmptyString() { Map functions = Maps.newLinkedHashMap(); @@ -461,7 +461,7 @@ void testMatchesFunctionWithEmptyString() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testMatchesFunctionWithEmptyStringAndSpace() { Map functions = Maps.newLinkedHashMap(); @@ -474,7 +474,7 @@ void testMatchesFunctionWithEmptyStringAndSpace() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testMatchesFunctionWithRegex() { Map functions = Maps.newLinkedHashMap(); @@ -487,7 +487,7 @@ void testMatchesFunctionWithRegex() { Object o = e.evaluate(context); Assertions.assertTrue((Boolean) o); } - + @Test void testMatchesFunctionWithRegexNotMatch() { Map functions = Maps.newLinkedHashMap(); @@ -500,7 +500,7 @@ void testMatchesFunctionWithRegexNotMatch() { Object o = e.evaluate(context); Assertions.assertFalse((Boolean) o); } - + @Test void testMatchesFunctionWithRegexAndSpace() { Map functions = Maps.newLinkedHashMap(); @@ -513,7 +513,7 @@ void testMatchesFunctionWithRegexAndSpace() { Object o = e.evaluate(context); Assertions.assertFalse((Boolean) o); } - + @Test void testMatchesFunctionWithRegexAndSpace2() { Map functions = Maps.newLinkedHashMap(); @@ -529,7 +529,7 @@ void testMatchesFunctionWithRegexAndSpace2() { o = e.evaluate(context); Assertions.assertFalse((Boolean) o); } - + @Test void testNowFunction() { Map functions = Maps.newLinkedHashMap(); @@ -542,7 +542,7 @@ void testNowFunction() { String result = (String) o; Assertions.assertTrue(result.endsWith("-0")); } - + @Test void testAddString() { JexlEngine jexl = jexlBuilder.create(); @@ -557,7 +557,7 @@ void testAddString() { result = (String) o; Assertions.assertEquals("Ubuntu-00000", result); } - + @Test void testUnconventionalMapping() { // database_pages=Database 
pages @@ -572,7 +572,7 @@ void testUnconventionalMapping() { Assertions.assertThrows(JexlException.class, () -> jexl.createExpression("Buffer Cache Hit Ratio")); Assertions.assertThrows(JexlException.class, () -> jexl.createExpression("Page reads/sec")); } - + @Test void testRecException() { JexlEngine jexl = jexlBuilder.create(); @@ -591,7 +591,7 @@ void testMethodCallException() { Assertions.assertThrows(JexlException.class, () -> jexl.createExpression("'string'.length()")); Assertions.assertThrows(JexlException.class, () -> jexl.createExpression("System.currentTimeMillis()")); } - + /** * custom function */ diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/LruHashMapTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/LruHashMapTest.java index 73a4642d8a4..58126e6a325 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/LruHashMapTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/LruHashMapTest.java @@ -17,11 +17,10 @@ package org.apache.hertzbeat.common.util; -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import org.junit.jupiter.api.Test; /** * Test case for {@link LruHashMap} diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/MapCapUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/MapCapUtilTest.java index fac11a9cdf0..b9ab4e9a9b4 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/MapCapUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/MapCapUtilTest.java @@ -17,28 +17,27 @@ package org.apache.hertzbeat.common.util; -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertEquals; +import org.junit.jupiter.api.Test; /** * Test for {@link MapCapUtil} */ class MapCapUtilTest { - @Test - public void testCalInitMap() { - 
int size = 0; - int expectedCapacity = (int) Math.ceil(size / 0.75); - int actualCapacity = MapCapUtil.calInitMap(size); + @Test + public void testCalInitMap() { + int size = 0; + int expectedCapacity = (int) Math.ceil(size / 0.75); + int actualCapacity = MapCapUtil.calInitMap(size); - assertEquals(expectedCapacity, actualCapacity); + assertEquals(expectedCapacity, actualCapacity); - size = 10; - expectedCapacity = (int) Math.ceil(size / 0.75); - actualCapacity = MapCapUtil.calInitMap(size); + size = 10; + expectedCapacity = (int) Math.ceil(size / 0.75); + actualCapacity = MapCapUtil.calInitMap(size); - assertEquals(expectedCapacity, actualCapacity); - } + assertEquals(expectedCapacity, actualCapacity); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/ProtoJsonUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/ProtoJsonUtilTest.java index 394f9bbd958..908a579e2be 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/ProtoJsonUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/ProtoJsonUtilTest.java @@ -17,16 +17,14 @@ package org.apache.hertzbeat.common.util; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.util.JsonFormat; import org.apache.hertzbeat.common.util.entity.PersonTest.Person; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.util.JsonFormat; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; - /** * Test case for {@link ProtoJsonUtil} */ diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/ResourceBundleUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/ResourceBundleUtilTest.java index 
a9b8fb4b544..6ac67d4bc30 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/ResourceBundleUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/ResourceBundleUtilTest.java @@ -17,19 +17,18 @@ package org.apache.hertzbeat.common.util; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.Mockito.mockStatic; +import java.util.Locale; +import java.util.MissingResourceException; +import java.util.ResourceBundle; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import java.util.Locale; -import java.util.MissingResourceException; -import java.util.ResourceBundle; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.Mockito.mockStatic; /** * Test case for {@link ResourceBundleUtil} @@ -51,8 +50,8 @@ void testGetBundleWithValidBundleName() { mockedResourceBundle.when( () -> ResourceBundle.getBundle( - Mockito.eq(BUNDLE_NAME), - Mockito.any(ResourceBundle.Control.class) + Mockito.eq(BUNDLE_NAME), + Mockito.any(ResourceBundle.Control.class) ) ).thenReturn(mockBundle); @@ -68,8 +67,8 @@ void testGetBundleByInvalidBundleName() { try (MockedStatic mockedResourceBundle = mockStatic(ResourceBundle.class)) { mockedResourceBundle.when( () -> ResourceBundle.getBundle( - Mockito.eq(BUNDLE_NAME), - Mockito.any(ResourceBundle.Control.class) + Mockito.eq(BUNDLE_NAME), + Mockito.any(ResourceBundle.Control.class) ) ).thenThrow(new MissingResourceException("Missing bundle", "ResourceBundle", BUNDLE_NAME)); diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/StrBufferTest.java 
b/common/src/test/java/org/apache/hertzbeat/common/util/StrBufferTest.java index ff1e117e50e..98a5a93341b 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/StrBufferTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/StrBufferTest.java @@ -17,12 +17,11 @@ package org.apache.hertzbeat.common.util; -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.junit.jupiter.api.Test; /** * test case for {@link StrBuffer} @@ -30,123 +29,123 @@ class StrBufferTest { - private static final String POSITIVE_INF = "+inf"; - private static final String NEGATIVE_INF = "-inf"; - private static final long POSITIVE_INF_VALUE = 0x7FF0000000000000L; - private static final long NEGATIVE_INF_VALUE = 0xFFF0000000000000L; + private static final String POSITIVE_INF = "+inf"; + private static final String NEGATIVE_INF = "-inf"; + private static final long POSITIVE_INF_VALUE = 0x7FF0000000000000L; + private static final long NEGATIVE_INF_VALUE = 0xFFF0000000000000L; - @Test - void testRead() { + @Test + void testRead() { - StrBuffer buffer = new StrBuffer("hello"); + StrBuffer buffer = new StrBuffer("hello"); - assertEquals('h', buffer.read()); - assertEquals('e', buffer.read()); - assertEquals('l', buffer.read()); - assertEquals('l', buffer.read()); - assertEquals('o', buffer.read()); - assertThrows(IndexOutOfBoundsException.class, buffer::read); - } + assertEquals('h', buffer.read()); + assertEquals('e', buffer.read()); + assertEquals('l', buffer.read()); + assertEquals('l', buffer.read()); + assertEquals('o', buffer.read()); + assertThrows(IndexOutOfBoundsException.class, buffer::read); + } - @Test - void testRollback() { + @Test + void testRollback() { - StrBuffer buffer = new StrBuffer("hello"); + StrBuffer buffer = new 
StrBuffer("hello"); - assertEquals('h', buffer.read()); - buffer.rollback(); - assertEquals('h', buffer.read()); - buffer.read(); - buffer.read(); - buffer.rollback(); - assertEquals('l', buffer.read()); - } + assertEquals('h', buffer.read()); + buffer.rollback(); + assertEquals('h', buffer.read()); + buffer.read(); + buffer.read(); + buffer.rollback(); + assertEquals('l', buffer.read()); + } - @Test - void testCharAt() { + @Test + void testCharAt() { - StrBuffer buffer = new StrBuffer("hello"); + StrBuffer buffer = new StrBuffer("hello"); - assertEquals('h', buffer.charAt(0)); - assertEquals('e', buffer.charAt(1)); - assertEquals('l', buffer.charAt(2)); - assertEquals('l', buffer.charAt(3)); - assertEquals('o', buffer.charAt(4)); - assertThrows(IndexOutOfBoundsException.class, () -> buffer.charAt(5)); - } + assertEquals('h', buffer.charAt(0)); + assertEquals('e', buffer.charAt(1)); + assertEquals('l', buffer.charAt(2)); + assertEquals('l', buffer.charAt(3)); + assertEquals('o', buffer.charAt(4)); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.charAt(5)); + } - @Test - void testToStr() { + @Test + void testToStr() { - StrBuffer buffer = new StrBuffer("hello"); + StrBuffer buffer = new StrBuffer("hello"); - assertEquals("hello", buffer.toStr()); - buffer.read(); - assertEquals("ello", buffer.toStr()); - } + assertEquals("hello", buffer.toStr()); + buffer.read(); + assertEquals("ello", buffer.toStr()); + } - @Test - void testToDouble() { + @Test + void testToDouble() { - StrBuffer buffer = new StrBuffer("123.45"); - assertEquals(123.45, buffer.toDouble()); + StrBuffer buffer = new StrBuffer("123.45"); + assertEquals(123.45, buffer.toDouble()); - buffer = new StrBuffer("+inf"); - assertEquals(POSITIVE_INF_VALUE, buffer.toDouble()); + buffer = new StrBuffer("+inf"); + assertEquals(POSITIVE_INF_VALUE, buffer.toDouble()); - buffer = new StrBuffer("-inf"); - assertEquals(NEGATIVE_INF_VALUE, buffer.toDouble()); - } + buffer = new StrBuffer("-inf"); + 
assertEquals(NEGATIVE_INF_VALUE, buffer.toDouble()); + } - @Test - void testToLong() { + @Test + void testToLong() { - StrBuffer buffer = new StrBuffer("12345"); - assertEquals(12345L, buffer.toLong()); + StrBuffer buffer = new StrBuffer("12345"); + assertEquals(12345L, buffer.toLong()); - buffer = new StrBuffer("+inf"); - assertEquals(POSITIVE_INF_VALUE, buffer.toLong()); + buffer = new StrBuffer("+inf"); + assertEquals(POSITIVE_INF_VALUE, buffer.toLong()); - buffer = new StrBuffer("-inf"); - assertEquals(NEGATIVE_INF_VALUE, buffer.toLong()); - } + buffer = new StrBuffer("-inf"); + assertEquals(NEGATIVE_INF_VALUE, buffer.toLong()); + } - @Test - void testSkipBlankTabs() { + @Test + void testSkipBlankTabs() { - StrBuffer buffer = new StrBuffer(" \t hello \t "); - buffer.skipBlankTabs(); - assertEquals("hello", buffer.toStr()); - } + StrBuffer buffer = new StrBuffer(" \t hello \t "); + buffer.skipBlankTabs(); + assertEquals("hello", buffer.toStr()); + } - @Test - void testIsEmpty() { + @Test + void testIsEmpty() { - StrBuffer buffer = new StrBuffer(""); - assertTrue(buffer.isEmpty()); + StrBuffer buffer = new StrBuffer(""); + assertTrue(buffer.isEmpty()); - buffer = new StrBuffer(" \t "); - buffer.skipBlankTabs(); - assertTrue(buffer.isEmpty()); + buffer = new StrBuffer(" \t "); + buffer.skipBlankTabs(); + assertTrue(buffer.isEmpty()); - buffer = new StrBuffer("hello"); - assertFalse(buffer.isEmpty()); - } + buffer = new StrBuffer("hello"); + assertFalse(buffer.isEmpty()); + } - @Test - void testParseLong() { + @Test + void testParseLong() { - assertEquals(12345L, StrBuffer.parseLong("12345")); - assertEquals(POSITIVE_INF_VALUE, StrBuffer.parseLong(POSITIVE_INF)); - assertEquals(NEGATIVE_INF_VALUE, StrBuffer.parseLong(NEGATIVE_INF)); - } + assertEquals(12345L, StrBuffer.parseLong("12345")); + assertEquals(POSITIVE_INF_VALUE, StrBuffer.parseLong(POSITIVE_INF)); + assertEquals(NEGATIVE_INF_VALUE, StrBuffer.parseLong(NEGATIVE_INF)); + } - @Test - void testParseDouble() 
{ + @Test + void testParseDouble() { - assertEquals(123.45, StrBuffer.parseDouble("123.45")); - assertEquals(POSITIVE_INF_VALUE, StrBuffer.parseDouble("+inf")); - assertEquals(NEGATIVE_INF_VALUE, StrBuffer.parseDouble("-inf")); - } + assertEquals(123.45, StrBuffer.parseDouble("123.45")); + assertEquals(POSITIVE_INF_VALUE, StrBuffer.parseDouble("+inf")); + assertEquals(NEGATIVE_INF_VALUE, StrBuffer.parseDouble("-inf")); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/StrUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/StrUtilTest.java index 9f50eb1a5da..ba1ae2cc128 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/StrUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/StrUtilTest.java @@ -17,50 +17,48 @@ package org.apache.hertzbeat.common.util; -import java.util.List; - -import org.junit.jupiter.api.Test; - import static org.apache.hertzbeat.common.util.StrUtil.analysisArgToList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.util.List; +import org.junit.jupiter.api.Test; /** * Test case for {@link StrUtil} */ class StrUtilTest { - @Test - void testAnalysisArgToList() { - List result = analysisArgToList(null); - assertTrue(result.isEmpty()); + @Test + void testAnalysisArgToList() { + List result = analysisArgToList(null); + assertTrue(result.isEmpty()); - result = analysisArgToList(""); - assertEquals(1, result.size()); - assertEquals("", result.get(0)); + result = analysisArgToList(""); + assertEquals(1, result.size()); + assertEquals("", result.get(0)); - result = analysisArgToList("element"); - assertEquals(1, result.size()); - assertEquals("element", result.get(0)); + result = analysisArgToList("element"); + assertEquals(1, result.size()); + assertEquals("element", result.get(0)); - result = analysisArgToList("one,two,three"); - assertEquals(3, result.size()); - assertEquals("one", 
result.get(0)); - assertEquals("two", result.get(1)); - assertEquals("three", result.get(2)); + result = analysisArgToList("one,two,three"); + assertEquals(3, result.size()); + assertEquals("one", result.get(0)); + assertEquals("two", result.get(1)); + assertEquals("three", result.get(2)); - result = analysisArgToList(" one , two , three "); - assertEquals(3, result.size()); - assertEquals("one", result.get(0).trim()); - assertEquals("two", result.get(1).trim()); - assertEquals("three", result.get(2).trim()); + result = analysisArgToList(" one , two , three "); + assertEquals(3, result.size()); + assertEquals("one", result.get(0).trim()); + assertEquals("two", result.get(1).trim()); + assertEquals("three", result.get(2).trim()); - result = analysisArgToList(",one,two,three,"); - assertEquals(4, result.size()); - assertEquals("", result.get(0)); - assertEquals("one", result.get(1)); - assertEquals("two", result.get(2)); - assertEquals("three", result.get(3)); - } + result = analysisArgToList(",one,two,three,"); + assertEquals(4, result.size()); + assertEquals("", result.get(0)); + assertEquals("one", result.get(1)); + assertEquals("two", result.get(2)); + assertEquals("three", result.get(3)); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java index cd4ceb5c100..5a467326a8f 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/TimePeriodUtilTest.java @@ -17,18 +17,16 @@ package org.apache.hertzbeat.common.util; -import java.time.Duration; -import java.time.Period; -import java.time.format.DateTimeParseException; -import java.time.temporal.TemporalAmount; - -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static 
org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.time.Duration; +import java.time.Period; +import java.time.format.DateTimeParseException; +import java.time.temporal.TemporalAmount; +import org.junit.jupiter.api.Test; /** * Test case for {@link SnowFlakeIdGenerator} @@ -36,70 +34,70 @@ class TimePeriodUtilTest { - @Test - void testParseTokenTime() { - - // Years - TemporalAmount result = TimePeriodUtil.parseTokenTime("1Y"); - assertTrue(result instanceof Period); - assertEquals(Period.ofYears(1), result); - - // Month - result = TimePeriodUtil.parseTokenTime("5M"); - assertTrue(result instanceof Period); - assertEquals(Period.ofMonths(5), result); - - // Day - result = TimePeriodUtil.parseTokenTime("3D"); - assertTrue(result instanceof Period); - assertEquals(Period.ofDays(3), result); - - // Week - result = TimePeriodUtil.parseTokenTime("3W"); - assertTrue(result instanceof Period); - assertEquals(Period.ofWeeks(3), result); - } - - @Test - void testParseTokenTimeDuration() { - - // Minute - TemporalAmount result = TimePeriodUtil.parseTokenTime("30m"); - assertTrue(result instanceof Duration); - assertEquals(Duration.ofMinutes(30), result); - - // Hour - result = TimePeriodUtil.parseTokenTime("2h"); - assertTrue(result instanceof Duration); - assertEquals(Duration.ofHours(2), result); - } - - @Test - void testParseTokenTimeLowerCaseMinute() { - // Lowercase Minute - TemporalAmount result = TimePeriodUtil.parseTokenTime("1m"); - assertTrue(result instanceof Duration); - assertEquals(Duration.ofMinutes(1), result); - } - - @Test - void testParseTokenTimeInvalidInput() { - - // null input - TemporalAmount result = TimePeriodUtil.parseTokenTime(null); - assertNull(result); - - // empty string - result = TimePeriodUtil.parseTokenTime(""); - assertNull(result); - - // string with length < 2 - result = TimePeriodUtil.parseTokenTime("1"); - 
assertNull(result); - - // invalid format (non-numeric) - Exception exception = assertThrows(DateTimeParseException.class, () -> TimePeriodUtil.parseTokenTime("abc")); - assertNotNull(exception.getMessage()); - } + @Test + void testParseTokenTime() { + + // Years + TemporalAmount result = TimePeriodUtil.parseTokenTime("1Y"); + assertTrue(result instanceof Period); + assertEquals(Period.ofYears(1), result); + + // Month + result = TimePeriodUtil.parseTokenTime("5M"); + assertTrue(result instanceof Period); + assertEquals(Period.ofMonths(5), result); + + // Day + result = TimePeriodUtil.parseTokenTime("3D"); + assertTrue(result instanceof Period); + assertEquals(Period.ofDays(3), result); + + // Week + result = TimePeriodUtil.parseTokenTime("3W"); + assertTrue(result instanceof Period); + assertEquals(Period.ofWeeks(3), result); + } + + @Test + void testParseTokenTimeDuration() { + + // Minute + TemporalAmount result = TimePeriodUtil.parseTokenTime("30m"); + assertTrue(result instanceof Duration); + assertEquals(Duration.ofMinutes(30), result); + + // Hour + result = TimePeriodUtil.parseTokenTime("2h"); + assertTrue(result instanceof Duration); + assertEquals(Duration.ofHours(2), result); + } + + @Test + void testParseTokenTimeLowerCaseMinute() { + // Lowercase Minute + TemporalAmount result = TimePeriodUtil.parseTokenTime("1m"); + assertTrue(result instanceof Duration); + assertEquals(Duration.ofMinutes(1), result); + } + + @Test + void testParseTokenTimeInvalidInput() { + + // null input + TemporalAmount result = TimePeriodUtil.parseTokenTime(null); + assertNull(result); + + // empty string + result = TimePeriodUtil.parseTokenTime(""); + assertNull(result); + + // string with length < 2 + result = TimePeriodUtil.parseTokenTime("1"); + assertNull(result); + + // invalid format (non-numeric) + Exception exception = assertThrows(DateTimeParseException.class, () -> TimePeriodUtil.parseTokenTime("abc")); + assertNotNull(exception.getMessage()); + } } diff --git 
a/common/src/test/java/org/apache/hertzbeat/common/util/TimeZoneUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/TimeZoneUtilTest.java index fa28dcb4e1a..217faa45e06 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/TimeZoneUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/TimeZoneUtilTest.java @@ -17,72 +17,70 @@ package org.apache.hertzbeat.common.util; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.Locale; import java.util.TimeZone; - import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertEquals; - /** * test case for {@link TimeZoneUtil} */ class TimeZoneUtilTest { - private TimeZone defaultTimeZone; - private Locale defaultLocale; + private TimeZone defaultTimeZone; + private Locale defaultLocale; - @BeforeEach - void setUp() { + @BeforeEach + void setUp() { - defaultTimeZone = TimeZone.getDefault(); - defaultLocale = Locale.getDefault(); - } + defaultTimeZone = TimeZone.getDefault(); + defaultLocale = Locale.getDefault(); + } - @AfterEach - void tearDown() { + @AfterEach + void tearDown() { - TimeZone.setDefault(defaultTimeZone); - Locale.setDefault(defaultLocale); - } + TimeZone.setDefault(defaultTimeZone); + Locale.setDefault(defaultLocale); + } - @Test - void testSetTimeZoneAndLocale() { + @Test + void testSetTimeZoneAndLocale() { - TimeZoneUtil.setTimeZoneAndLocale("America/New_York", "en_US"); - assertEquals("America/New_York", TimeZone.getDefault().getID()); - assertEquals(new Locale("en", "US"), Locale.getDefault()); - } + TimeZoneUtil.setTimeZoneAndLocale("America/New_York", "en_US"); + assertEquals("America/New_York", TimeZone.getDefault().getID()); + assertEquals(new Locale("en", "US"), Locale.getDefault()); + } - @Test - void testSetTimeZone() { + @Test + void testSetTimeZone() { - TimeZoneUtil.setTimeZone("Asia/Tokyo"); - 
assertEquals("Asia/Tokyo", TimeZone.getDefault().getID()); + TimeZoneUtil.setTimeZone("Asia/Tokyo"); + assertEquals("Asia/Tokyo", TimeZone.getDefault().getID()); - TimeZoneUtil.setTimeZone(""); - assertEquals("Asia/Tokyo", TimeZone.getDefault().getID()); + TimeZoneUtil.setTimeZone(""); + assertEquals("Asia/Tokyo", TimeZone.getDefault().getID()); - TimeZoneUtil.setTimeZone(null); - assertEquals("Asia/Tokyo", TimeZone.getDefault().getID()); - } + TimeZoneUtil.setTimeZone(null); + assertEquals("Asia/Tokyo", TimeZone.getDefault().getID()); + } - @Test - void testSetLocale() { + @Test + void testSetLocale() { - TimeZoneUtil.setLocale("fr_FR"); - assertEquals(new Locale("fr", "FR"), Locale.getDefault()); + TimeZoneUtil.setLocale("fr_FR"); + assertEquals(new Locale("fr", "FR"), Locale.getDefault()); - TimeZoneUtil.setLocale(""); - assertEquals(new Locale("fr", "FR"), Locale.getDefault()); + TimeZoneUtil.setLocale(""); + assertEquals(new Locale("fr", "FR"), Locale.getDefault()); - TimeZoneUtil.setLocale(null); - assertEquals(new Locale("fr", "FR"), Locale.getDefault()); - } + TimeZoneUtil.setLocale(null); + assertEquals(new Locale("fr", "FR"), Locale.getDefault()); + } } diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/entity/PersonTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/entity/PersonTest.java index 93e6955bf5d..6135c5fe977 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/entity/PersonTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/entity/PersonTest.java @@ -24,814 +24,894 @@ @SuppressWarnings("all") public final class PersonTest { - private PersonTest() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistryLite registry) { - } - - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions( - (com.google.protobuf.ExtensionRegistryLite) registry); - } - public interface PersonOrBuilder extends - // 
@@protoc_insertion_point(interface_extends:org.apache.hertzbeat.common.util.entity.Person) - com.google.protobuf.MessageOrBuilder { + private PersonTest() { + } - /** - * string name = 1; - * @return The name. - */ - java.lang.String getName(); - /** - * string name = 1; - * @return The bytes for name. - */ - com.google.protobuf.ByteString + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface PersonOrBuilder extends + // @@protoc_insertion_point(interface_extends:org.apache.hertzbeat.common.util.entity.Person) + com.google.protobuf.MessageOrBuilder { + + /** + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); - /** - * int32 id = 2; - * @return The id. - */ - int getId(); + /** + * int32 id = 2; + * + * @return The id. + */ + int getId(); + + /** + * string email = 3; + * + * @return The email. + */ + java.lang.String getEmail(); + + /** + * string email = 3; + * + * @return The bytes for email. + */ + com.google.protobuf.ByteString + getEmailBytes(); + } /** - * string email = 3; - * @return The email. - */ - java.lang.String getEmail(); - /** - * string email = 3; - * @return The bytes for email. 
+ * Protobuf type {@code org.apache.hertzbeat.common.util.entity.Person} */ - com.google.protobuf.ByteString - getEmailBytes(); - } - /** - * Protobuf type {@code org.apache.hertzbeat.common.util.entity.Person} - */ - public static final class Person extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:org.apache.hertzbeat.common.util.entity.Person) - PersonOrBuilder { - private static final long serialVersionUID = 0L; - // Use Person.newBuilder() to construct. - private Person(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private Person() { - name_ = ""; - email_ = ""; - } + public static final class Person extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:org.apache.hertzbeat.common.util.entity.Person) + PersonOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Person.newBuilder() to construct. + private Person(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new Person(); - } + private Person() { + name_ = ""; + email_ = ""; + } - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - public static final com.google.protobuf.Descriptors.Descriptor + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Person(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; - } + 
return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; + } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hertzbeat.common.util.entity.PersonTest.Person.class, org.apache.hertzbeat.common.util.entity.PersonTest.Person.Builder.class); - } + return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hertzbeat.common.util.entity.PersonTest.Person.class, org.apache.hertzbeat.common.util.entity.PersonTest.Person.Builder.class); + } - public static final int NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object name_; - /** - * string name = 1; - * @return The name. - */ - @java.lang.Override - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } - } - /** - * string name = 1; - * @return The bytes for name. - */ - @java.lang.Override - public com.google.protobuf.ByteString + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + + /** + * string name = 1; + * + * @return The name. 
+ */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } - public static final int ID_FIELD_NUMBER = 2; - private int id_; - /** - * int32 id = 2; - * @return The id. - */ - @java.lang.Override - public int getId() { - return id_; - } + public static final int ID_FIELD_NUMBER = 2; + private int id_; + + /** + * int32 id = 2; + * + * @return The id. + */ + @java.lang.Override + public int getId() { + return id_; + } - public static final int EMAIL_FIELD_NUMBER = 3; - private volatile java.lang.Object email_; - /** - * string email = 3; - * @return The email. - */ - @java.lang.Override - public java.lang.String getEmail() { - java.lang.Object ref = email_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - email_ = s; - return s; - } - } - /** - * string email = 3; - * @return The bytes for email. 
- */ - @java.lang.Override - public com.google.protobuf.ByteString + public static final int EMAIL_FIELD_NUMBER = 3; + private volatile java.lang.Object email_; + + /** + * string email = 3; + * + * @return The email. + */ + @java.lang.Override + public java.lang.String getEmail() { + java.lang.Object ref = email_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + email_ = s; + return s; + } + } + + /** + * string email = 3; + * + * @return The bytes for email. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEmailBytes() { - java.lang.Object ref = email_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - email_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } + java.lang.Object ref = email_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + email_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + private byte memoizedIsInitialized = -1; - memoizedIsInitialized = 1; - return true; - } + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { - 
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); - } - if (id_ != 0) { - output.writeInt32(2, id_); - } - if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(email_)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 3, email_); - } - getUnknownFields().writeTo(output); - } + memoizedIsInitialized = 1; + return true; + } - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); - } - if (id_ != 0) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(2, id_); - } - if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(email_)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, email_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSize = size; - return size; - } + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (id_ != 0) { + output.writeInt32(2, id_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(email_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, email_); + } + getUnknownFields().writeTo(output); + } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hertzbeat.common.util.entity.PersonTest.Person)) { - return super.equals(obj); - } - org.apache.hertzbeat.common.util.entity.PersonTest.Person other = (org.apache.hertzbeat.common.util.entity.PersonTest.Person) obj; - - if (!getName() - .equals(other.getName())) return false; - if (getId() - != other.getId()) return false; - if (!getEmail() - 
.equals(other.getEmail())) return false; - if (!getUnknownFields().equals(other.getUnknownFields())) return false; - return true; - } + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (id_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, id_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(email_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, email_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); - hash = (37 * hash) + ID_FIELD_NUMBER; - hash = (53 * hash) + getId(); - hash = (37 * hash) + EMAIL_FIELD_NUMBER; - hash = (53 * hash) + getEmail().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hertzbeat.common.util.entity.PersonTest.Person)) { + return super.equals(obj); + } + org.apache.hertzbeat.common.util.entity.PersonTest.Person other = (org.apache.hertzbeat.common.util.entity.PersonTest.Person) obj; + + if (!getName() + .equals(other.getName())) return false; + if (getId() + != other.getId()) return false; + if (!getEmail() + .equals(other.getEmail())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( 
- java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static 
org.apache.hertzbeat.common.util.entity.PersonTest.Person parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId(); + hash = (37 * hash) + EMAIL_FIELD_NUMBER; + hash = (53 * hash) + getEmail().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.apache.hertzbeat.common.util.entity.PersonTest.Person prototype) { - return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hertzbeat.common.util.entity.Person} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:org.apache.hertzbeat.common.util.entity.Person) - org.apache.hertzbeat.common.util.entity.PersonTest.PersonOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hertzbeat.common.util.entity.PersonTest.Person.class, org.apache.hertzbeat.common.util.entity.PersonTest.Person.Builder.class); - } - - // Construct using org.apache.hertzbeat.common.util.entity.PersonTest.Person.newBuilder() - private Builder() { - - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - - } - @java.lang.Override - public Builder clear() { - super.clear(); - name_ = ""; - - id_ = 0; - - email_ = ""; - - return this; - } - - 
@java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; - } - - @java.lang.Override - public org.apache.hertzbeat.common.util.entity.PersonTest.Person getDefaultInstanceForType() { - return org.apache.hertzbeat.common.util.entity.PersonTest.Person.getDefaultInstance(); - } - - @java.lang.Override - public org.apache.hertzbeat.common.util.entity.PersonTest.Person build() { - org.apache.hertzbeat.common.util.entity.PersonTest.Person result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.apache.hertzbeat.common.util.entity.PersonTest.Person buildPartial() { - org.apache.hertzbeat.common.util.entity.PersonTest.Person result = new org.apache.hertzbeat.common.util.entity.PersonTest.Person(this); - result.name_ = name_; - result.id_ = id_; - result.email_ = email_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) 
{ - return super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hertzbeat.common.util.entity.PersonTest.Person) { - return mergeFrom((org.apache.hertzbeat.common.util.entity.PersonTest.Person)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hertzbeat.common.util.entity.PersonTest.Person other) { - if (other == org.apache.hertzbeat.common.util.entity.PersonTest.Person.getDefaultInstance()) return this; - if (!other.getName().isEmpty()) { - name_ = other.name_; - onChanged(); - } - if (other.getId() != 0) { - setId(other.getId()); - } - if (!other.getEmail().isEmpty()) { - email_ = other.email_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - name_ = input.readStringRequireUtf8(); - - break; - } // case 10 - case 16: { - id_ = input.readInt32(); - - break; - } // case 16 - case 26: { - email_ = input.readStringRequireUtf8(); - - break; - } // case 26 - default: { - if (!super.parseUnknownField(input, extensionRegistry, tag)) { - done = true; // was an endgroup tag + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); 
+ } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(org.apache.hertzbeat.common.util.entity.PersonTest.Person prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * Protobuf type {@code org.apache.hertzbeat.common.util.entity.Person} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:org.apache.hertzbeat.common.util.entity.Person) + org.apache.hertzbeat.common.util.entity.PersonTest.PersonOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hertzbeat.common.util.entity.PersonTest.Person.class, org.apache.hertzbeat.common.util.entity.PersonTest.Person.Builder.class); + } + + // Construct using org.apache.hertzbeat.common.util.entity.PersonTest.Person.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + id_ = 0; + + email_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hertzbeat.common.util.entity.PersonTest.internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; + } + + @java.lang.Override + public org.apache.hertzbeat.common.util.entity.PersonTest.Person 
getDefaultInstanceForType() { + return org.apache.hertzbeat.common.util.entity.PersonTest.Person.getDefaultInstance(); + } + + @java.lang.Override + public org.apache.hertzbeat.common.util.entity.PersonTest.Person build() { + org.apache.hertzbeat.common.util.entity.PersonTest.Person result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.apache.hertzbeat.common.util.entity.PersonTest.Person buildPartial() { + org.apache.hertzbeat.common.util.entity.PersonTest.Person result = new org.apache.hertzbeat.common.util.entity.PersonTest.Person(this); + result.name_ = name_; + result.id_ = id_; + result.email_ = email_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hertzbeat.common.util.entity.PersonTest.Person) { + return mergeFrom((org.apache.hertzbeat.common.util.entity.PersonTest.Person) other); + } 
else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hertzbeat.common.util.entity.PersonTest.Person other) { + if (other == org.apache.hertzbeat.common.util.entity.PersonTest.Person.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getId() != 0) { + setId(other.getId()); + } + if (!other.getEmail().isEmpty()) { + email_ = other.email_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + name_ = input.readStringRequireUtf8(); + + break; + } // case 10 + case 16: { + id_ = input.readInt32(); + + break; + } // case 16 + case 26: { + email_ = input.readStringRequireUtf8(); + + break; + } // case 26 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private java.lang.Object name_ = ""; + + /** + * string name = 1; + * + * @return The name. 
+ */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); } - break; - } // default: - } // switch (tag) - } // while (!done) - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.unwrapIOException(); - } finally { - onChanged(); - } // finally - return this; - } - - private java.lang.Object name_ = ""; - /** - * string name = 1; - * @return The name. - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * string name = 1; - * @return The bytes for name. 
- */ - public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * string name = 1; - * @param value The name to set. - * @return This builder for chaining. - */ - public Builder setName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - name_ = value; - onChanged(); - return this; - } - /** - * string name = 1; - * @return This builder for chaining. - */ - public Builder clearName() { - - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * string name = 1; - * @param value The bytes for name to set. - * @return This builder for chaining. - */ - public Builder setNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - name_ = value; - onChanged(); - return this; - } - - private int id_ ; - /** - * int32 id = 2; - * @return The id. - */ - @java.lang.Override - public int getId() { - return id_; - } - /** - * int32 id = 2; - * @param value The id to set. - * @return This builder for chaining. - */ - public Builder setId(int value) { - - id_ = value; - onChanged(); - return this; - } - /** - * int32 id = 2; - * @return This builder for chaining. - */ - public Builder clearId() { - - id_ = 0; - onChanged(); - return this; - } - - private java.lang.Object email_ = ""; - /** - * string email = 3; - * @return The email. 
- */ - public java.lang.String getEmail() { - java.lang.Object ref = email_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - email_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * string email = 3; - * @return The bytes for email. - */ - public com.google.protobuf.ByteString - getEmailBytes() { - java.lang.Object ref = email_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - email_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * string email = 3; - * @param value The email to set. - * @return This builder for chaining. - */ - public Builder setEmail( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - email_ = value; - onChanged(); - return this; - } - /** - * string email = 3; - * @return This builder for chaining. - */ - public Builder clearEmail() { - - email_ = getDefaultInstance().getEmail(); - onChanged(); - return this; - } - /** - * string email = 3; - * @param value The bytes for email to set. - * @return This builder for chaining. 
- */ - public Builder setEmailBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - email_ = value; - onChanged(); - return this; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:org.apache.hertzbeat.common.util.entity.Person) - } - // @@protoc_insertion_point(class_scope:org.apache.hertzbeat.common.util.entity.Person) - private static final org.apache.hertzbeat.common.util.entity.PersonTest.Person DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.apache.hertzbeat.common.util.entity.PersonTest.Person(); - } + name_ = value; + onChanged(); + return this; + } + + /** + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + + /** + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int id_; + + /** + * int32 id = 2; + * + * @return The id. + */ + @java.lang.Override + public int getId() { + return id_; + } + + /** + * int32 id = 2; + * + * @param value The id to set. + * @return This builder for chaining. + */ + public Builder setId(int value) { + + id_ = value; + onChanged(); + return this; + } + + /** + * int32 id = 2; + * + * @return This builder for chaining. 
+ */ + public Builder clearId() { + + id_ = 0; + onChanged(); + return this; + } + + private java.lang.Object email_ = ""; + + /** + * string email = 3; + * + * @return The email. + */ + public java.lang.String getEmail() { + java.lang.Object ref = email_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + email_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * string email = 3; + * + * @return The bytes for email. + */ + public com.google.protobuf.ByteString + getEmailBytes() { + java.lang.Object ref = email_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + email_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * string email = 3; + * + * @param value The email to set. + * @return This builder for chaining. + */ + public Builder setEmail( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } - public static org.apache.hertzbeat.common.util.entity.PersonTest.Person getDefaultInstance() { - return DEFAULT_INSTANCE; - } + email_ = value; + onChanged(); + return this; + } + + /** + * string email = 3; + * + * @return This builder for chaining. + */ + public Builder clearEmail() { + + email_ = getDefaultInstance().getEmail(); + onChanged(); + return this; + } + + /** + * string email = 3; + * + * @param value The bytes for email to set. + * @return This builder for chaining. 
+ */ + public Builder setEmailBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + email_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } - private static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public Person parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - Builder builder = newBuilder(); - try { - builder.mergeFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(builder.buildPartial()); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e) - .setUnfinishedMessage(builder.buildPartial()); - } - return builder.buildPartial(); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; + // @@protoc_insertion_point(builder_scope:org.apache.hertzbeat.common.util.entity.Person) + } + + // @@protoc_insertion_point(class_scope:org.apache.hertzbeat.common.util.entity.Person) + private static final org.apache.hertzbeat.common.util.entity.PersonTest.Person DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new 
org.apache.hertzbeat.common.util.entity.PersonTest.Person(); + } + + public static org.apache.hertzbeat.common.util.entity.PersonTest.Person getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Person parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.apache.hertzbeat.common.util.entity.PersonTest.Person getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } - @java.lang.Override - public org.apache.hertzbeat.common.util.entity.PersonTest.Person getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; } - } + 
private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\014person.proto\022\'org.apache.hertzbeat.com" + - "mon.util.entity\"1\n\006Person\022\014\n\004name\030\001 \001(\t\022" + - "\n\n\002id\030\002 \001(\005\022\r\n\005email\030\003 \001(\tB\014B\nPersonTest" + - "b\006proto3" - }; - descriptor = com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - }); - internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor, - new java.lang.String[] { "Name", "Id", "Email", }); - } - - // @@protoc_insertion_point(outer_class_scope) + static { + java.lang.String[] descriptorData = { + "\n\014person.proto\022\'org.apache.hertzbeat.com" + + "mon.util.entity\"1\n\006Person\022\014\n\004name\030\001 \001(\t\022" + + "\n\n\002id\030\002 \001(\005\022\r\n\005email\030\003 \001(\tB\014B\nPersonTest" + + "b\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[]{ + }); + 
internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_org_apache_hertzbeat_common_util_entity_Person_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_org_apache_hertzbeat_common_util_entity_Person_descriptor, + new java.lang.String[]{"Name", "Id", "Email",}); + } + + // @@protoc_insertion_point(outer_class_scope) } diff --git a/common/src/test/java/org/apache/hertzbeat/common/util/prometheus/PrometheusUtilTest.java b/common/src/test/java/org/apache/hertzbeat/common/util/prometheus/PrometheusUtilTest.java index e74570f0719..ab8a60113bb 100644 --- a/common/src/test/java/org/apache/hertzbeat/common/util/prometheus/PrometheusUtilTest.java +++ b/common/src/test/java/org/apache/hertzbeat/common/util/prometheus/PrometheusUtilTest.java @@ -19,209 +19,207 @@ package org.apache.hertzbeat.common.util.prometheus; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.util.List; - import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - /** * test case for {@link PrometheusUtil} */ class PrometheusUtilTest { - @Test - void testParseMetricsNormalInput() throws IOException { + @Test + void testParseMetricsNormalInput() throws IOException { - String input = "metric_name{label1=\"value1\",label2=\"value2\"} 123.45 67890\n"; - InputStream inputStream = new ByteArrayInputStream(input.getBytes()); + String input = 
"metric_name{label1=\"value1\",label2=\"value2\"} 123.45 67890\n"; + InputStream inputStream = new ByteArrayInputStream(input.getBytes()); - List metrics = PrometheusUtil.parseMetrics(inputStream); + List metrics = PrometheusUtil.parseMetrics(inputStream); - assertNotNull(metrics); - assertEquals(1, metrics.size()); + assertNotNull(metrics); + assertEquals(1, metrics.size()); - Metric metric = metrics.get(0); - assertEquals("metric_name", metric.getMetricName()); - assertEquals(123.45, metric.getValue()); - assertEquals(67890, metric.getTimestamp()); + Metric metric = metrics.get(0); + assertEquals("metric_name", metric.getMetricName()); + assertEquals(123.45, metric.getValue()); + assertEquals(67890, metric.getTimestamp()); - List